1 /*-------------------------------------------------------------------------
4 * POSTGRES type cache code
6 * The type cache exists to speed lookup of certain information about data
7 * types that is not directly available from a type's pg_type row. For
8 * example, we use a type's default btree opclass, or the default hash
9 * opclass if no btree opclass exists, to determine which operators should
10 * be used for grouping and sorting the type (GROUP BY, ORDER BY ASC/DESC).
12 * Several seemingly-odd choices have been made to support use of the type
13 * cache by generic array and record handling routines, such as array_eq(),
14 * record_cmp(), and hash_array(). Because those routines are used as index
15 * support operations, they cannot leak memory. To allow them to execute
16 * efficiently, all information that they would like to re-use across calls
17 * is kept in the type cache.
19 * Once created, a type cache entry lives as long as the backend does, so
20 * there is no need for a call to release a cache entry. If the type is
21 * dropped, the cache entry simply becomes wasted storage. This is not
22 * expected to happen often, and assuming that typcache entries are good
23 * permanently allows caching pointers to them in long-lived places.
25 * We have some provisions for updating cache entries if the stored data
26 * becomes obsolete. Information dependent on opclasses is cleared if we
27 * detect updates to pg_opclass. We also support clearing the tuple
28 * descriptor and operator/function parts of a rowtype's cache entry,
29 * since those may need to change as a consequence of ALTER TABLE.
30 * Domain constraint changes are also tracked properly.
33 * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
34 * Portions Copyright (c) 1994, Regents of the University of California
37 * src/backend/utils/cache/typcache.c
39 *-------------------------------------------------------------------------
45 #include "access/hash.h"
46 #include "access/heapam.h"
47 #include "access/htup_details.h"
48 #include "access/nbtree.h"
49 #include "catalog/indexing.h"
50 #include "catalog/pg_am.h"
51 #include "catalog/pg_constraint.h"
52 #include "catalog/pg_enum.h"
53 #include "catalog/pg_operator.h"
54 #include "catalog/pg_range.h"
55 #include "catalog/pg_type.h"
56 #include "commands/defrem.h"
57 #include "executor/executor.h"
58 #include "optimizer/planner.h"
59 #include "utils/builtins.h"
60 #include "utils/catcache.h"
61 #include "utils/fmgroids.h"
62 #include "utils/inval.h"
63 #include "utils/lsyscache.h"
64 #include "utils/memutils.h"
65 #include "utils/rel.h"
66 #include "utils/snapmgr.h"
67 #include "utils/syscache.h"
68 #include "utils/typcache.h"
71 /* The main type cache hashtable searched by lookup_type_cache */
72 static HTAB *TypeCacheHash = NULL;
74 /* List of type cache entries for domain types */
75 static TypeCacheEntry *firstDomainTypeEntry = NULL;
77 /* Private flag bits in the TypeCacheEntry.flags field */
78 #define TCFLAGS_CHECKED_BTREE_OPCLASS 0x0001
79 #define TCFLAGS_CHECKED_HASH_OPCLASS 0x0002
80 #define TCFLAGS_CHECKED_EQ_OPR 0x0004
81 #define TCFLAGS_CHECKED_LT_OPR 0x0008
82 #define TCFLAGS_CHECKED_GT_OPR 0x0010
83 #define TCFLAGS_CHECKED_CMP_PROC 0x0020
84 #define TCFLAGS_CHECKED_HASH_PROC 0x0040
85 #define TCFLAGS_CHECKED_ELEM_PROPERTIES 0x0080
86 #define TCFLAGS_HAVE_ELEM_EQUALITY 0x0100
87 #define TCFLAGS_HAVE_ELEM_COMPARE 0x0200
88 #define TCFLAGS_HAVE_ELEM_HASHING 0x0400
89 #define TCFLAGS_CHECKED_FIELD_PROPERTIES 0x0800
90 #define TCFLAGS_HAVE_FIELD_EQUALITY 0x1000
91 #define TCFLAGS_HAVE_FIELD_COMPARE 0x2000
92 #define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS 0x4000
95 * Data stored about a domain type's constraints. Note that we do not create
96 * this struct for the common case of a constraint-less domain; we just set
97 * domainData to NULL to indicate that.
99 * Within a DomainConstraintCache, we abuse the DomainConstraintState node
100 * type a bit: check_expr fields point to expression plan trees, not plan
101 * state trees. When needed, expression state trees are built by flat-copying
102 * the DomainConstraintState nodes and applying ExecInitExpr to check_expr.
103 * Such a state tree is not part of the DomainConstraintCache, but is
104 * considered to belong to a DomainConstraintRef.
106 struct DomainConstraintCache
108 List *constraints; /* list of DomainConstraintState nodes */
109 MemoryContext dccContext; /* memory context holding all associated data */
110 long dccRefCount; /* number of references to this struct */
113 /* Private information to support comparisons of enum values */
116 Oid enum_oid; /* OID of one enum value */
117 float4 sort_order; /* its sort position */
120 typedef struct TypeCacheEnumData
122 Oid bitmap_base; /* OID corresponding to bit 0 of bitmapset */
123 Bitmapset *sorted_values; /* Set of OIDs known to be in order */
124 int num_values; /* total number of values in enum */
125 EnumItem enum_values[FLEXIBLE_ARRAY_MEMBER];
129 * We use a separate table for storing the definitions of non-anonymous
130 * record types. Once defined, a record type will be remembered for the
131 * life of the backend. Subsequent uses of the "same" record type (where
132 * sameness means equalTupleDescs) will refer to the existing table entry.
134 * Stored record types are remembered in a linear array of TupleDescs,
135 * which can be indexed quickly with the assigned typmod. There is also
136 * a hash table to speed searches for matching TupleDescs. The hash key
137 * uses just the first N columns' type OIDs, and so we may have multiple
138 * entries with the same hash key.
140 #define REC_HASH_KEYS 16 /* use this many columns in hash key */
142 typedef struct RecordCacheEntry
144 /* the hash lookup key MUST BE FIRST */
145 Oid hashkey[REC_HASH_KEYS]; /* column type IDs, zero-filled */
147 /* list of TupleDescs for record types with this hashkey */
151 static HTAB *RecordCacheHash = NULL;
153 static TupleDesc *RecordCacheArray = NULL;
154 static int32 RecordCacheArrayLen = 0; /* allocated length of array */
155 static int32 NextRecordTypmod = 0; /* number of entries used */
157 static void load_typcache_tupdesc(TypeCacheEntry *typentry);
158 static void load_rangetype_info(TypeCacheEntry *typentry);
159 static void load_domaintype_info(TypeCacheEntry *typentry);
160 static int dcs_cmp(const void *a, const void *b);
161 static void decr_dcc_refcount(DomainConstraintCache *dcc);
162 static void dccref_deletion_callback(void *arg);
163 static List *prep_domain_constraints(List *constraints, MemoryContext execctx);
164 static bool array_element_has_equality(TypeCacheEntry *typentry);
165 static bool array_element_has_compare(TypeCacheEntry *typentry);
166 static bool array_element_has_hashing(TypeCacheEntry *typentry);
167 static void cache_array_element_properties(TypeCacheEntry *typentry);
168 static bool record_fields_have_equality(TypeCacheEntry *typentry);
169 static bool record_fields_have_compare(TypeCacheEntry *typentry);
170 static void cache_record_field_properties(TypeCacheEntry *typentry);
171 static void TypeCacheRelCallback(Datum arg, Oid relid);
172 static void TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue);
173 static void TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue);
174 static void load_enum_cache_data(TypeCacheEntry *tcache);
175 static EnumItem *find_enumitem(TypeCacheEnumData *enumdata, Oid arg);
176 static int enum_oid_cmp(const void *left, const void *right);
182 * Fetch the type cache entry for the specified datatype, and make sure that
183 * all the fields requested by bits in 'flags' are valid.
185 * The result is never NULL --- we will elog() if the passed type OID is
186 * invalid. Note however that we may fail to find one or more of the
187 * requested opclass-dependent fields; the caller needs to check whether
188 * the fields are InvalidOid or not.
/*
 * lookup_type_cache
 *
 * Fetch the TypeCacheEntry for type_id, creating it on first use, and fill
 * in whichever optional fields the 'flags' bits request.  The entry lives
 * for the rest of the backend's life (see file header comment).
 *
 * NOTE(review): this extract is missing lines (braces, some declarations);
 * comments below describe only what the visible code shows.
 */
191 lookup_type_cache(Oid type_id, int flags)
193 	TypeCacheEntry *typentry;
/* One-time setup: create the hash table and register invalidation hooks */
196 if (TypeCacheHash == NULL)
198 /* First time through: initialize the hash table */
201 MemSet(&ctl, 0, sizeof(ctl));
202 ctl.keysize = sizeof(Oid);
203 ctl.entrysize = sizeof(TypeCacheEntry);
204 TypeCacheHash = hash_create("Type information cache", 64,
205 &ctl, HASH_ELEM | HASH_BLOBS);
207 /* Also set up callbacks for SI invalidations */
208 CacheRegisterRelcacheCallback(TypeCacheRelCallback, (Datum) 0);
209 CacheRegisterSyscacheCallback(CLAOID, TypeCacheOpcCallback, (Datum) 0);
210 CacheRegisterSyscacheCallback(CONSTROID, TypeCacheConstrCallback, (Datum) 0);
211 CacheRegisterSyscacheCallback(TYPEOID, TypeCacheConstrCallback, (Datum) 0);
213 /* Also make sure CacheMemoryContext exists */
214 if (!CacheMemoryContext)
215 CreateCacheMemoryContext();
218 /* Try to look up an existing entry */
219 typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
222 if (typentry == NULL)
225 * If we didn't find one, we want to make one. But first look up the
226 * pg_type row, just to make sure we don't make a cache entry for an
232 tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
233 if (!HeapTupleIsValid(tp))
234 elog(ERROR, "cache lookup failed for type %u", type_id);
235 typtup = (Form_pg_type) GETSTRUCT(tp);
/* Refuse to cache shell (not-yet-defined) types */
236 if (!typtup->typisdefined)
238 (errcode(ERRCODE_UNDEFINED_OBJECT),
239 errmsg("type \"%s\" is only a shell",
240 NameStr(typtup->typname))));
242 /* Now make the typcache entry */
243 typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
246 Assert(!found); /* it wasn't there a moment ago */
/* Copy the pg_type fields we cache permanently */
248 MemSet(typentry, 0, sizeof(TypeCacheEntry));
249 typentry->type_id = type_id;
250 typentry->typlen = typtup->typlen;
251 typentry->typbyval = typtup->typbyval;
252 typentry->typalign = typtup->typalign;
253 typentry->typstorage = typtup->typstorage;
254 typentry->typtype = typtup->typtype;
255 typentry->typrelid = typtup->typrelid;
257 /* If it's a domain, immediately thread it into the domain cache list */
258 if (typentry->typtype == TYPTYPE_DOMAIN)
260 typentry->nextDomain = firstDomainTypeEntry;
261 firstDomainTypeEntry = typentry;
268 * Look up opclasses if we haven't already and any dependent info is
/* Btree opclass lookup: needed for eq/lt/gt/cmp and btree-opfamily requests */
271 if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_LT_OPR | TYPECACHE_GT_OPR |
273 TYPECACHE_EQ_OPR_FINFO | TYPECACHE_CMP_PROC_FINFO |
274 TYPECACHE_BTREE_OPFAMILY)) &&
275 !(typentry->flags & TCFLAGS_CHECKED_BTREE_OPCLASS))
279 opclass = GetDefaultOpClass(type_id, BTREE_AM_OID);
280 if (OidIsValid(opclass))
282 typentry->btree_opf = get_opclass_family(opclass);
283 typentry->btree_opintype = get_opclass_input_type(opclass);
287 typentry->btree_opf = typentry->btree_opintype = InvalidOid;
291 * Reset information derived from btree opclass. Note in particular
292 * that we'll redetermine the eq_opr even if we previously found one;
293 * this matters in case a btree opclass has been added to a type that
294 * previously had only a hash opclass.
296 typentry->flags &= ~(TCFLAGS_CHECKED_EQ_OPR |
297 TCFLAGS_CHECKED_LT_OPR |
298 TCFLAGS_CHECKED_GT_OPR |
299 TCFLAGS_CHECKED_CMP_PROC);
300 typentry->flags |= TCFLAGS_CHECKED_BTREE_OPCLASS;
304 * If we need to look up equality operator, and there's no btree opclass,
305 * force lookup of hash opclass.
307 if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
308 !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR) &&
309 typentry->btree_opf == InvalidOid)
310 flags |= TYPECACHE_HASH_OPFAMILY;
/* Hash opclass lookup, if requested (possibly forced just above) */
312 if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO |
313 TYPECACHE_HASH_OPFAMILY)) &&
314 !(typentry->flags & TCFLAGS_CHECKED_HASH_OPCLASS))
318 opclass = GetDefaultOpClass(type_id, HASH_AM_OID);
319 if (OidIsValid(opclass))
321 typentry->hash_opf = get_opclass_family(opclass);
322 typentry->hash_opintype = get_opclass_input_type(opclass);
326 typentry->hash_opf = typentry->hash_opintype = InvalidOid;
330 * Reset information derived from hash opclass. We do *not* reset the
331 * eq_opr; if we already found one from the btree opclass, that
332 * decision is still good.
334 typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC);
335 typentry->flags |= TCFLAGS_CHECKED_HASH_OPCLASS;
339 * Look for requested operators and functions, if we haven't already.
/* Equality operator: prefer btree's "=", fall back to hash opfamily's "=" */
341 if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
342 !(typentry->flags & TCFLAGS_CHECKED_EQ_OPR))
344 Oid eq_opr = InvalidOid;
346 if (typentry->btree_opf != InvalidOid)
347 eq_opr = get_opfamily_member(typentry->btree_opf,
348 typentry->btree_opintype,
349 typentry->btree_opintype,
350 BTEqualStrategyNumber);
351 if (eq_opr == InvalidOid &&
352 typentry->hash_opf != InvalidOid)
353 eq_opr = get_opfamily_member(typentry->hash_opf,
354 typentry->hash_opintype,
355 typentry->hash_opintype,
356 HTEqualStrategyNumber);
359 * If the proposed equality operator is array_eq or record_eq, check
360 * to see if the element type or column types support equality. If
361 * not, array_eq or record_eq would fail at runtime, so we don't want
362 * to report that the type has equality.
364 if (eq_opr == ARRAY_EQ_OP &&
365 !array_element_has_equality(typentry))
367 else if (eq_opr == RECORD_EQ_OP &&
368 !record_fields_have_equality(typentry))
371 /* Force update of eq_opr_finfo only if we're changing state */
372 if (typentry->eq_opr != eq_opr)
373 typentry->eq_opr_finfo.fn_oid = InvalidOid;
375 typentry->eq_opr = eq_opr;
378 * Reset info about hash function whenever we pick up new info about
379 * equality operator. This is so we can ensure that the hash function
380 * matches the operator.
382 typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC);
383 typentry->flags |= TCFLAGS_CHECKED_EQ_OPR;
/* Less-than operator: btree opfamily only */
385 if ((flags & TYPECACHE_LT_OPR) &&
386 !(typentry->flags & TCFLAGS_CHECKED_LT_OPR))
388 Oid lt_opr = InvalidOid;
390 if (typentry->btree_opf != InvalidOid)
391 lt_opr = get_opfamily_member(typentry->btree_opf,
392 typentry->btree_opintype,
393 typentry->btree_opintype,
394 BTLessStrategyNumber);
396 /* As above, make sure array_cmp or record_cmp will succeed */
397 if (lt_opr == ARRAY_LT_OP &&
398 !array_element_has_compare(typentry))
400 else if (lt_opr == RECORD_LT_OP &&
401 !record_fields_have_compare(typentry))
404 typentry->lt_opr = lt_opr;
405 typentry->flags |= TCFLAGS_CHECKED_LT_OPR;
/* Greater-than operator: btree opfamily only */
407 if ((flags & TYPECACHE_GT_OPR) &&
408 !(typentry->flags & TCFLAGS_CHECKED_GT_OPR))
410 Oid gt_opr = InvalidOid;
412 if (typentry->btree_opf != InvalidOid)
413 gt_opr = get_opfamily_member(typentry->btree_opf,
414 typentry->btree_opintype,
415 typentry->btree_opintype,
416 BTGreaterStrategyNumber);
418 /* As above, make sure array_cmp or record_cmp will succeed */
419 if (gt_opr == ARRAY_GT_OP &&
420 !array_element_has_compare(typentry))
422 else if (gt_opr == RECORD_GT_OP &&
423 !record_fields_have_compare(typentry))
426 typentry->gt_opr = gt_opr;
427 typentry->flags |= TCFLAGS_CHECKED_GT_OPR;
/* Btree comparison support function */
429 if ((flags & (TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO)) &&
430 !(typentry->flags & TCFLAGS_CHECKED_CMP_PROC))
432 Oid cmp_proc = InvalidOid;
434 if (typentry->btree_opf != InvalidOid)
435 cmp_proc = get_opfamily_proc(typentry->btree_opf,
436 typentry->btree_opintype,
437 typentry->btree_opintype,
440 /* As above, make sure array_cmp or record_cmp will succeed */
441 if (cmp_proc == F_BTARRAYCMP &&
442 !array_element_has_compare(typentry))
443 cmp_proc = InvalidOid;
444 else if (cmp_proc == F_BTRECORDCMP &&
445 !record_fields_have_compare(typentry))
446 cmp_proc = InvalidOid;
448 /* Force update of cmp_proc_finfo only if we're changing state */
449 if (typentry->cmp_proc != cmp_proc)
450 typentry->cmp_proc_finfo.fn_oid = InvalidOid;
452 typentry->cmp_proc = cmp_proc;
453 typentry->flags |= TCFLAGS_CHECKED_CMP_PROC;
/* Hash support function; must be consistent with any chosen eq_opr */
455 if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO)) &&
456 !(typentry->flags & TCFLAGS_CHECKED_HASH_PROC))
458 Oid hash_proc = InvalidOid;
461 * We insist that the eq_opr, if one has been determined, match the
462 * hash opclass; else report there is no hash function.
464 if (typentry->hash_opf != InvalidOid &&
465 (!OidIsValid(typentry->eq_opr) ||
466 typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
467 typentry->hash_opintype,
468 typentry->hash_opintype,
469 HTEqualStrategyNumber)))
470 hash_proc = get_opfamily_proc(typentry->hash_opf,
471 typentry->hash_opintype,
472 typentry->hash_opintype,
476 * As above, make sure hash_array will succeed. We don't currently
477 * support hashing for composite types, but when we do, we'll need
478 * more logic here to check that case too.
480 if (hash_proc == F_HASH_ARRAY &&
481 !array_element_has_hashing(typentry))
482 hash_proc = InvalidOid;
484 /* Force update of hash_proc_finfo only if we're changing state */
485 if (typentry->hash_proc != hash_proc)
486 typentry->hash_proc_finfo.fn_oid = InvalidOid;
488 typentry->hash_proc = hash_proc;
489 typentry->flags |= TCFLAGS_CHECKED_HASH_PROC;
493 * Set up fmgr lookup info as requested
495 * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
496 * which is not quite right (they're really in the hash table's private
497 * memory context) but this will do for our purposes.
499 * Note: the code above avoids invalidating the finfo structs unless the
500 * referenced operator/function OID actually changes. This is to prevent
501 * unnecessary leakage of any subsidiary data attached to an finfo, since
502 * that would cause session-lifespan memory leaks.
504 if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
505 typentry->eq_opr_finfo.fn_oid == InvalidOid &&
506 typentry->eq_opr != InvalidOid)
/* eq_opr is an operator, so map it to its implementing function first */
510 eq_opr_func = get_opcode(typentry->eq_opr);
511 if (eq_opr_func != InvalidOid)
512 fmgr_info_cxt(eq_opr_func, &typentry->eq_opr_finfo,
515 if ((flags & TYPECACHE_CMP_PROC_FINFO) &&
516 typentry->cmp_proc_finfo.fn_oid == InvalidOid &&
517 typentry->cmp_proc != InvalidOid)
519 fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo,
522 if ((flags & TYPECACHE_HASH_PROC_FINFO) &&
523 typentry->hash_proc_finfo.fn_oid == InvalidOid &&
524 typentry->hash_proc != InvalidOid)
526 fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo,
531 * If it's a composite type (row type), get tupdesc if requested
533 if ((flags & TYPECACHE_TUPDESC) &&
534 typentry->tupDesc == NULL &&
535 typentry->typtype == TYPTYPE_COMPOSITE)
537 load_typcache_tupdesc(typentry);
541 * If requested, get information about a range type
543 if ((flags & TYPECACHE_RANGE_INFO) &&
544 typentry->rngelemtype == NULL &&
545 typentry->typtype == TYPTYPE_RANGE)
547 load_rangetype_info(typentry);
551 * If requested, get information about a domain type
553 if ((flags & TYPECACHE_DOMAIN_INFO) &&
554 (typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
555 typentry->typtype == TYPTYPE_DOMAIN)
557 load_domaintype_info(typentry);
564 * load_typcache_tupdesc --- helper routine to set up composite type's tupDesc
/*
 * load_typcache_tupdesc --- fill typentry->tupDesc for a composite type.
 *
 * Opens the type's underlying relation, links to its tuple descriptor, and
 * bumps the descriptor's refcount manually (not via IncrTupleDescRefCount,
 * see comment below) so the reference can outlive the current query.
 */
567 load_typcache_tupdesc(TypeCacheEntry *typentry)
571 if (!OidIsValid(typentry->typrelid)) /* should not happen */
572 elog(ERROR, "invalid typrelid for composite type %u",
574 rel = relation_open(typentry->typrelid, AccessShareLock);
575 Assert(rel->rd_rel->reltype == typentry->type_id);
578 * Link to the tupdesc and increment its refcount (we assert it's a
579 * refcounted descriptor). We don't use IncrTupleDescRefCount() for this,
580 * because the reference mustn't be entered in the current resource owner;
581 * it can outlive the current query.
583 typentry->tupDesc = RelationGetDescr(rel);
585 Assert(typentry->tupDesc->tdrefcount > 0);
586 typentry->tupDesc->tdrefcount++;
/* Release the lock; the tupdesc reference keeps the descriptor alive */
588 relation_close(rel, AccessShareLock);
592 * load_rangetype_info --- helper routine to set up range type information
/*
 * load_rangetype_info --- fill range-type fields of a typcache entry.
 *
 * Reads the pg_range row for this range type, resolves the subtype's
 * comparison support function from its btree opclass, caches fmgr info for
 * the compare/canonical/subdiff functions, and finally links to the element
 * type's typcache entry.  Setting rngelemtype last is what marks the range
 * data as valid (see the check in lookup_type_cache).
 */
595 load_rangetype_info(TypeCacheEntry *typentry)
597 Form_pg_range pg_range;
607 /* get information from pg_range */
608 tup = SearchSysCache1(RANGETYPE, ObjectIdGetDatum(typentry->type_id));
609 /* should not fail, since we already checked typtype ... */
610 if (!HeapTupleIsValid(tup))
611 elog(ERROR, "cache lookup failed for range type %u",
613 pg_range = (Form_pg_range) GETSTRUCT(tup);
/* Copy out everything we need before releasing the syscache tuple */
615 subtypeOid = pg_range->rngsubtype;
616 typentry->rng_collation = pg_range->rngcollation;
617 opclassOid = pg_range->rngsubopc;
618 canonicalOid = pg_range->rngcanonical;
619 subdiffOid = pg_range->rngsubdiff;
621 ReleaseSysCache(tup);
623 /* get opclass properties and look up the comparison function */
624 opfamilyOid = get_opclass_family(opclassOid);
625 opcintype = get_opclass_input_type(opclassOid);
627 cmpFnOid = get_opfamily_proc(opfamilyOid, opcintype, opcintype,
629 if (!RegProcedureIsValid(cmpFnOid))
630 elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
631 BTORDER_PROC, opcintype, opcintype, opfamilyOid);
633 /* set up cached fmgrinfo structs */
634 fmgr_info_cxt(cmpFnOid, &typentry->rng_cmp_proc_finfo,
/* canonical and subdiff functions are optional for a range type */
636 if (OidIsValid(canonicalOid))
637 fmgr_info_cxt(canonicalOid, &typentry->rng_canonical_finfo,
639 if (OidIsValid(subdiffOid))
640 fmgr_info_cxt(subdiffOid, &typentry->rng_subdiff_finfo,
643 /* Lastly, set up link to the element type --- this marks data valid */
644 typentry->rngelemtype = lookup_type_cache(subtypeOid, 0);
649 * load_domaintype_info --- helper routine to set up domain constraint info
651 * Note: we assume we're called in a relatively short-lived context, so it's
652 * okay to leak data into the current context while scanning pg_constraint.
653 * We build the new DomainConstraintCache data in a context underneath
654 * CurrentMemoryContext, and reparent it under CacheMemoryContext when
/*
 * load_domaintype_info --- (re)build typentry->domainData for a domain.
 *
 * Walks up the domain's ancestry collecting CHECK constraints from
 * pg_constraint (sorted by name within each level, parents' constraints
 * applied first) plus at most one NOT NULL check.  The resulting
 * DomainConstraintCache lives in its own memory context, which is
 * reparented to CacheMemoryContext once complete; the typcache entry holds
 * one reference (dccRefCount).  A constraint-less domain gets domainData =
 * NULL.  Either way, TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS is set on exit.
 *
 * NOTE(review): this extract is missing lines (braces, loop headers, some
 * declarations); comments describe only what the visible code shows.
 */
658 load_domaintype_info(TypeCacheEntry *typentry)
660 Oid typeOid = typentry->type_id;
661 DomainConstraintCache *dcc;
662 bool notNull = false;
663 DomainConstraintState **ccons;
666 MemoryContext oldcxt;
669 * If we're here, any existing constraint info is stale, so release it.
670 * For safety, be sure to null the link before trying to delete the data.
672 if (typentry->domainData)
674 dcc = typentry->domainData;
675 typentry->domainData = NULL;
676 decr_dcc_refcount(dcc);
680 * We try to optimize the common case of no domain constraints, so don't
681 * create the dcc object and context until we find a constraint. Likewise
682 * for the temp sorting array.
689 * Scan pg_constraint for relevant constraints. We want to find
690 * constraints for not just this domain, but any ancestor domains, so the
691 * outer loop crawls up the domain stack.
693 conRel = heap_open(ConstraintRelationId, AccessShareLock);
/* Outer loop: typeOid walks up via typbasetype until a non-domain is hit */
704 tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeOid));
705 if (!HeapTupleIsValid(tup))
706 elog(ERROR, "cache lookup failed for type %u", typeOid);
707 typTup = (Form_pg_type) GETSTRUCT(tup);
709 if (typTup->typtype != TYPTYPE_DOMAIN)
711 /* Not a domain, so done */
712 ReleaseSysCache(tup);
716 /* Test for NOT NULL Constraint */
717 if (typTup->typnotnull)
720 /* Look for CHECK Constraints on this domain */
722 Anum_pg_constraint_contypid,
723 BTEqualStrategyNumber, F_OIDEQ,
724 ObjectIdGetDatum(typeOid));
726 scan = systable_beginscan(conRel, ConstraintTypidIndexId, true,
729 while (HeapTupleIsValid(conTup = systable_getnext(scan)))
731 Form_pg_constraint c = (Form_pg_constraint) GETSTRUCT(conTup);
736 DomainConstraintState *r;
738 /* Ignore non-CHECK constraints (presently, shouldn't be any) */
739 if (c->contype != CONSTRAINT_CHECK)
742 /* Not expecting conbin to be NULL, but we'll test for it anyway */
743 val = fastgetattr(conTup, Anum_pg_constraint_conbin,
744 conRel->rd_att, &isNull);
746 elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin",
747 NameStr(typTup->typname), NameStr(c->conname));
749 /* Convert conbin to C string in caller context */
750 constring = TextDatumGetCString(val);
752 /* Create the DomainConstraintCache object and context if needed */
757 cxt = AllocSetContextCreate(CurrentMemoryContext,
758 "Domain constraints",
759 ALLOCSET_SMALL_SIZES);
/* The dcc struct itself lives inside its own context, so deleting the
 * context frees everything, struct included (see decr_dcc_refcount) */
760 dcc = (DomainConstraintCache *)
761 MemoryContextAlloc(cxt, sizeof(DomainConstraintCache));
762 dcc->constraints = NIL;
763 dcc->dccContext = cxt;
764 dcc->dccRefCount = 0;
767 /* Create node trees in DomainConstraintCache's context */
768 oldcxt = MemoryContextSwitchTo(dcc->dccContext);
770 check_expr = (Expr *) stringToNode(constring);
772 /* ExecInitExpr will assume we've planned the expression */
773 check_expr = expression_planner(check_expr);
775 r = makeNode(DomainConstraintState);
776 r->constrainttype = DOM_CONSTRAINT_CHECK;
777 r->name = pstrdup(NameStr(c->conname));
778 /* Must cast here because we're not storing an expr state node */
779 r->check_expr = (ExprState *) check_expr;
781 MemoryContextSwitchTo(oldcxt);
783 /* Accumulate constraints in an array, for sorting below */
/* Lazily allocate / grow the temp array (lives in caller's context) */
787 ccons = (DomainConstraintState **)
788 palloc(cconslen * sizeof(DomainConstraintState *));
790 else if (nccons >= cconslen)
793 ccons = (DomainConstraintState **)
794 repalloc(ccons, cconslen * sizeof(DomainConstraintState *));
799 systable_endscan(scan);
804 * Sort the items for this domain, so that CHECKs are applied in a
805 * deterministic order.
808 qsort(ccons, nccons, sizeof(DomainConstraintState *), dcs_cmp);
811 * Now attach them to the overall list. Use lcons() here because
812 * constraints of parent domains should be applied earlier.
814 oldcxt = MemoryContextSwitchTo(dcc->dccContext);
/* Prepend in reverse so this level ends up in sorted order up front */
816 dcc->constraints = lcons(ccons[--nccons], dcc->constraints);
817 MemoryContextSwitchTo(oldcxt);
820 /* loop to next domain in stack */
821 typeOid = typTup->typbasetype;
822 ReleaseSysCache(tup);
825 heap_close(conRel, AccessShareLock);
828 * Only need to add one NOT NULL check regardless of how many domains in
829 * the stack request it.
833 DomainConstraintState *r;
835 /* Create the DomainConstraintCache object and context if needed */
840 cxt = AllocSetContextCreate(CurrentMemoryContext,
841 "Domain constraints",
842 ALLOCSET_SMALL_SIZES);
843 dcc = (DomainConstraintCache *)
844 MemoryContextAlloc(cxt, sizeof(DomainConstraintCache));
845 dcc->constraints = NIL;
846 dcc->dccContext = cxt;
847 dcc->dccRefCount = 0;
850 /* Create node trees in DomainConstraintCache's context */
851 oldcxt = MemoryContextSwitchTo(dcc->dccContext);
853 r = makeNode(DomainConstraintState);
855 r->constrainttype = DOM_CONSTRAINT_NOTNULL;
856 r->name = pstrdup("NOT NULL");
/* NOT NULL has no expression; executor treats it specially */
857 r->check_expr = NULL;
859 /* lcons to apply the nullness check FIRST */
860 dcc->constraints = lcons(r, dcc->constraints);
862 MemoryContextSwitchTo(oldcxt);
866 * If we made a constraint object, move it into CacheMemoryContext and
867 * attach it to the typcache entry.
871 MemoryContextSetParent(dcc->dccContext, CacheMemoryContext);
872 typentry->domainData = dcc;
873 dcc->dccRefCount++; /* count the typcache's reference */
876 /* Either way, the typcache entry's domain data is now valid. */
877 typentry->flags |= TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS;
881 * qsort comparator to sort DomainConstraintState pointers by name
884 dcs_cmp(const void *a, const void *b)
/* a and b are pointers to DomainConstraintState* array slots */
886 const DomainConstraintState *const * ca = (const DomainConstraintState *const *) a;
887 const DomainConstraintState *const * cb = (const DomainConstraintState *const *) b;
/* Order solely by constraint name, byte-wise */
889 return strcmp((*ca)->name, (*cb)->name);
893 * decr_dcc_refcount --- decrement a DomainConstraintCache's refcount,
894 * and free it if no references remain
897 decr_dcc_refcount(DomainConstraintCache *dcc)
899 Assert(dcc->dccRefCount > 0);
900 if (--(dcc->dccRefCount) <= 0)
/* The dcc struct is allocated inside dccContext (see load_domaintype_info),
 * so deleting the context frees the struct and all its constraint trees */
901 MemoryContextDelete(dcc->dccContext);
905 * Context reset/delete callback for a DomainConstraintRef
/*
 * Memory-context reset/delete callback for a DomainConstraintRef: drop the
 * ref's hold on the shared DomainConstraintCache when the ref's context
 * goes away.  NOTE(review): this extract does not show a guard for
 * ref->dcc being NULL before decr_dcc_refcount --- confirm against the
 * full source that such a check exists.
 */
908 dccref_deletion_callback(void *arg)
910 DomainConstraintRef *ref = (DomainConstraintRef *) arg;
911 DomainConstraintCache *dcc = ref->dcc;
913 /* Paranoia --- be sure link is nulled before trying to release */
916 ref->constraints = NIL;
918 decr_dcc_refcount(dcc);
923 * prep_domain_constraints --- prepare domain constraints for execution
925 * The expression trees stored in the DomainConstraintCache's list are
926 * converted to executable expression state trees stored in execctx.
/*
 * prep_domain_constraints --- build executable copies of cached constraints.
 *
 * For each cached DomainConstraintState (whose check_expr is a plan tree,
 * per the file-header convention), make a flat copy in execctx with
 * check_expr run through ExecInitExpr so it is a real expression state
 * tree.  Returns the new list; the cached nodes are not modified.  Note
 * that 'name' is shared, not copied.
 */
929 prep_domain_constraints(List *constraints, MemoryContext execctx)
932 MemoryContext oldcxt;
/* Allocate all new nodes (and the result list) in execctx */
935 oldcxt = MemoryContextSwitchTo(execctx);
937 foreach(lc, constraints)
939 DomainConstraintState *r = (DomainConstraintState *) lfirst(lc);
940 DomainConstraintState *newr;
942 newr = makeNode(DomainConstraintState);
943 newr->constrainttype = r->constrainttype;
944 newr->name = r->name;
945 /* Must cast here because cache items contain expr plan trees */
946 newr->check_expr = ExecInitExpr((Expr *) r->check_expr, NULL);
948 result = lappend(result, newr);
951 MemoryContextSwitchTo(oldcxt);
957 * InitDomainConstraintRef --- initialize a DomainConstraintRef struct
959 * Caller must tell us the MemoryContext in which the DomainConstraintRef
960 * lives. The ref will be cleaned up when that context is reset/deleted.
/*
 * InitDomainConstraintRef --- set up a DomainConstraintRef for type_id.
 *
 * Looks up (and pins, conceptually) the typcache entry, registers a reset
 * callback on refctx so the reference is released automatically, then takes
 * a refcount on the domain's constraint cache (if any) and exposes an
 * executable constraint list via ref->constraints.
 */
963 InitDomainConstraintRef(Oid type_id, DomainConstraintRef *ref,
964 MemoryContext refctx)
966 /* Look up the typcache entry --- we assume it survives indefinitely */
967 ref->tcache = lookup_type_cache(type_id, TYPECACHE_DOMAIN_INFO);
968 /* For safety, establish the callback before acquiring a refcount */
969 ref->refctx = refctx;
971 ref->callback.func = dccref_deletion_callback;
972 ref->callback.arg = (void *) ref;
973 MemoryContextRegisterResetCallback(refctx, &ref->callback);
974 /* Acquire refcount if there are constraints, and set up exported list */
975 if (ref->tcache->domainData)
977 ref->dcc = ref->tcache->domainData;
978 ref->dcc->dccRefCount++;
/* Executable copies live in refctx, so they die with the ref */
979 ref->constraints = prep_domain_constraints(ref->dcc->constraints,
983 ref->constraints = NIL;
987 * UpdateDomainConstraintRef --- recheck validity of domain constraint info
989 * If the domain's constraint set changed, ref->constraints is updated to
990 * point at a new list of cached constraints.
992 * In the normal case where nothing happened to the domain, this is cheap
993 * enough that it's reasonable (and expected) to check before *each* use
994 * of the constraint info.
/*
 * UpdateDomainConstraintRef --- refresh a ref after possible invalidation.
 *
 * Revalidates the typcache entry's domain data if it was marked stale, then,
 * if the entry now points at a different DomainConstraintCache than the ref
 * holds, swaps the ref over: release the old refcount, acquire the new one,
 * and rebuild the executable constraint list.  Cheap in the no-change case,
 * so callers are expected to invoke this before each use.
 */
997 UpdateDomainConstraintRef(DomainConstraintRef *ref)
999 TypeCacheEntry *typentry = ref->tcache;
1001 /* Make sure typcache entry's data is up to date */
1002 if ((typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
1003 typentry->typtype == TYPTYPE_DOMAIN)
1004 load_domaintype_info(typentry);
1006 /* Transfer to ref object if there's new info, adjusting refcounts */
1007 if (ref->dcc != typentry->domainData)
1009 /* Paranoia --- be sure link is nulled before trying to release */
1010 DomainConstraintCache *dcc = ref->dcc;
1015 * Note: we just leak the previous list of executable domain
1016 * constraints. Alternatively, we could keep those in a child
1017 * context of ref->refctx and free that context at this point.
1018 * However, in practice this code path will be taken so seldom
1019 * that the extra bookkeeping for a child context doesn't seem
1020 * worthwhile; we'll just allow a leak for the lifespan of refctx.
1022 ref->constraints = NIL;
1024 decr_dcc_refcount(dcc);
/* Pick up the new cache (may be NULL if the domain lost its constraints) */
1026 dcc = typentry->domainData;
1031 ref->constraints = prep_domain_constraints(dcc->constraints,
1038 * DomainHasConstraints --- utility routine to check if a domain has constraints
1040 * This is defined to return false, not fail, if type is not a domain.
/*
 * DomainHasConstraints --- does this domain have any constraints?
 *
 * Returns false (rather than erroring) for a non-domain type, since
 * lookup_type_cache only populates domainData for domains.
 */
1043 DomainHasConstraints(Oid type_id)
1045 TypeCacheEntry *typentry;
1048 * Note: a side effect is to cause the typcache's domain data to become
1049 * valid. This is fine since we'll likely need it soon if there is any.
1051 typentry = lookup_type_cache(type_id, TYPECACHE_DOMAIN_INFO);
/* Non-NULL domainData means at least one CHECK or NOT NULL constraint */
1053 return (typentry->domainData != NULL);
1058 * array_element_has_equality and friends are helper routines to check
1059 * whether we should believe that array_eq and related functions will work
1060 * on the given array type or composite type.
1062 * The logic above may call these repeatedly on the same type entry, so we
1063 * make use of the typentry->flags field to cache the results once known.
1064 * Also, we assume that we'll probably want all these facts about the type
1065 * if we want any, so we cache them all using only one lookup of the
1066 * component datatype(s).
/* Does this array type's element type have an equality operator?
 * Result is computed once and cached in typentry->flags. */
1070 array_element_has_equality(TypeCacheEntry *typentry)
1072 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1073 cache_array_element_properties(typentry);
1074 return (typentry->flags & TCFLAGS_HAVE_ELEM_EQUALITY) != 0;
1078 array_element_has_compare(TypeCacheEntry *typentry)
1080 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1081 cache_array_element_properties(typentry);
1082 return (typentry->flags & TCFLAGS_HAVE_ELEM_COMPARE) != 0;
1086 array_element_has_hashing(TypeCacheEntry *typentry)
1088 if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1089 cache_array_element_properties(typentry);
1090 return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1094 cache_array_element_properties(TypeCacheEntry *typentry)
1096 Oid elem_type = get_base_element_type(typentry->type_id);
1098 if (OidIsValid(elem_type))
1100 TypeCacheEntry *elementry;
1102 elementry = lookup_type_cache(elem_type,
1104 TYPECACHE_CMP_PROC |
1105 TYPECACHE_HASH_PROC);
1106 if (OidIsValid(elementry->eq_opr))
1107 typentry->flags |= TCFLAGS_HAVE_ELEM_EQUALITY;
1108 if (OidIsValid(elementry->cmp_proc))
1109 typentry->flags |= TCFLAGS_HAVE_ELEM_COMPARE;
1110 if (OidIsValid(elementry->hash_proc))
1111 typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1113 typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
1117 record_fields_have_equality(TypeCacheEntry *typentry)
1119 if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1120 cache_record_field_properties(typentry);
1121 return (typentry->flags & TCFLAGS_HAVE_FIELD_EQUALITY) != 0;
1125 record_fields_have_compare(TypeCacheEntry *typentry)
1127 if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1128 cache_record_field_properties(typentry);
1129 return (typentry->flags & TCFLAGS_HAVE_FIELD_COMPARE) != 0;
1133 cache_record_field_properties(TypeCacheEntry *typentry)
1136 * For type RECORD, we can't really tell what will work, since we don't
1137 * have access here to the specific anonymous type. Just assume that
1138 * everything will (we may get a failure at runtime ...)
1140 if (typentry->type_id == RECORDOID)
1141 typentry->flags |= (TCFLAGS_HAVE_FIELD_EQUALITY |
1142 TCFLAGS_HAVE_FIELD_COMPARE);
1143 else if (typentry->typtype == TYPTYPE_COMPOSITE)
1149 /* Fetch composite type's tupdesc if we don't have it already */
1150 if (typentry->tupDesc == NULL)
1151 load_typcache_tupdesc(typentry);
1152 tupdesc = typentry->tupDesc;
1154 /* Must bump the refcount while we do additional catalog lookups */
1155 IncrTupleDescRefCount(tupdesc);
1157 /* Have each property if all non-dropped fields have the property */
1158 newflags = (TCFLAGS_HAVE_FIELD_EQUALITY |
1159 TCFLAGS_HAVE_FIELD_COMPARE);
1160 for (i = 0; i < tupdesc->natts; i++)
1162 TypeCacheEntry *fieldentry;
1164 if (tupdesc->attrs[i]->attisdropped)
1167 fieldentry = lookup_type_cache(tupdesc->attrs[i]->atttypid,
1169 TYPECACHE_CMP_PROC);
1170 if (!OidIsValid(fieldentry->eq_opr))
1171 newflags &= ~TCFLAGS_HAVE_FIELD_EQUALITY;
1172 if (!OidIsValid(fieldentry->cmp_proc))
1173 newflags &= ~TCFLAGS_HAVE_FIELD_COMPARE;
1175 /* We can drop out of the loop once we disprove all bits */
1179 typentry->flags |= newflags;
1181 DecrTupleDescRefCount(tupdesc);
1183 typentry->flags |= TCFLAGS_CHECKED_FIELD_PROPERTIES;
1188 * lookup_rowtype_tupdesc_internal --- internal routine to lookup a rowtype
1190 * Same API as lookup_rowtype_tupdesc_noerror, but the returned tupdesc
1191 * hasn't had its refcount bumped.
1194 lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
1196 if (type_id != RECORDOID)
1199 * It's a named composite type, so use the regular typcache.
1201 TypeCacheEntry *typentry;
1203 typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
1204 if (typentry->tupDesc == NULL && !noError)
1206 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1207 errmsg("type %s is not composite",
1208 format_type_be(type_id))));
1209 return typentry->tupDesc;
1214 * It's a transient record type, so look in our record-type table.
1216 if (typmod < 0 || typmod >= NextRecordTypmod)
1220 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1221 errmsg("record type has not been registered")));
1224 return RecordCacheArray[typmod];
1229 * lookup_rowtype_tupdesc
1231 * Given a typeid/typmod that should describe a known composite type,
1232 * return the tuple descriptor for the type. Will ereport on failure.
1234 * Note: on success, we increment the refcount of the returned TupleDesc,
1235 * and log the reference in CurrentResourceOwner. Caller should call
1236 * ReleaseTupleDesc or DecrTupleDescRefCount when done using the tupdesc.
1239 lookup_rowtype_tupdesc(Oid type_id, int32 typmod)
1243 tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1244 IncrTupleDescRefCount(tupDesc);
1249 * lookup_rowtype_tupdesc_noerror
1251 * As above, but if the type is not a known composite type and noError
1252 * is true, returns NULL instead of ereport'ing. (Note that if a bogus
1253 * type_id is passed, you'll get an ereport anyway.)
1256 lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
1260 tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
1261 if (tupDesc != NULL)
1262 IncrTupleDescRefCount(tupDesc);
1267 * lookup_rowtype_tupdesc_copy
1269 * Like lookup_rowtype_tupdesc(), but the returned TupleDesc has been
1270 * copied into the CurrentMemoryContext and is not reference-counted.
1273 lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod)
1277 tmp = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1278 return CreateTupleDescCopyConstr(tmp);
1283 * assign_record_type_typmod
1285 * Given a tuple descriptor for a RECORD type, find or create a cache entry
1286 * for the type, and set the tupdesc's tdtypmod field to a value that will
1287 * identify this cache entry to lookup_rowtype_tupdesc.
1290 assign_record_type_typmod(TupleDesc tupDesc)
1292 RecordCacheEntry *recentry;
1294 Oid hashkey[REC_HASH_KEYS];
1299 MemoryContext oldcxt;
1301 Assert(tupDesc->tdtypeid == RECORDOID);
1303 if (RecordCacheHash == NULL)
1305 /* First time through: initialize the hash table */
1308 MemSet(&ctl, 0, sizeof(ctl));
1309 ctl.keysize = REC_HASH_KEYS * sizeof(Oid);
1310 ctl.entrysize = sizeof(RecordCacheEntry);
1311 RecordCacheHash = hash_create("Record information cache", 64,
1312 &ctl, HASH_ELEM | HASH_BLOBS);
1314 /* Also make sure CacheMemoryContext exists */
1315 if (!CacheMemoryContext)
1316 CreateCacheMemoryContext();
1319 /* Find or create a hashtable entry for this hash class */
1320 MemSet(hashkey, 0, sizeof(hashkey));
1321 for (i = 0; i < tupDesc->natts; i++)
1323 if (i >= REC_HASH_KEYS)
1325 hashkey[i] = tupDesc->attrs[i]->atttypid;
1327 recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
1329 HASH_ENTER, &found);
1332 /* New entry ... hash_search initialized only the hash key */
1333 recentry->tupdescs = NIL;
1336 /* Look for existing record cache entry */
1337 foreach(l, recentry->tupdescs)
1339 entDesc = (TupleDesc) lfirst(l);
1340 if (equalTupleDescs(tupDesc, entDesc))
1342 tupDesc->tdtypmod = entDesc->tdtypmod;
1347 /* Not present, so need to manufacture an entry */
1348 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
1350 if (RecordCacheArray == NULL)
1352 RecordCacheArray = (TupleDesc *) palloc(64 * sizeof(TupleDesc));
1353 RecordCacheArrayLen = 64;
1355 else if (NextRecordTypmod >= RecordCacheArrayLen)
1357 int32 newlen = RecordCacheArrayLen * 2;
1359 RecordCacheArray = (TupleDesc *) repalloc(RecordCacheArray,
1360 newlen * sizeof(TupleDesc));
1361 RecordCacheArrayLen = newlen;
1364 /* if fail in subrs, no damage except possibly some wasted memory... */
1365 entDesc = CreateTupleDescCopy(tupDesc);
1366 recentry->tupdescs = lcons(entDesc, recentry->tupdescs);
1367 /* mark it as a reference-counted tupdesc */
1368 entDesc->tdrefcount = 1;
1369 /* now it's safe to advance NextRecordTypmod */
1370 newtypmod = NextRecordTypmod++;
1371 entDesc->tdtypmod = newtypmod;
1372 RecordCacheArray[newtypmod] = entDesc;
1374 /* report to caller as well */
1375 tupDesc->tdtypmod = newtypmod;
1377 MemoryContextSwitchTo(oldcxt);
1381 * TypeCacheRelCallback
1382 * Relcache inval callback function
1384 * Delete the cached tuple descriptor (if any) for the given rel's composite
1385 * type, or for all composite types if relid == InvalidOid. Also reset
1386 * whatever info we have cached about the composite type's comparability.
1388 * This is called when a relcache invalidation event occurs for the given
1389 * relid. We must scan the whole typcache hash since we don't know the
1390 * type OID corresponding to the relid. We could do a direct search if this
1391 * were a syscache-flush callback on pg_type, but then we would need all
1392 * ALTER-TABLE-like commands that could modify a rowtype to issue syscache
1393 * invals against the rel's pg_type OID. The extra SI signaling could very
1394 * well cost more than we'd save, since in most usages there are not very
1395 * many entries in a backend's typcache. The risk of bugs-of-omission seems
1398 * Another possibility, with only localized impact, is to maintain a second
1399 * hashtable that indexes composite-type typcache entries by their typrelid.
1400 * But it's still not clear it's worth the trouble.
1403 TypeCacheRelCallback(Datum arg, Oid relid)
1405 HASH_SEQ_STATUS status;
1406 TypeCacheEntry *typentry;
1408 /* TypeCacheHash must exist, else this callback wouldn't be registered */
1409 hash_seq_init(&status, TypeCacheHash);
1410 while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
1412 if (typentry->typtype != TYPTYPE_COMPOSITE)
1413 continue; /* skip non-composites */
1415 /* Skip if no match, unless we're zapping all composite types */
1416 if (relid != typentry->typrelid && relid != InvalidOid)
1419 /* Delete tupdesc if we have it */
1420 if (typentry->tupDesc != NULL)
1423 * Release our refcount, and free the tupdesc if none remain.
1424 * (Can't use DecrTupleDescRefCount because this reference is not
1425 * logged in current resource owner.)
1427 Assert(typentry->tupDesc->tdrefcount > 0);
1428 if (--typentry->tupDesc->tdrefcount == 0)
1429 FreeTupleDesc(typentry->tupDesc);
1430 typentry->tupDesc = NULL;
1433 /* Reset equality/comparison/hashing validity information */
1434 typentry->flags = 0;
1439 * TypeCacheOpcCallback
1440 * Syscache inval callback function
1442 * This is called when a syscache invalidation event occurs for any pg_opclass
1443 * row. In principle we could probably just invalidate data dependent on the
1444 * particular opclass, but since updates on pg_opclass are rare in production
1445 * it doesn't seem worth a lot of complication: we just mark all cached data
1448 * Note that we don't bother watching for updates on pg_amop or pg_amproc.
1449 * This should be safe because ALTER OPERATOR FAMILY ADD/DROP OPERATOR/FUNCTION
1450 * is not allowed to be used to add/drop the primary operators and functions
1451 * of an opclass, only cross-type members of a family; and the latter sorts
1452 * of members are not going to get cached here.
1455 TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue)
1457 HASH_SEQ_STATUS status;
1458 TypeCacheEntry *typentry;
1460 /* TypeCacheHash must exist, else this callback wouldn't be registered */
1461 hash_seq_init(&status, TypeCacheHash);
1462 while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
1464 /* Reset equality/comparison/hashing validity information */
1465 typentry->flags = 0;
1470 * TypeCacheConstrCallback
1471 * Syscache inval callback function
1473 * This is called when a syscache invalidation event occurs for any
1474 * pg_constraint or pg_type row. We flush information about domain
1475 * constraints when this happens.
1477 * It's slightly annoying that we can't tell whether the inval event was for a
1478 * domain constraint/type record or not; there's usually more update traffic
1479 * for table constraints/types than domain constraints, so we'll do a lot of
1480 * useless flushes. Still, this is better than the old no-caching-at-all
1481 * approach to domain constraints.
1484 TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue)
1486 TypeCacheEntry *typentry;
1489 * Because this is called very frequently, and typically very few of the
1490 * typcache entries are for domains, we don't use hash_seq_search here.
1491 * Instead we thread all the domain-type entries together so that we can
1492 * visit them cheaply.
1494 for (typentry = firstDomainTypeEntry;
1496 typentry = typentry->nextDomain)
1498 /* Reset domain constraint validity information */
1499 typentry->flags &= ~TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS;
1505 * Check if given OID is part of the subset that's sortable by comparisons
1508 enum_known_sorted(TypeCacheEnumData *enumdata, Oid arg)
1512 if (arg < enumdata->bitmap_base)
1514 offset = arg - enumdata->bitmap_base;
1515 if (offset > (Oid) INT_MAX)
1517 return bms_is_member((int) offset, enumdata->sorted_values);
1522 * compare_values_of_enum
1523 * Compare two members of an enum type.
1524 * Return <0, 0, or >0 according as arg1 <, =, or > arg2.
1526 * Note: currently, the enumData cache is refreshed only if we are asked
1527 * to compare an enum value that is not already in the cache. This is okay
1528 * because there is no support for re-ordering existing values, so comparisons
1529 * of previously cached values will return the right answer even if other
1530 * values have been added since we last loaded the cache.
1532 * Note: the enum logic has a special-case rule about even-numbered versus
1533 * odd-numbered OIDs, but we take no account of that rule here; this
1534 * routine shouldn't even get called when that rule applies.
1537 compare_values_of_enum(TypeCacheEntry *tcache, Oid arg1, Oid arg2)
1539 TypeCacheEnumData *enumdata;
1544 * Equal OIDs are certainly equal --- this case was probably handled by
1545 * our caller, but we may as well check.
1550 /* Load up the cache if first time through */
1551 if (tcache->enumData == NULL)
1552 load_enum_cache_data(tcache);
1553 enumdata = tcache->enumData;
1556 * If both OIDs are known-sorted, we can just compare them directly.
1558 if (enum_known_sorted(enumdata, arg1) &&
1559 enum_known_sorted(enumdata, arg2))
1568 * Slow path: we have to identify their actual sort-order positions.
1570 item1 = find_enumitem(enumdata, arg1);
1571 item2 = find_enumitem(enumdata, arg2);
1573 if (item1 == NULL || item2 == NULL)
1576 * We couldn't find one or both values. That means the enum has
1577 * changed under us, so re-initialize the cache and try again. We
1578 * don't bother retrying the known-sorted case in this path.
1580 load_enum_cache_data(tcache);
1581 enumdata = tcache->enumData;
1583 item1 = find_enumitem(enumdata, arg1);
1584 item2 = find_enumitem(enumdata, arg2);
1587 * If we still can't find the values, complain: we must have corrupt
1591 elog(ERROR, "enum value %u not found in cache for enum %s",
1592 arg1, format_type_be(tcache->type_id));
1594 elog(ERROR, "enum value %u not found in cache for enum %s",
1595 arg2, format_type_be(tcache->type_id));
1598 if (item1->sort_order < item2->sort_order)
1600 else if (item1->sort_order > item2->sort_order)
1607 * Load (or re-load) the enumData member of the typcache entry.
1610 load_enum_cache_data(TypeCacheEntry *tcache)
1612 TypeCacheEnumData *enumdata;
1614 SysScanDesc enum_scan;
1615 HeapTuple enum_tuple;
1622 MemoryContext oldcxt;
1626 /* Check that this is actually an enum */
1627 if (tcache->typtype != TYPTYPE_ENUM)
1629 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1630 errmsg("%s is not an enum",
1631 format_type_be(tcache->type_id))));
1634 * Read all the information for members of the enum type. We collect the
1635 * info in working memory in the caller's context, and then transfer it to
1636 * permanent memory in CacheMemoryContext. This minimizes the risk of
1637 * leaking memory from CacheMemoryContext in the event of an error partway
1641 items = (EnumItem *) palloc(sizeof(EnumItem) * maxitems);
1644 /* Scan pg_enum for the members of the target enum type. */
1646 Anum_pg_enum_enumtypid,
1647 BTEqualStrategyNumber, F_OIDEQ,
1648 ObjectIdGetDatum(tcache->type_id));
1650 enum_rel = heap_open(EnumRelationId, AccessShareLock);
1651 enum_scan = systable_beginscan(enum_rel,
1652 EnumTypIdLabelIndexId,
1656 while (HeapTupleIsValid(enum_tuple = systable_getnext(enum_scan)))
1658 Form_pg_enum en = (Form_pg_enum) GETSTRUCT(enum_tuple);
1660 if (numitems >= maxitems)
1663 items = (EnumItem *) repalloc(items, sizeof(EnumItem) * maxitems);
1665 items[numitems].enum_oid = HeapTupleGetOid(enum_tuple);
1666 items[numitems].sort_order = en->enumsortorder;
1670 systable_endscan(enum_scan);
1671 heap_close(enum_rel, AccessShareLock);
1673 /* Sort the items into OID order */
1674 qsort(items, numitems, sizeof(EnumItem), enum_oid_cmp);
1677 * Here, we create a bitmap listing a subset of the enum's OIDs that are
1678 * known to be in order and can thus be compared with just OID comparison.
1680 * The point of this is that the enum's initial OIDs were certainly in
1681 * order, so there is some subset that can be compared via OID comparison;
1682 * and we'd rather not do binary searches unnecessarily.
1684 * This is somewhat heuristic, and might identify a subset of OIDs that
1685 * isn't exactly what the type started with. That's okay as long as the
1686 * subset is correctly sorted.
1688 bitmap_base = InvalidOid;
1690 bm_size = 1; /* only save sets of at least 2 OIDs */
1692 for (start_pos = 0; start_pos < numitems - 1; start_pos++)
1695 * Identify longest sorted subsequence starting at start_pos
1697 Bitmapset *this_bitmap = bms_make_singleton(0);
1698 int this_bm_size = 1;
1699 Oid start_oid = items[start_pos].enum_oid;
1700 float4 prev_order = items[start_pos].sort_order;
1703 for (i = start_pos + 1; i < numitems; i++)
1707 offset = items[i].enum_oid - start_oid;
1708 /* quit if bitmap would be too large; cutoff is arbitrary */
1711 /* include the item if it's in-order */
1712 if (items[i].sort_order > prev_order)
1714 prev_order = items[i].sort_order;
1715 this_bitmap = bms_add_member(this_bitmap, (int) offset);
1720 /* Remember it if larger than previous best */
1721 if (this_bm_size > bm_size)
1724 bitmap_base = start_oid;
1725 bitmap = this_bitmap;
1726 bm_size = this_bm_size;
1729 bms_free(this_bitmap);
1732 * Done if it's not possible to find a longer sequence in the rest of
1733 * the list. In typical cases this will happen on the first
1734 * iteration, which is why we create the bitmaps on the fly instead of
1735 * doing a second pass over the list.
1737 if (bm_size >= (numitems - start_pos - 1))
1741 /* OK, copy the data into CacheMemoryContext */
1742 oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
1743 enumdata = (TypeCacheEnumData *)
1744 palloc(offsetof(TypeCacheEnumData, enum_values) +
1745 numitems * sizeof(EnumItem));
1746 enumdata->bitmap_base = bitmap_base;
1747 enumdata->sorted_values = bms_copy(bitmap);
1748 enumdata->num_values = numitems;
1749 memcpy(enumdata->enum_values, items, numitems * sizeof(EnumItem));
1750 MemoryContextSwitchTo(oldcxt);
1755 /* And link the finished cache struct into the typcache */
1756 if (tcache->enumData != NULL)
1757 pfree(tcache->enumData);
1758 tcache->enumData = enumdata;
1762 * Locate the EnumItem with the given OID, if present
1765 find_enumitem(TypeCacheEnumData *enumdata, Oid arg)
1769 /* On some versions of Solaris, bsearch of zero items dumps core */
1770 if (enumdata->num_values <= 0)
1773 srch.enum_oid = arg;
1774 return bsearch(&srch, enumdata->enum_values, enumdata->num_values,
1775 sizeof(EnumItem), enum_oid_cmp);
1779 * qsort comparison function for OID-ordered EnumItems
1782 enum_oid_cmp(const void *left, const void *right)
1784 const EnumItem *l = (const EnumItem *) left;
1785 const EnumItem *r = (const EnumItem *) right;
1787 if (l->enum_oid < r->enum_oid)
1789 else if (l->enum_oid > r->enum_oid)