]> granicus.if.org Git - postgresql/blob - src/backend/utils/cache/typcache.c
Add macros to make AllocSetContextCreate() calls simpler and safer.
[postgresql] / src / backend / utils / cache / typcache.c
1 /*-------------------------------------------------------------------------
2  *
3  * typcache.c
4  *        POSTGRES type cache code
5  *
6  * The type cache exists to speed lookup of certain information about data
7  * types that is not directly available from a type's pg_type row.  For
8  * example, we use a type's default btree opclass, or the default hash
9  * opclass if no btree opclass exists, to determine which operators should
10  * be used for grouping and sorting the type (GROUP BY, ORDER BY ASC/DESC).
11  *
12  * Several seemingly-odd choices have been made to support use of the type
13  * cache by generic array and record handling routines, such as array_eq(),
14  * record_cmp(), and hash_array().  Because those routines are used as index
15  * support operations, they cannot leak memory.  To allow them to execute
16  * efficiently, all information that they would like to re-use across calls
17  * is kept in the type cache.
18  *
19  * Once created, a type cache entry lives as long as the backend does, so
20  * there is no need for a call to release a cache entry.  If the type is
21  * dropped, the cache entry simply becomes wasted storage.  This is not
22  * expected to happen often, and assuming that typcache entries are good
23  * permanently allows caching pointers to them in long-lived places.
24  *
25  * We have some provisions for updating cache entries if the stored data
26  * becomes obsolete.  Information dependent on opclasses is cleared if we
27  * detect updates to pg_opclass.  We also support clearing the tuple
28  * descriptor and operator/function parts of a rowtype's cache entry,
29  * since those may need to change as a consequence of ALTER TABLE.
30  * Domain constraint changes are also tracked properly.
31  *
32  *
33  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
34  * Portions Copyright (c) 1994, Regents of the University of California
35  *
36  * IDENTIFICATION
37  *        src/backend/utils/cache/typcache.c
38  *
39  *-------------------------------------------------------------------------
40  */
41 #include "postgres.h"
42
43 #include <limits.h>
44
45 #include "access/hash.h"
46 #include "access/heapam.h"
47 #include "access/htup_details.h"
48 #include "access/nbtree.h"
49 #include "catalog/indexing.h"
50 #include "catalog/pg_am.h"
51 #include "catalog/pg_constraint.h"
52 #include "catalog/pg_enum.h"
53 #include "catalog/pg_operator.h"
54 #include "catalog/pg_range.h"
55 #include "catalog/pg_type.h"
56 #include "commands/defrem.h"
57 #include "executor/executor.h"
58 #include "optimizer/planner.h"
59 #include "utils/builtins.h"
60 #include "utils/catcache.h"
61 #include "utils/fmgroids.h"
62 #include "utils/inval.h"
63 #include "utils/lsyscache.h"
64 #include "utils/memutils.h"
65 #include "utils/rel.h"
66 #include "utils/snapmgr.h"
67 #include "utils/syscache.h"
68 #include "utils/typcache.h"
69
70
71 /* The main type cache hashtable searched by lookup_type_cache */
72 static HTAB *TypeCacheHash = NULL;
73
74 /* List of type cache entries for domain types */
75 static TypeCacheEntry *firstDomainTypeEntry = NULL;
76
77 /* Private flag bits in the TypeCacheEntry.flags field */
78 #define TCFLAGS_CHECKED_BTREE_OPCLASS           0x0001
79 #define TCFLAGS_CHECKED_HASH_OPCLASS            0x0002
80 #define TCFLAGS_CHECKED_EQ_OPR                          0x0004
81 #define TCFLAGS_CHECKED_LT_OPR                          0x0008
82 #define TCFLAGS_CHECKED_GT_OPR                          0x0010
83 #define TCFLAGS_CHECKED_CMP_PROC                        0x0020
84 #define TCFLAGS_CHECKED_HASH_PROC                       0x0040
85 #define TCFLAGS_CHECKED_ELEM_PROPERTIES         0x0080
86 #define TCFLAGS_HAVE_ELEM_EQUALITY                      0x0100
87 #define TCFLAGS_HAVE_ELEM_COMPARE                       0x0200
88 #define TCFLAGS_HAVE_ELEM_HASHING                       0x0400
89 #define TCFLAGS_CHECKED_FIELD_PROPERTIES        0x0800
90 #define TCFLAGS_HAVE_FIELD_EQUALITY                     0x1000
91 #define TCFLAGS_HAVE_FIELD_COMPARE                      0x2000
92 #define TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS      0x4000
93
94 /*
95  * Data stored about a domain type's constraints.  Note that we do not create
96  * this struct for the common case of a constraint-less domain; we just set
97  * domainData to NULL to indicate that.
98  *
99  * Within a DomainConstraintCache, we abuse the DomainConstraintState node
100  * type a bit: check_expr fields point to expression plan trees, not plan
101  * state trees.  When needed, expression state trees are built by flat-copying
102  * the DomainConstraintState nodes and applying ExecInitExpr to check_expr.
103  * Such a state tree is not part of the DomainConstraintCache, but is
104  * considered to belong to a DomainConstraintRef.
105  */
struct DomainConstraintCache
{
	List	   *constraints;	/* list of DomainConstraintState nodes */
	MemoryContext dccContext;	/* memory context holding all associated data */
	long		dccRefCount;	/* number of references to this struct; the
								 * context is presumably deleted when this
								 * drops to zero (see decr_dcc_refcount) */
};
112
/*
 * Private information to support comparisons of enum values.  One EnumItem
 * maps a single enum member's OID to its user-visible sort position.
 */
typedef struct
{
	Oid			enum_oid;		/* OID of one enum value */
	float4		sort_order;		/* its sort position */
} EnumItem;
119
/* Cached comparison data for all values of one enum type */
typedef struct TypeCacheEnumData
{
	Oid			bitmap_base;	/* OID corresponding to bit 0 of bitmapset */
	Bitmapset  *sorted_values;	/* set of OIDs known to be in order */
	int			num_values;		/* total number of values in enum */
	EnumItem	enum_values[FLEXIBLE_ARRAY_MEMBER]; /* one per enum member;
								 * presumably kept sorted by OID for binary
								 * search (see find_enumitem/enum_oid_cmp) */
} TypeCacheEnumData;
127
128 /*
129  * We use a separate table for storing the definitions of non-anonymous
130  * record types.  Once defined, a record type will be remembered for the
131  * life of the backend.  Subsequent uses of the "same" record type (where
132  * sameness means equalTupleDescs) will refer to the existing table entry.
133  *
134  * Stored record types are remembered in a linear array of TupleDescs,
135  * which can be indexed quickly with the assigned typmod.  There is also
136  * a hash table to speed searches for matching TupleDescs.  The hash key
137  * uses just the first N columns' type OIDs, and so we may have multiple
138  * entries with the same hash key.
139  */
#define REC_HASH_KEYS	16		/* use this many columns in hash key */

/*
 * Hash table entry for the record-type cache.  Multiple TupleDescs can hash
 * to the same key (only the first REC_HASH_KEYS column type OIDs are used),
 * so each entry carries a list of candidate descriptors.
 */
typedef struct RecordCacheEntry
{
	/* the hash lookup key MUST BE FIRST */
	Oid			hashkey[REC_HASH_KEYS]; /* column type IDs, zero-filled */

	/* list of TupleDescs for record types with this hashkey */
	List	   *tupdescs;
} RecordCacheEntry;
150
151 static HTAB *RecordCacheHash = NULL;
152
153 static TupleDesc *RecordCacheArray = NULL;
154 static int32 RecordCacheArrayLen = 0;   /* allocated length of array */
155 static int32 NextRecordTypmod = 0;              /* number of entries used */
156
157 static void load_typcache_tupdesc(TypeCacheEntry *typentry);
158 static void load_rangetype_info(TypeCacheEntry *typentry);
159 static void load_domaintype_info(TypeCacheEntry *typentry);
160 static int      dcs_cmp(const void *a, const void *b);
161 static void decr_dcc_refcount(DomainConstraintCache *dcc);
162 static void dccref_deletion_callback(void *arg);
163 static List *prep_domain_constraints(List *constraints, MemoryContext execctx);
164 static bool array_element_has_equality(TypeCacheEntry *typentry);
165 static bool array_element_has_compare(TypeCacheEntry *typentry);
166 static bool array_element_has_hashing(TypeCacheEntry *typentry);
167 static void cache_array_element_properties(TypeCacheEntry *typentry);
168 static bool record_fields_have_equality(TypeCacheEntry *typentry);
169 static bool record_fields_have_compare(TypeCacheEntry *typentry);
170 static void cache_record_field_properties(TypeCacheEntry *typentry);
171 static void TypeCacheRelCallback(Datum arg, Oid relid);
172 static void TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue);
173 static void TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue);
174 static void load_enum_cache_data(TypeCacheEntry *tcache);
175 static EnumItem *find_enumitem(TypeCacheEnumData *enumdata, Oid arg);
176 static int      enum_oid_cmp(const void *left, const void *right);
177
178
179 /*
180  * lookup_type_cache
181  *
182  * Fetch the type cache entry for the specified datatype, and make sure that
183  * all the fields requested by bits in 'flags' are valid.
184  *
185  * The result is never NULL --- we will elog() if the passed type OID is
186  * invalid.  Note however that we may fail to find one or more of the
187  * requested opclass-dependent fields; the caller needs to check whether
188  * the fields are InvalidOid or not.
189  */
TypeCacheEntry *
lookup_type_cache(Oid type_id, int flags)
{
	TypeCacheEntry *typentry;
	bool		found;

	if (TypeCacheHash == NULL)
	{
		/* First time through: initialize the hash table */
		HASHCTL		ctl;

		MemSet(&ctl, 0, sizeof(ctl));
		ctl.keysize = sizeof(Oid);
		ctl.entrysize = sizeof(TypeCacheEntry);
		TypeCacheHash = hash_create("Type information cache", 64,
									&ctl, HASH_ELEM | HASH_BLOBS);

		/* Also set up callbacks for SI invalidations */
		CacheRegisterRelcacheCallback(TypeCacheRelCallback, (Datum) 0);
		CacheRegisterSyscacheCallback(CLAOID, TypeCacheOpcCallback, (Datum) 0);
		CacheRegisterSyscacheCallback(CONSTROID, TypeCacheConstrCallback, (Datum) 0);
		CacheRegisterSyscacheCallback(TYPEOID, TypeCacheConstrCallback, (Datum) 0);

		/* Also make sure CacheMemoryContext exists */
		if (!CacheMemoryContext)
			CreateCacheMemoryContext();
	}

	/* Try to look up an existing entry */
	typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
											  (void *) &type_id,
											  HASH_FIND, NULL);
	if (typentry == NULL)
	{
		/*
		 * If we didn't find one, we want to make one.  But first look up the
		 * pg_type row, just to make sure we don't make a cache entry for an
		 * invalid type OID.
		 */
		HeapTuple	tp;
		Form_pg_type typtup;

		tp = SearchSysCache1(TYPEOID, ObjectIdGetDatum(type_id));
		if (!HeapTupleIsValid(tp))
			elog(ERROR, "cache lookup failed for type %u", type_id);
		typtup = (Form_pg_type) GETSTRUCT(tp);
		if (!typtup->typisdefined)
			ereport(ERROR,
					(errcode(ERRCODE_UNDEFINED_OBJECT),
					 errmsg("type \"%s\" is only a shell",
							NameStr(typtup->typname))));

		/* Now make the typcache entry */
		typentry = (TypeCacheEntry *) hash_search(TypeCacheHash,
												  (void *) &type_id,
												  HASH_ENTER, &found);
		Assert(!found);			/* it wasn't there a moment ago */

		/* Copy the basic physical-storage properties from pg_type */
		MemSet(typentry, 0, sizeof(TypeCacheEntry));
		typentry->type_id = type_id;
		typentry->typlen = typtup->typlen;
		typentry->typbyval = typtup->typbyval;
		typentry->typalign = typtup->typalign;
		typentry->typstorage = typtup->typstorage;
		typentry->typtype = typtup->typtype;
		typentry->typrelid = typtup->typrelid;

		/* If it's a domain, immediately thread it into the domain cache list */
		if (typentry->typtype == TYPTYPE_DOMAIN)
		{
			typentry->nextDomain = firstDomainTypeEntry;
			firstDomainTypeEntry = typentry;
		}

		ReleaseSysCache(tp);
	}

	/*
	 * Look up opclasses if we haven't already and any dependent info is
	 * requested.
	 */
	if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_LT_OPR | TYPECACHE_GT_OPR |
				  TYPECACHE_CMP_PROC |
				  TYPECACHE_EQ_OPR_FINFO | TYPECACHE_CMP_PROC_FINFO |
				  TYPECACHE_BTREE_OPFAMILY)) &&
		!(typentry->flags & TCFLAGS_CHECKED_BTREE_OPCLASS))
	{
		Oid			opclass;

		opclass = GetDefaultOpClass(type_id, BTREE_AM_OID);
		if (OidIsValid(opclass))
		{
			typentry->btree_opf = get_opclass_family(opclass);
			typentry->btree_opintype = get_opclass_input_type(opclass);
		}
		else
		{
			typentry->btree_opf = typentry->btree_opintype = InvalidOid;
		}

		/*
		 * Reset information derived from btree opclass.  Note in particular
		 * that we'll redetermine the eq_opr even if we previously found one;
		 * this matters in case a btree opclass has been added to a type that
		 * previously had only a hash opclass.
		 */
		typentry->flags &= ~(TCFLAGS_CHECKED_EQ_OPR |
							 TCFLAGS_CHECKED_LT_OPR |
							 TCFLAGS_CHECKED_GT_OPR |
							 TCFLAGS_CHECKED_CMP_PROC);
		typentry->flags |= TCFLAGS_CHECKED_BTREE_OPCLASS;
	}

	/*
	 * If we need to look up equality operator, and there's no btree opclass,
	 * force lookup of hash opclass.
	 */
	if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
		!(typentry->flags & TCFLAGS_CHECKED_EQ_OPR) &&
		typentry->btree_opf == InvalidOid)
		flags |= TYPECACHE_HASH_OPFAMILY;

	if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO |
				  TYPECACHE_HASH_OPFAMILY)) &&
		!(typentry->flags & TCFLAGS_CHECKED_HASH_OPCLASS))
	{
		Oid			opclass;

		opclass = GetDefaultOpClass(type_id, HASH_AM_OID);
		if (OidIsValid(opclass))
		{
			typentry->hash_opf = get_opclass_family(opclass);
			typentry->hash_opintype = get_opclass_input_type(opclass);
		}
		else
		{
			typentry->hash_opf = typentry->hash_opintype = InvalidOid;
		}

		/*
		 * Reset information derived from hash opclass.  We do *not* reset the
		 * eq_opr; if we already found one from the btree opclass, that
		 * decision is still good.
		 */
		typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC);
		typentry->flags |= TCFLAGS_CHECKED_HASH_OPCLASS;
	}

	/*
	 * Look for requested operators and functions, if we haven't already.
	 */
	if ((flags & (TYPECACHE_EQ_OPR | TYPECACHE_EQ_OPR_FINFO)) &&
		!(typentry->flags & TCFLAGS_CHECKED_EQ_OPR))
	{
		Oid			eq_opr = InvalidOid;

		/* Prefer the btree opfamily's equality, fall back to hash's */
		if (typentry->btree_opf != InvalidOid)
			eq_opr = get_opfamily_member(typentry->btree_opf,
										 typentry->btree_opintype,
										 typentry->btree_opintype,
										 BTEqualStrategyNumber);
		if (eq_opr == InvalidOid &&
			typentry->hash_opf != InvalidOid)
			eq_opr = get_opfamily_member(typentry->hash_opf,
										 typentry->hash_opintype,
										 typentry->hash_opintype,
										 HTEqualStrategyNumber);

		/*
		 * If the proposed equality operator is array_eq or record_eq, check
		 * to see if the element type or column types support equality. If
		 * not, array_eq or record_eq would fail at runtime, so we don't want
		 * to report that the type has equality.
		 */
		if (eq_opr == ARRAY_EQ_OP &&
			!array_element_has_equality(typentry))
			eq_opr = InvalidOid;
		else if (eq_opr == RECORD_EQ_OP &&
				 !record_fields_have_equality(typentry))
			eq_opr = InvalidOid;

		/* Force update of eq_opr_finfo only if we're changing state */
		if (typentry->eq_opr != eq_opr)
			typentry->eq_opr_finfo.fn_oid = InvalidOid;

		typentry->eq_opr = eq_opr;

		/*
		 * Reset info about hash function whenever we pick up new info about
		 * equality operator.  This is so we can ensure that the hash function
		 * matches the operator.
		 */
		typentry->flags &= ~(TCFLAGS_CHECKED_HASH_PROC);
		typentry->flags |= TCFLAGS_CHECKED_EQ_OPR;
	}
	if ((flags & TYPECACHE_LT_OPR) &&
		!(typentry->flags & TCFLAGS_CHECKED_LT_OPR))
	{
		Oid			lt_opr = InvalidOid;

		if (typentry->btree_opf != InvalidOid)
			lt_opr = get_opfamily_member(typentry->btree_opf,
										 typentry->btree_opintype,
										 typentry->btree_opintype,
										 BTLessStrategyNumber);

		/* As above, make sure array_cmp or record_cmp will succeed */
		if (lt_opr == ARRAY_LT_OP &&
			!array_element_has_compare(typentry))
			lt_opr = InvalidOid;
		else if (lt_opr == RECORD_LT_OP &&
				 !record_fields_have_compare(typentry))
			lt_opr = InvalidOid;

		typentry->lt_opr = lt_opr;
		typentry->flags |= TCFLAGS_CHECKED_LT_OPR;
	}
	if ((flags & TYPECACHE_GT_OPR) &&
		!(typentry->flags & TCFLAGS_CHECKED_GT_OPR))
	{
		Oid			gt_opr = InvalidOid;

		if (typentry->btree_opf != InvalidOid)
			gt_opr = get_opfamily_member(typentry->btree_opf,
										 typentry->btree_opintype,
										 typentry->btree_opintype,
										 BTGreaterStrategyNumber);

		/* As above, make sure array_cmp or record_cmp will succeed */
		if (gt_opr == ARRAY_GT_OP &&
			!array_element_has_compare(typentry))
			gt_opr = InvalidOid;
		else if (gt_opr == RECORD_GT_OP &&
				 !record_fields_have_compare(typentry))
			gt_opr = InvalidOid;

		typentry->gt_opr = gt_opr;
		typentry->flags |= TCFLAGS_CHECKED_GT_OPR;
	}
	if ((flags & (TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO)) &&
		!(typentry->flags & TCFLAGS_CHECKED_CMP_PROC))
	{
		Oid			cmp_proc = InvalidOid;

		if (typentry->btree_opf != InvalidOid)
			cmp_proc = get_opfamily_proc(typentry->btree_opf,
										 typentry->btree_opintype,
										 typentry->btree_opintype,
										 BTORDER_PROC);

		/* As above, make sure array_cmp or record_cmp will succeed */
		if (cmp_proc == F_BTARRAYCMP &&
			!array_element_has_compare(typentry))
			cmp_proc = InvalidOid;
		else if (cmp_proc == F_BTRECORDCMP &&
				 !record_fields_have_compare(typentry))
			cmp_proc = InvalidOid;

		/* Force update of cmp_proc_finfo only if we're changing state */
		if (typentry->cmp_proc != cmp_proc)
			typentry->cmp_proc_finfo.fn_oid = InvalidOid;

		typentry->cmp_proc = cmp_proc;
		typentry->flags |= TCFLAGS_CHECKED_CMP_PROC;
	}
	if ((flags & (TYPECACHE_HASH_PROC | TYPECACHE_HASH_PROC_FINFO)) &&
		!(typentry->flags & TCFLAGS_CHECKED_HASH_PROC))
	{
		Oid			hash_proc = InvalidOid;

		/*
		 * We insist that the eq_opr, if one has been determined, match the
		 * hash opclass; else report there is no hash function.
		 */
		if (typentry->hash_opf != InvalidOid &&
			(!OidIsValid(typentry->eq_opr) ||
			 typentry->eq_opr == get_opfamily_member(typentry->hash_opf,
													 typentry->hash_opintype,
													 typentry->hash_opintype,
													 HTEqualStrategyNumber)))
			hash_proc = get_opfamily_proc(typentry->hash_opf,
										  typentry->hash_opintype,
										  typentry->hash_opintype,
										  HASHPROC);

		/*
		 * As above, make sure hash_array will succeed.  We don't currently
		 * support hashing for composite types, but when we do, we'll need
		 * more logic here to check that case too.
		 */
		if (hash_proc == F_HASH_ARRAY &&
			!array_element_has_hashing(typentry))
			hash_proc = InvalidOid;

		/* Force update of hash_proc_finfo only if we're changing state */
		if (typentry->hash_proc != hash_proc)
			typentry->hash_proc_finfo.fn_oid = InvalidOid;

		typentry->hash_proc = hash_proc;
		typentry->flags |= TCFLAGS_CHECKED_HASH_PROC;
	}

	/*
	 * Set up fmgr lookup info as requested
	 *
	 * Note: we tell fmgr the finfo structures live in CacheMemoryContext,
	 * which is not quite right (they're really in the hash table's private
	 * memory context) but this will do for our purposes.
	 *
	 * Note: the code above avoids invalidating the finfo structs unless the
	 * referenced operator/function OID actually changes.  This is to prevent
	 * unnecessary leakage of any subsidiary data attached to an finfo, since
	 * that would cause session-lifespan memory leaks.
	 */
	if ((flags & TYPECACHE_EQ_OPR_FINFO) &&
		typentry->eq_opr_finfo.fn_oid == InvalidOid &&
		typentry->eq_opr != InvalidOid)
	{
		Oid			eq_opr_func;

		/* eq_opr is an operator OID; look up its underlying function */
		eq_opr_func = get_opcode(typentry->eq_opr);
		if (eq_opr_func != InvalidOid)
			fmgr_info_cxt(eq_opr_func, &typentry->eq_opr_finfo,
						  CacheMemoryContext);
	}
	if ((flags & TYPECACHE_CMP_PROC_FINFO) &&
		typentry->cmp_proc_finfo.fn_oid == InvalidOid &&
		typentry->cmp_proc != InvalidOid)
	{
		fmgr_info_cxt(typentry->cmp_proc, &typentry->cmp_proc_finfo,
					  CacheMemoryContext);
	}
	if ((flags & TYPECACHE_HASH_PROC_FINFO) &&
		typentry->hash_proc_finfo.fn_oid == InvalidOid &&
		typentry->hash_proc != InvalidOid)
	{
		fmgr_info_cxt(typentry->hash_proc, &typentry->hash_proc_finfo,
					  CacheMemoryContext);
	}

	/*
	 * If it's a composite type (row type), get tupdesc if requested
	 */
	if ((flags & TYPECACHE_TUPDESC) &&
		typentry->tupDesc == NULL &&
		typentry->typtype == TYPTYPE_COMPOSITE)
	{
		load_typcache_tupdesc(typentry);
	}

	/*
	 * If requested, get information about a range type
	 */
	if ((flags & TYPECACHE_RANGE_INFO) &&
		typentry->rngelemtype == NULL &&
		typentry->typtype == TYPTYPE_RANGE)
	{
		load_rangetype_info(typentry);
	}

	/*
	 * If requested, get information about a domain type
	 */
	if ((flags & TYPECACHE_DOMAIN_INFO) &&
		(typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
		typentry->typtype == TYPTYPE_DOMAIN)
	{
		load_domaintype_info(typentry);
	}

	return typentry;
}
562
563 /*
564  * load_typcache_tupdesc --- helper routine to set up composite type's tupDesc
565  */
566 static void
567 load_typcache_tupdesc(TypeCacheEntry *typentry)
568 {
569         Relation        rel;
570
571         if (!OidIsValid(typentry->typrelid))            /* should not happen */
572                 elog(ERROR, "invalid typrelid for composite type %u",
573                          typentry->type_id);
574         rel = relation_open(typentry->typrelid, AccessShareLock);
575         Assert(rel->rd_rel->reltype == typentry->type_id);
576
577         /*
578          * Link to the tupdesc and increment its refcount (we assert it's a
579          * refcounted descriptor).  We don't use IncrTupleDescRefCount() for this,
580          * because the reference mustn't be entered in the current resource owner;
581          * it can outlive the current query.
582          */
583         typentry->tupDesc = RelationGetDescr(rel);
584
585         Assert(typentry->tupDesc->tdrefcount > 0);
586         typentry->tupDesc->tdrefcount++;
587
588         relation_close(rel, AccessShareLock);
589 }
590
591 /*
592  * load_rangetype_info --- helper routine to set up range type information
593  */
594 static void
595 load_rangetype_info(TypeCacheEntry *typentry)
596 {
597         Form_pg_range pg_range;
598         HeapTuple       tup;
599         Oid                     subtypeOid;
600         Oid                     opclassOid;
601         Oid                     canonicalOid;
602         Oid                     subdiffOid;
603         Oid                     opfamilyOid;
604         Oid                     opcintype;
605         Oid                     cmpFnOid;
606
607         /* get information from pg_range */
608         tup = SearchSysCache1(RANGETYPE, ObjectIdGetDatum(typentry->type_id));
609         /* should not fail, since we already checked typtype ... */
610         if (!HeapTupleIsValid(tup))
611                 elog(ERROR, "cache lookup failed for range type %u",
612                          typentry->type_id);
613         pg_range = (Form_pg_range) GETSTRUCT(tup);
614
615         subtypeOid = pg_range->rngsubtype;
616         typentry->rng_collation = pg_range->rngcollation;
617         opclassOid = pg_range->rngsubopc;
618         canonicalOid = pg_range->rngcanonical;
619         subdiffOid = pg_range->rngsubdiff;
620
621         ReleaseSysCache(tup);
622
623         /* get opclass properties and look up the comparison function */
624         opfamilyOid = get_opclass_family(opclassOid);
625         opcintype = get_opclass_input_type(opclassOid);
626
627         cmpFnOid = get_opfamily_proc(opfamilyOid, opcintype, opcintype,
628                                                                  BTORDER_PROC);
629         if (!RegProcedureIsValid(cmpFnOid))
630                 elog(ERROR, "missing support function %d(%u,%u) in opfamily %u",
631                          BTORDER_PROC, opcintype, opcintype, opfamilyOid);
632
633         /* set up cached fmgrinfo structs */
634         fmgr_info_cxt(cmpFnOid, &typentry->rng_cmp_proc_finfo,
635                                   CacheMemoryContext);
636         if (OidIsValid(canonicalOid))
637                 fmgr_info_cxt(canonicalOid, &typentry->rng_canonical_finfo,
638                                           CacheMemoryContext);
639         if (OidIsValid(subdiffOid))
640                 fmgr_info_cxt(subdiffOid, &typentry->rng_subdiff_finfo,
641                                           CacheMemoryContext);
642
643         /* Lastly, set up link to the element type --- this marks data valid */
644         typentry->rngelemtype = lookup_type_cache(subtypeOid, 0);
645 }
646
647
/*
 * load_domaintype_info --- helper routine to set up domain constraint info
 *
 * Collects the CHECK constraints (and at most one NOT NULL check) that apply
 * to the domain, including those inherited from ancestor domains, and stores
 * them in typentry->domainData as a refcounted DomainConstraintCache.
 *
 * Note: we assume we're called in a relatively short-lived context, so it's
 * okay to leak data into the current context while scanning pg_constraint.
 * We build the new DomainConstraintCache data in a context underneath
 * CurrentMemoryContext, and reparent it under CacheMemoryContext when
 * complete.
 */
static void
load_domaintype_info(TypeCacheEntry *typentry)
{
	Oid			typeOid = typentry->type_id;
	DomainConstraintCache *dcc;
	bool		notNull = false;
	DomainConstraintState **ccons;	/* temp array for per-domain sorting */
	int			cconslen;			/* allocated length of ccons */
	Relation	conRel;
	MemoryContext oldcxt;

	/*
	 * If we're here, any existing constraint info is stale, so release it.
	 * For safety, be sure to null the link before trying to delete the data.
	 */
	if (typentry->domainData)
	{
		dcc = typentry->domainData;
		typentry->domainData = NULL;
		decr_dcc_refcount(dcc);
	}

	/*
	 * We try to optimize the common case of no domain constraints, so don't
	 * create the dcc object and context until we find a constraint.  Likewise
	 * for the temp sorting array.
	 */
	dcc = NULL;
	ccons = NULL;
	cconslen = 0;

	/*
	 * Scan pg_constraint for relevant constraints.  We want to find
	 * constraints for not just this domain, but any ancestor domains, so the
	 * outer loop crawls up the domain stack.
	 */
	conRel = heap_open(ConstraintRelationId, AccessShareLock);

	for (;;)
	{
		HeapTuple	tup;
		HeapTuple	conTup;
		Form_pg_type typTup;
		int			nccons = 0;		/* constraints found for this one domain */
		ScanKeyData key[1];
		SysScanDesc scan;

		tup = SearchSysCache1(TYPEOID, ObjectIdGetDatum(typeOid));
		if (!HeapTupleIsValid(tup))
			elog(ERROR, "cache lookup failed for type %u", typeOid);
		typTup = (Form_pg_type) GETSTRUCT(tup);

		if (typTup->typtype != TYPTYPE_DOMAIN)
		{
			/* Not a domain, so done */
			ReleaseSysCache(tup);
			break;
		}

		/* Test for NOT NULL Constraint */
		if (typTup->typnotnull)
			notNull = true;

		/* Look for CHECK Constraints on this domain */
		ScanKeyInit(&key[0],
					Anum_pg_constraint_contypid,
					BTEqualStrategyNumber, F_OIDEQ,
					ObjectIdGetDatum(typeOid));

		scan = systable_beginscan(conRel, ConstraintTypidIndexId, true,
								  NULL, 1, key);

		while (HeapTupleIsValid(conTup = systable_getnext(scan)))
		{
			Form_pg_constraint c = (Form_pg_constraint) GETSTRUCT(conTup);
			Datum		val;
			bool		isNull;
			char	   *constring;
			Expr	   *check_expr;
			DomainConstraintState *r;

			/* Ignore non-CHECK constraints (presently, shouldn't be any) */
			if (c->contype != CONSTRAINT_CHECK)
				continue;

			/* Not expecting conbin to be NULL, but we'll test for it anyway */
			val = fastgetattr(conTup, Anum_pg_constraint_conbin,
							  conRel->rd_att, &isNull);
			if (isNull)
				elog(ERROR, "domain \"%s\" constraint \"%s\" has NULL conbin",
					 NameStr(typTup->typname), NameStr(c->conname));

			/* Convert conbin to C string in caller context */
			constring = TextDatumGetCString(val);

			/* Create the DomainConstraintCache object and context if needed */
			if (dcc == NULL)
			{
				MemoryContext cxt;

				cxt = AllocSetContextCreate(CurrentMemoryContext,
											"Domain constraints",
											ALLOCSET_SMALL_SIZES);
				dcc = (DomainConstraintCache *)
					MemoryContextAlloc(cxt, sizeof(DomainConstraintCache));
				dcc->constraints = NIL;
				dcc->dccContext = cxt;
				dcc->dccRefCount = 0;
			}

			/* Create node trees in DomainConstraintCache's context */
			oldcxt = MemoryContextSwitchTo(dcc->dccContext);

			check_expr = (Expr *) stringToNode(constring);

			/* ExecInitExpr will assume we've planned the expression */
			check_expr = expression_planner(check_expr);

			r = makeNode(DomainConstraintState);
			r->constrainttype = DOM_CONSTRAINT_CHECK;
			r->name = pstrdup(NameStr(c->conname));
			/* Must cast here because we're not storing an expr state node */
			r->check_expr = (ExprState *) check_expr;

			MemoryContextSwitchTo(oldcxt);

			/* Accumulate constraints in an array, for sorting below */
			if (ccons == NULL)
			{
				cconslen = 8;
				ccons = (DomainConstraintState **)
					palloc(cconslen * sizeof(DomainConstraintState *));
			}
			else if (nccons >= cconslen)
			{
				cconslen *= 2;
				ccons = (DomainConstraintState **)
					repalloc(ccons, cconslen * sizeof(DomainConstraintState *));
			}
			ccons[nccons++] = r;
		}

		systable_endscan(scan);

		if (nccons > 0)
		{
			/*
			 * Sort the items for this domain, so that CHECKs are applied in a
			 * deterministic order.
			 */
			if (nccons > 1)
				qsort(ccons, nccons, sizeof(DomainConstraintState *), dcs_cmp);

			/*
			 * Now attach them to the overall list.  Use lcons() here because
			 * constraints of parent domains should be applied earlier.
			 */
			oldcxt = MemoryContextSwitchTo(dcc->dccContext);
			while (nccons > 0)
				dcc->constraints = lcons(ccons[--nccons], dcc->constraints);
			MemoryContextSwitchTo(oldcxt);
		}

		/* loop to next domain in stack */
		typeOid = typTup->typbasetype;
		ReleaseSysCache(tup);
	}

	heap_close(conRel, AccessShareLock);

	/*
	 * Only need to add one NOT NULL check regardless of how many domains in
	 * the stack request it.
	 */
	if (notNull)
	{
		DomainConstraintState *r;

		/* Create the DomainConstraintCache object and context if needed */
		if (dcc == NULL)
		{
			MemoryContext cxt;

			cxt = AllocSetContextCreate(CurrentMemoryContext,
										"Domain constraints",
										ALLOCSET_SMALL_SIZES);
			dcc = (DomainConstraintCache *)
				MemoryContextAlloc(cxt, sizeof(DomainConstraintCache));
			dcc->constraints = NIL;
			dcc->dccContext = cxt;
			dcc->dccRefCount = 0;
		}

		/* Create node trees in DomainConstraintCache's context */
		oldcxt = MemoryContextSwitchTo(dcc->dccContext);

		r = makeNode(DomainConstraintState);

		r->constrainttype = DOM_CONSTRAINT_NOTNULL;
		r->name = pstrdup("NOT NULL");
		r->check_expr = NULL;

		/* lcons to apply the nullness check FIRST */
		dcc->constraints = lcons(r, dcc->constraints);

		MemoryContextSwitchTo(oldcxt);
	}

	/*
	 * If we made a constraint object, move it into CacheMemoryContext and
	 * attach it to the typcache entry.
	 */
	if (dcc)
	{
		MemoryContextSetParent(dcc->dccContext, CacheMemoryContext);
		typentry->domainData = dcc;
		dcc->dccRefCount++;				/* count the typcache's reference */
	}

	/* Either way, the typcache entry's domain data is now valid. */
	typentry->flags |= TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS;
}
879
880 /*
881  * qsort comparator to sort DomainConstraintState pointers by name
882  */
883 static int
884 dcs_cmp(const void *a, const void *b)
885 {
886         const DomainConstraintState *const * ca = (const DomainConstraintState *const *) a;
887         const DomainConstraintState *const * cb = (const DomainConstraintState *const *) b;
888
889         return strcmp((*ca)->name, (*cb)->name);
890 }
891
892 /*
893  * decr_dcc_refcount --- decrement a DomainConstraintCache's refcount,
894  * and free it if no references remain
895  */
896 static void
897 decr_dcc_refcount(DomainConstraintCache *dcc)
898 {
899         Assert(dcc->dccRefCount > 0);
900         if (--(dcc->dccRefCount) <= 0)
901                 MemoryContextDelete(dcc->dccContext);
902 }
903
904 /*
905  * Context reset/delete callback for a DomainConstraintRef
906  */
907 static void
908 dccref_deletion_callback(void *arg)
909 {
910         DomainConstraintRef *ref = (DomainConstraintRef *) arg;
911         DomainConstraintCache *dcc = ref->dcc;
912
913         /* Paranoia --- be sure link is nulled before trying to release */
914         if (dcc)
915         {
916                 ref->constraints = NIL;
917                 ref->dcc = NULL;
918                 decr_dcc_refcount(dcc);
919         }
920 }
921
922 /*
923  * prep_domain_constraints --- prepare domain constraints for execution
924  *
925  * The expression trees stored in the DomainConstraintCache's list are
926  * converted to executable expression state trees stored in execctx.
927  */
928 static List *
929 prep_domain_constraints(List *constraints, MemoryContext execctx)
930 {
931         List       *result = NIL;
932         MemoryContext oldcxt;
933         ListCell   *lc;
934
935         oldcxt = MemoryContextSwitchTo(execctx);
936
937         foreach(lc, constraints)
938         {
939                 DomainConstraintState *r = (DomainConstraintState *) lfirst(lc);
940                 DomainConstraintState *newr;
941
942                 newr = makeNode(DomainConstraintState);
943                 newr->constrainttype = r->constrainttype;
944                 newr->name = r->name;
945                 /* Must cast here because cache items contain expr plan trees */
946                 newr->check_expr = ExecInitExpr((Expr *) r->check_expr, NULL);
947
948                 result = lappend(result, newr);
949         }
950
951         MemoryContextSwitchTo(oldcxt);
952
953         return result;
954 }
955
/*
 * InitDomainConstraintRef --- initialize a DomainConstraintRef struct
 *
 * Caller must tell us the MemoryContext in which the DomainConstraintRef
 * lives.  The ref will be cleaned up when that context is reset/deleted.
 *
 * Note: the reset callback is registered before the refcount is taken, so
 * that the refcount is guaranteed to be released even if we error out
 * partway through.
 */
void
InitDomainConstraintRef(Oid type_id, DomainConstraintRef *ref,
						MemoryContext refctx)
{
	/* Look up the typcache entry --- we assume it survives indefinitely */
	ref->tcache = lookup_type_cache(type_id, TYPECACHE_DOMAIN_INFO);
	/* For safety, establish the callback before acquiring a refcount */
	ref->refctx = refctx;
	ref->dcc = NULL;
	ref->callback.func = dccref_deletion_callback;
	ref->callback.arg = (void *) ref;
	MemoryContextRegisterResetCallback(refctx, &ref->callback);
	/* Acquire refcount if there are constraints, and set up exported list */
	if (ref->tcache->domainData)
	{
		ref->dcc = ref->tcache->domainData;
		ref->dcc->dccRefCount++;
		ref->constraints = prep_domain_constraints(ref->dcc->constraints,
												   ref->refctx);
	}
	else
		ref->constraints = NIL;
}
985
/*
 * UpdateDomainConstraintRef --- recheck validity of domain constraint info
 *
 * If the domain's constraint set changed, ref->constraints is updated to
 * point at a new list of cached constraints.
 *
 * In the normal case where nothing happened to the domain, this is cheap
 * enough that it's reasonable (and expected) to check before *each* use
 * of the constraint info.
 */
void
UpdateDomainConstraintRef(DomainConstraintRef *ref)
{
	TypeCacheEntry *typentry = ref->tcache;

	/* Make sure typcache entry's data is up to date */
	if ((typentry->flags & TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS) == 0 &&
		typentry->typtype == TYPTYPE_DOMAIN)
		load_domaintype_info(typentry);

	/* Transfer to ref object if there's new info, adjusting refcounts */
	if (ref->dcc != typentry->domainData)
	{
		/* Paranoia --- be sure link is nulled before trying to release */
		DomainConstraintCache *dcc = ref->dcc;

		if (dcc)
		{
			/*
			 * Note: we just leak the previous list of executable domain
			 * constraints.  Alternatively, we could keep those in a child
			 * context of ref->refctx and free that context at this point.
			 * However, in practice this code path will be taken so seldom
			 * that the extra bookkeeping for a child context doesn't seem
			 * worthwhile; we'll just allow a leak for the lifespan of refctx.
			 */
			ref->constraints = NIL;
			ref->dcc = NULL;
			decr_dcc_refcount(dcc);
		}
		dcc = typentry->domainData;
		if (dcc)
		{
			/* Pin the new cache and rebuild the executable constraint list */
			ref->dcc = dcc;
			dcc->dccRefCount++;
			ref->constraints = prep_domain_constraints(dcc->constraints,
													   ref->refctx);
		}
	}
}
1036
1037 /*
1038  * DomainHasConstraints --- utility routine to check if a domain has constraints
1039  *
1040  * This is defined to return false, not fail, if type is not a domain.
1041  */
1042 bool
1043 DomainHasConstraints(Oid type_id)
1044 {
1045         TypeCacheEntry *typentry;
1046
1047         /*
1048          * Note: a side effect is to cause the typcache's domain data to become
1049          * valid.  This is fine since we'll likely need it soon if there is any.
1050          */
1051         typentry = lookup_type_cache(type_id, TYPECACHE_DOMAIN_INFO);
1052
1053         return (typentry->domainData != NULL);
1054 }
1055
1056
1057 /*
1058  * array_element_has_equality and friends are helper routines to check
1059  * whether we should believe that array_eq and related functions will work
1060  * on the given array type or composite type.
1061  *
1062  * The logic above may call these repeatedly on the same type entry, so we
1063  * make use of the typentry->flags field to cache the results once known.
1064  * Also, we assume that we'll probably want all these facts about the type
1065  * if we want any, so we cache them all using only one lookup of the
1066  * component datatype(s).
1067  */
1068
1069 static bool
1070 array_element_has_equality(TypeCacheEntry *typentry)
1071 {
1072         if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1073                 cache_array_element_properties(typentry);
1074         return (typentry->flags & TCFLAGS_HAVE_ELEM_EQUALITY) != 0;
1075 }
1076
1077 static bool
1078 array_element_has_compare(TypeCacheEntry *typentry)
1079 {
1080         if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1081                 cache_array_element_properties(typentry);
1082         return (typentry->flags & TCFLAGS_HAVE_ELEM_COMPARE) != 0;
1083 }
1084
1085 static bool
1086 array_element_has_hashing(TypeCacheEntry *typentry)
1087 {
1088         if (!(typentry->flags & TCFLAGS_CHECKED_ELEM_PROPERTIES))
1089                 cache_array_element_properties(typentry);
1090         return (typentry->flags & TCFLAGS_HAVE_ELEM_HASHING) != 0;
1091 }
1092
1093 static void
1094 cache_array_element_properties(TypeCacheEntry *typentry)
1095 {
1096         Oid                     elem_type = get_base_element_type(typentry->type_id);
1097
1098         if (OidIsValid(elem_type))
1099         {
1100                 TypeCacheEntry *elementry;
1101
1102                 elementry = lookup_type_cache(elem_type,
1103                                                                           TYPECACHE_EQ_OPR |
1104                                                                           TYPECACHE_CMP_PROC |
1105                                                                           TYPECACHE_HASH_PROC);
1106                 if (OidIsValid(elementry->eq_opr))
1107                         typentry->flags |= TCFLAGS_HAVE_ELEM_EQUALITY;
1108                 if (OidIsValid(elementry->cmp_proc))
1109                         typentry->flags |= TCFLAGS_HAVE_ELEM_COMPARE;
1110                 if (OidIsValid(elementry->hash_proc))
1111                         typentry->flags |= TCFLAGS_HAVE_ELEM_HASHING;
1112         }
1113         typentry->flags |= TCFLAGS_CHECKED_ELEM_PROPERTIES;
1114 }
1115
1116 static bool
1117 record_fields_have_equality(TypeCacheEntry *typentry)
1118 {
1119         if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1120                 cache_record_field_properties(typentry);
1121         return (typentry->flags & TCFLAGS_HAVE_FIELD_EQUALITY) != 0;
1122 }
1123
1124 static bool
1125 record_fields_have_compare(TypeCacheEntry *typentry)
1126 {
1127         if (!(typentry->flags & TCFLAGS_CHECKED_FIELD_PROPERTIES))
1128                 cache_record_field_properties(typentry);
1129         return (typentry->flags & TCFLAGS_HAVE_FIELD_COMPARE) != 0;
1130 }
1131
/*
 * Determine whether all of a composite type's fields support equality and/or
 * ordering comparison, caching the answers in typentry->flags.
 */
static void
cache_record_field_properties(TypeCacheEntry *typentry)
{
	/*
	 * For type RECORD, we can't really tell what will work, since we don't
	 * have access here to the specific anonymous type.  Just assume that
	 * everything will (we may get a failure at runtime ...)
	 */
	if (typentry->type_id == RECORDOID)
		typentry->flags |= (TCFLAGS_HAVE_FIELD_EQUALITY |
							TCFLAGS_HAVE_FIELD_COMPARE);
	else if (typentry->typtype == TYPTYPE_COMPOSITE)
	{
		TupleDesc	tupdesc;
		int			newflags;
		int			i;

		/* Fetch composite type's tupdesc if we don't have it already */
		if (typentry->tupDesc == NULL)
			load_typcache_tupdesc(typentry);
		tupdesc = typentry->tupDesc;

		/* Must bump the refcount while we do additional catalog lookups */
		IncrTupleDescRefCount(tupdesc);

		/* Have each property if all non-dropped fields have the property */
		newflags = (TCFLAGS_HAVE_FIELD_EQUALITY |
					TCFLAGS_HAVE_FIELD_COMPARE);
		for (i = 0; i < tupdesc->natts; i++)
		{
			TypeCacheEntry *fieldentry;

			if (tupdesc->attrs[i]->attisdropped)
				continue;

			fieldentry = lookup_type_cache(tupdesc->attrs[i]->atttypid,
										   TYPECACHE_EQ_OPR |
										   TYPECACHE_CMP_PROC);
			if (!OidIsValid(fieldentry->eq_opr))
				newflags &= ~TCFLAGS_HAVE_FIELD_EQUALITY;
			if (!OidIsValid(fieldentry->cmp_proc))
				newflags &= ~TCFLAGS_HAVE_FIELD_COMPARE;

			/* We can drop out of the loop once we disprove all bits */
			if (newflags == 0)
				break;
		}
		typentry->flags |= newflags;

		DecrTupleDescRefCount(tupdesc);
	}
	/* Mark the properties as computed, whatever the outcome */
	typentry->flags |= TCFLAGS_CHECKED_FIELD_PROPERTIES;
}
1185
1186
1187 /*
1188  * lookup_rowtype_tupdesc_internal --- internal routine to lookup a rowtype
1189  *
1190  * Same API as lookup_rowtype_tupdesc_noerror, but the returned tupdesc
1191  * hasn't had its refcount bumped.
1192  */
1193 static TupleDesc
1194 lookup_rowtype_tupdesc_internal(Oid type_id, int32 typmod, bool noError)
1195 {
1196         if (type_id != RECORDOID)
1197         {
1198                 /*
1199                  * It's a named composite type, so use the regular typcache.
1200                  */
1201                 TypeCacheEntry *typentry;
1202
1203                 typentry = lookup_type_cache(type_id, TYPECACHE_TUPDESC);
1204                 if (typentry->tupDesc == NULL && !noError)
1205                         ereport(ERROR,
1206                                         (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1207                                          errmsg("type %s is not composite",
1208                                                         format_type_be(type_id))));
1209                 return typentry->tupDesc;
1210         }
1211         else
1212         {
1213                 /*
1214                  * It's a transient record type, so look in our record-type table.
1215                  */
1216                 if (typmod < 0 || typmod >= NextRecordTypmod)
1217                 {
1218                         if (!noError)
1219                                 ereport(ERROR,
1220                                                 (errcode(ERRCODE_WRONG_OBJECT_TYPE),
1221                                                  errmsg("record type has not been registered")));
1222                         return NULL;
1223                 }
1224                 return RecordCacheArray[typmod];
1225         }
1226 }
1227
1228 /*
1229  * lookup_rowtype_tupdesc
1230  *
1231  * Given a typeid/typmod that should describe a known composite type,
1232  * return the tuple descriptor for the type.  Will ereport on failure.
1233  *
1234  * Note: on success, we increment the refcount of the returned TupleDesc,
1235  * and log the reference in CurrentResourceOwner.  Caller should call
1236  * ReleaseTupleDesc or DecrTupleDescRefCount when done using the tupdesc.
1237  */
1238 TupleDesc
1239 lookup_rowtype_tupdesc(Oid type_id, int32 typmod)
1240 {
1241         TupleDesc       tupDesc;
1242
1243         tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1244         IncrTupleDescRefCount(tupDesc);
1245         return tupDesc;
1246 }
1247
1248 /*
1249  * lookup_rowtype_tupdesc_noerror
1250  *
1251  * As above, but if the type is not a known composite type and noError
1252  * is true, returns NULL instead of ereport'ing.  (Note that if a bogus
1253  * type_id is passed, you'll get an ereport anyway.)
1254  */
1255 TupleDesc
1256 lookup_rowtype_tupdesc_noerror(Oid type_id, int32 typmod, bool noError)
1257 {
1258         TupleDesc       tupDesc;
1259
1260         tupDesc = lookup_rowtype_tupdesc_internal(type_id, typmod, noError);
1261         if (tupDesc != NULL)
1262                 IncrTupleDescRefCount(tupDesc);
1263         return tupDesc;
1264 }
1265
1266 /*
1267  * lookup_rowtype_tupdesc_copy
1268  *
1269  * Like lookup_rowtype_tupdesc(), but the returned TupleDesc has been
1270  * copied into the CurrentMemoryContext and is not reference-counted.
1271  */
1272 TupleDesc
1273 lookup_rowtype_tupdesc_copy(Oid type_id, int32 typmod)
1274 {
1275         TupleDesc       tmp;
1276
1277         tmp = lookup_rowtype_tupdesc_internal(type_id, typmod, false);
1278         return CreateTupleDescCopyConstr(tmp);
1279 }
1280
1281
/*
 * assign_record_type_typmod
 *
 * Given a tuple descriptor for a RECORD type, find or create a cache entry
 * for the type, and set the tupdesc's tdtypmod field to a value that will
 * identify this cache entry to lookup_rowtype_tupdesc.
 */
void
assign_record_type_typmod(TupleDesc tupDesc)
{
	RecordCacheEntry *recentry;
	TupleDesc	entDesc;
	Oid			hashkey[REC_HASH_KEYS];
	bool		found;
	int			i;
	ListCell   *l;
	int32		newtypmod;
	MemoryContext oldcxt;

	Assert(tupDesc->tdtypeid == RECORDOID);

	if (RecordCacheHash == NULL)
	{
		/* First time through: initialize the hash table */
		HASHCTL		ctl;

		MemSet(&ctl, 0, sizeof(ctl));
		ctl.keysize = REC_HASH_KEYS * sizeof(Oid);
		ctl.entrysize = sizeof(RecordCacheEntry);
		RecordCacheHash = hash_create("Record information cache", 64,
									  &ctl, HASH_ELEM | HASH_BLOBS);

		/* Also make sure CacheMemoryContext exists */
		if (!CacheMemoryContext)
			CreateCacheMemoryContext();
	}

	/* Find or create a hashtable entry for this hash class */
	/* (the key is the type OIDs of the first REC_HASH_KEYS columns) */
	MemSet(hashkey, 0, sizeof(hashkey));
	for (i = 0; i < tupDesc->natts; i++)
	{
		if (i >= REC_HASH_KEYS)
			break;
		hashkey[i] = tupDesc->attrs[i]->atttypid;
	}
	recentry = (RecordCacheEntry *) hash_search(RecordCacheHash,
												(void *) hashkey,
												HASH_ENTER, &found);
	if (!found)
	{
		/* New entry ... hash_search initialized only the hash key */
		recentry->tupdescs = NIL;
	}

	/* Look for existing record cache entry */
	foreach(l, recentry->tupdescs)
	{
		entDesc = (TupleDesc) lfirst(l);
		if (equalTupleDescs(tupDesc, entDesc))
		{
			/* Already registered; just report the previously assigned typmod */
			tupDesc->tdtypmod = entDesc->tdtypmod;
			return;
		}
	}

	/* Not present, so need to manufacture an entry */
	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);

	if (RecordCacheArray == NULL)
	{
		RecordCacheArray = (TupleDesc *) palloc(64 * sizeof(TupleDesc));
		RecordCacheArrayLen = 64;
	}
	else if (NextRecordTypmod >= RecordCacheArrayLen)
	{
		/* Double the array whenever it fills up */
		int32		newlen = RecordCacheArrayLen * 2;

		RecordCacheArray = (TupleDesc *) repalloc(RecordCacheArray,
												  newlen * sizeof(TupleDesc));
		RecordCacheArrayLen = newlen;
	}

	/* if fail in subrs, no damage except possibly some wasted memory... */
	entDesc = CreateTupleDescCopy(tupDesc);
	recentry->tupdescs = lcons(entDesc, recentry->tupdescs);
	/* mark it as a reference-counted tupdesc */
	entDesc->tdrefcount = 1;
	/* now it's safe to advance NextRecordTypmod */
	newtypmod = NextRecordTypmod++;
	entDesc->tdtypmod = newtypmod;
	RecordCacheArray[newtypmod] = entDesc;

	/* report to caller as well */
	tupDesc->tdtypmod = newtypmod;

	MemoryContextSwitchTo(oldcxt);
}
1379
1380 /*
1381  * TypeCacheRelCallback
1382  *              Relcache inval callback function
1383  *
1384  * Delete the cached tuple descriptor (if any) for the given rel's composite
1385  * type, or for all composite types if relid == InvalidOid.  Also reset
1386  * whatever info we have cached about the composite type's comparability.
1387  *
1388  * This is called when a relcache invalidation event occurs for the given
1389  * relid.  We must scan the whole typcache hash since we don't know the
1390  * type OID corresponding to the relid.  We could do a direct search if this
1391  * were a syscache-flush callback on pg_type, but then we would need all
1392  * ALTER-TABLE-like commands that could modify a rowtype to issue syscache
1393  * invals against the rel's pg_type OID.  The extra SI signaling could very
1394  * well cost more than we'd save, since in most usages there are not very
1395  * many entries in a backend's typcache.  The risk of bugs-of-omission seems
1396  * high, too.
1397  *
1398  * Another possibility, with only localized impact, is to maintain a second
1399  * hashtable that indexes composite-type typcache entries by their typrelid.
1400  * But it's still not clear it's worth the trouble.
1401  */
1402 static void
1403 TypeCacheRelCallback(Datum arg, Oid relid)
1404 {
1405         HASH_SEQ_STATUS status;
1406         TypeCacheEntry *typentry;
1407
1408         /* TypeCacheHash must exist, else this callback wouldn't be registered */
1409         hash_seq_init(&status, TypeCacheHash);
1410         while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
1411         {
1412                 if (typentry->typtype != TYPTYPE_COMPOSITE)
1413                         continue;                       /* skip non-composites */
1414
1415                 /* Skip if no match, unless we're zapping all composite types */
1416                 if (relid != typentry->typrelid && relid != InvalidOid)
1417                         continue;
1418
1419                 /* Delete tupdesc if we have it */
1420                 if (typentry->tupDesc != NULL)
1421                 {
1422                         /*
1423                          * Release our refcount, and free the tupdesc if none remain.
1424                          * (Can't use DecrTupleDescRefCount because this reference is not
1425                          * logged in current resource owner.)
1426                          */
1427                         Assert(typentry->tupDesc->tdrefcount > 0);
1428                         if (--typentry->tupDesc->tdrefcount == 0)
1429                                 FreeTupleDesc(typentry->tupDesc);
1430                         typentry->tupDesc = NULL;
1431                 }
1432
1433                 /* Reset equality/comparison/hashing validity information */
1434                 typentry->flags = 0;
1435         }
1436 }
1437
1438 /*
1439  * TypeCacheOpcCallback
1440  *              Syscache inval callback function
1441  *
1442  * This is called when a syscache invalidation event occurs for any pg_opclass
1443  * row.  In principle we could probably just invalidate data dependent on the
1444  * particular opclass, but since updates on pg_opclass are rare in production
1445  * it doesn't seem worth a lot of complication: we just mark all cached data
1446  * invalid.
1447  *
1448  * Note that we don't bother watching for updates on pg_amop or pg_amproc.
1449  * This should be safe because ALTER OPERATOR FAMILY ADD/DROP OPERATOR/FUNCTION
1450  * is not allowed to be used to add/drop the primary operators and functions
1451  * of an opclass, only cross-type members of a family; and the latter sorts
1452  * of members are not going to get cached here.
1453  */
1454 static void
1455 TypeCacheOpcCallback(Datum arg, int cacheid, uint32 hashvalue)
1456 {
1457         HASH_SEQ_STATUS status;
1458         TypeCacheEntry *typentry;
1459
1460         /* TypeCacheHash must exist, else this callback wouldn't be registered */
1461         hash_seq_init(&status, TypeCacheHash);
1462         while ((typentry = (TypeCacheEntry *) hash_seq_search(&status)) != NULL)
1463         {
1464                 /* Reset equality/comparison/hashing validity information */
1465                 typentry->flags = 0;
1466         }
1467 }
1468
1469 /*
1470  * TypeCacheConstrCallback
1471  *              Syscache inval callback function
1472  *
1473  * This is called when a syscache invalidation event occurs for any
1474  * pg_constraint or pg_type row.  We flush information about domain
1475  * constraints when this happens.
1476  *
1477  * It's slightly annoying that we can't tell whether the inval event was for a
1478  * domain constraint/type record or not; there's usually more update traffic
1479  * for table constraints/types than domain constraints, so we'll do a lot of
1480  * useless flushes.  Still, this is better than the old no-caching-at-all
1481  * approach to domain constraints.
1482  */
1483 static void
1484 TypeCacheConstrCallback(Datum arg, int cacheid, uint32 hashvalue)
1485 {
1486         TypeCacheEntry *typentry;
1487
1488         /*
1489          * Because this is called very frequently, and typically very few of the
1490          * typcache entries are for domains, we don't use hash_seq_search here.
1491          * Instead we thread all the domain-type entries together so that we can
1492          * visit them cheaply.
1493          */
1494         for (typentry = firstDomainTypeEntry;
1495                  typentry != NULL;
1496                  typentry = typentry->nextDomain)
1497         {
1498                 /* Reset domain constraint validity information */
1499                 typentry->flags &= ~TCFLAGS_CHECKED_DOMAIN_CONSTRAINTS;
1500         }
1501 }
1502
1503
1504 /*
1505  * Check if given OID is part of the subset that's sortable by comparisons
1506  */
1507 static inline bool
1508 enum_known_sorted(TypeCacheEnumData *enumdata, Oid arg)
1509 {
1510         Oid                     offset;
1511
1512         if (arg < enumdata->bitmap_base)
1513                 return false;
1514         offset = arg - enumdata->bitmap_base;
1515         if (offset > (Oid) INT_MAX)
1516                 return false;
1517         return bms_is_member((int) offset, enumdata->sorted_values);
1518 }
1519
1520
1521 /*
1522  * compare_values_of_enum
1523  *              Compare two members of an enum type.
1524  *              Return <0, 0, or >0 according as arg1 <, =, or > arg2.
1525  *
1526  * Note: currently, the enumData cache is refreshed only if we are asked
1527  * to compare an enum value that is not already in the cache.  This is okay
1528  * because there is no support for re-ordering existing values, so comparisons
1529  * of previously cached values will return the right answer even if other
1530  * values have been added since we last loaded the cache.
1531  *
1532  * Note: the enum logic has a special-case rule about even-numbered versus
1533  * odd-numbered OIDs, but we take no account of that rule here; this
1534  * routine shouldn't even get called when that rule applies.
1535  */
1536 int
1537 compare_values_of_enum(TypeCacheEntry *tcache, Oid arg1, Oid arg2)
1538 {
1539         TypeCacheEnumData *enumdata;
1540         EnumItem   *item1;
1541         EnumItem   *item2;
1542
1543         /*
1544          * Equal OIDs are certainly equal --- this case was probably handled by
1545          * our caller, but we may as well check.
1546          */
1547         if (arg1 == arg2)
1548                 return 0;
1549
1550         /* Load up the cache if first time through */
1551         if (tcache->enumData == NULL)
1552                 load_enum_cache_data(tcache);
1553         enumdata = tcache->enumData;
1554
1555         /*
1556          * If both OIDs are known-sorted, we can just compare them directly.
1557          */
1558         if (enum_known_sorted(enumdata, arg1) &&
1559                 enum_known_sorted(enumdata, arg2))
1560         {
1561                 if (arg1 < arg2)
1562                         return -1;
1563                 else
1564                         return 1;
1565         }
1566
1567         /*
1568          * Slow path: we have to identify their actual sort-order positions.
1569          */
1570         item1 = find_enumitem(enumdata, arg1);
1571         item2 = find_enumitem(enumdata, arg2);
1572
1573         if (item1 == NULL || item2 == NULL)
1574         {
1575                 /*
1576                  * We couldn't find one or both values.  That means the enum has
1577                  * changed under us, so re-initialize the cache and try again. We
1578                  * don't bother retrying the known-sorted case in this path.
1579                  */
1580                 load_enum_cache_data(tcache);
1581                 enumdata = tcache->enumData;
1582
1583                 item1 = find_enumitem(enumdata, arg1);
1584                 item2 = find_enumitem(enumdata, arg2);
1585
1586                 /*
1587                  * If we still can't find the values, complain: we must have corrupt
1588                  * data.
1589                  */
1590                 if (item1 == NULL)
1591                         elog(ERROR, "enum value %u not found in cache for enum %s",
1592                                  arg1, format_type_be(tcache->type_id));
1593                 if (item2 == NULL)
1594                         elog(ERROR, "enum value %u not found in cache for enum %s",
1595                                  arg2, format_type_be(tcache->type_id));
1596         }
1597
1598         if (item1->sort_order < item2->sort_order)
1599                 return -1;
1600         else if (item1->sort_order > item2->sort_order)
1601                 return 1;
1602         else
1603                 return 0;
1604 }
1605
/*
 * Load (or re-load) the enumData member of the typcache entry.
 *
 * Builds a sorted-by-OID array of the enum's members plus a bitmap of the
 * members whose OID order matches their sort order; the finished struct is
 * allocated in CacheMemoryContext and installed in tcache->enumData
 * (replacing and freeing any previous version).  Raises an error if the
 * entry is not an enum type.
 */
static void
load_enum_cache_data(TypeCacheEntry *tcache)
{
	TypeCacheEnumData *enumdata;
	Relation	enum_rel;
	SysScanDesc enum_scan;
	HeapTuple	enum_tuple;
	ScanKeyData skey;
	EnumItem   *items;			/* workspace array of (OID, sort order) */
	int			numitems;
	int			maxitems;		/* current allocated size of items[] */
	Oid			bitmap_base;	/* OID represented by bit 0 of bitmap */
	Bitmapset  *bitmap;			/* best known-sorted subset found so far */
	MemoryContext oldcxt;
	int			bm_size,
				start_pos;

	/* Check that this is actually an enum */
	if (tcache->typtype != TYPTYPE_ENUM)
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("%s is not an enum",
						format_type_be(tcache->type_id))));

	/*
	 * Read all the information for members of the enum type.  We collect the
	 * info in working memory in the caller's context, and then transfer it to
	 * permanent memory in CacheMemoryContext.  This minimizes the risk of
	 * leaking memory from CacheMemoryContext in the event of an error partway
	 * through.
	 */
	maxitems = 64;
	items = (EnumItem *) palloc(sizeof(EnumItem) * maxitems);
	numitems = 0;

	/* Scan pg_enum for the members of the target enum type. */
	ScanKeyInit(&skey,
				Anum_pg_enum_enumtypid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(tcache->type_id));

	enum_rel = heap_open(EnumRelationId, AccessShareLock);
	enum_scan = systable_beginscan(enum_rel,
								   EnumTypIdLabelIndexId,
								   true, NULL,
								   1, &skey);

	while (HeapTupleIsValid(enum_tuple = systable_getnext(enum_scan)))
	{
		Form_pg_enum en = (Form_pg_enum) GETSTRUCT(enum_tuple);

		/* Double the workspace array whenever it fills up */
		if (numitems >= maxitems)
		{
			maxitems *= 2;
			items = (EnumItem *) repalloc(items, sizeof(EnumItem) * maxitems);
		}
		/* Record this member's OID and its declared sort position */
		items[numitems].enum_oid = HeapTupleGetOid(enum_tuple);
		items[numitems].sort_order = en->enumsortorder;
		numitems++;
	}

	systable_endscan(enum_scan);
	heap_close(enum_rel, AccessShareLock);

	/* Sort the items into OID order */
	qsort(items, numitems, sizeof(EnumItem), enum_oid_cmp);

	/*
	 * Here, we create a bitmap listing a subset of the enum's OIDs that are
	 * known to be in order and can thus be compared with just OID comparison.
	 *
	 * The point of this is that the enum's initial OIDs were certainly in
	 * order, so there is some subset that can be compared via OID comparison;
	 * and we'd rather not do binary searches unnecessarily.
	 *
	 * This is somewhat heuristic, and might identify a subset of OIDs that
	 * isn't exactly what the type started with.  That's okay as long as the
	 * subset is correctly sorted.
	 */
	bitmap_base = InvalidOid;
	bitmap = NULL;
	bm_size = 1;				/* only save sets of at least 2 OIDs */

	for (start_pos = 0; start_pos < numitems - 1; start_pos++)
	{
		/*
		 * Identify longest sorted subsequence starting at start_pos
		 */
		Bitmapset  *this_bitmap = bms_make_singleton(0);
		int			this_bm_size = 1;
		Oid			start_oid = items[start_pos].enum_oid;
		float4		prev_order = items[start_pos].sort_order;
		int			i;

		for (i = start_pos + 1; i < numitems; i++)
		{
			Oid			offset;

			offset = items[i].enum_oid - start_oid;
			/* quit if bitmap would be too large; cutoff is arbitrary */
			if (offset >= 8192)
				break;
			/* include the item if it's in-order */
			if (items[i].sort_order > prev_order)
			{
				prev_order = items[i].sort_order;
				this_bitmap = bms_add_member(this_bitmap, (int) offset);
				this_bm_size++;
			}
		}

		/* Remember it if larger than previous best */
		if (this_bm_size > bm_size)
		{
			bms_free(bitmap);
			bitmap_base = start_oid;
			bitmap = this_bitmap;
			bm_size = this_bm_size;
		}
		else
			bms_free(this_bitmap);

		/*
		 * Done if it's not possible to find a longer sequence in the rest of
		 * the list.  In typical cases this will happen on the first
		 * iteration, which is why we create the bitmaps on the fly instead of
		 * doing a second pass over the list.
		 */
		if (bm_size >= (numitems - start_pos - 1))
			break;
	}

	/* OK, copy the data into CacheMemoryContext */
	oldcxt = MemoryContextSwitchTo(CacheMemoryContext);
	enumdata = (TypeCacheEnumData *)
		palloc(offsetof(TypeCacheEnumData, enum_values) +
			   numitems * sizeof(EnumItem));
	enumdata->bitmap_base = bitmap_base;
	/* bms_copy runs while CacheMemoryContext is current, so copy persists */
	enumdata->sorted_values = bms_copy(bitmap);
	enumdata->num_values = numitems;
	memcpy(enumdata->enum_values, items, numitems * sizeof(EnumItem));
	MemoryContextSwitchTo(oldcxt);

	/* Release the caller's-context working copies */
	pfree(items);
	bms_free(bitmap);

	/* And link the finished cache struct into the typcache */
	if (tcache->enumData != NULL)
		pfree(tcache->enumData);
	tcache->enumData = enumdata;
}
1760
1761 /*
1762  * Locate the EnumItem with the given OID, if present
1763  */
1764 static EnumItem *
1765 find_enumitem(TypeCacheEnumData *enumdata, Oid arg)
1766 {
1767         EnumItem        srch;
1768
1769         /* On some versions of Solaris, bsearch of zero items dumps core */
1770         if (enumdata->num_values <= 0)
1771                 return NULL;
1772
1773         srch.enum_oid = arg;
1774         return bsearch(&srch, enumdata->enum_values, enumdata->num_values,
1775                                    sizeof(EnumItem), enum_oid_cmp);
1776 }
1777
1778 /*
1779  * qsort comparison function for OID-ordered EnumItems
1780  */
1781 static int
1782 enum_oid_cmp(const void *left, const void *right)
1783 {
1784         const EnumItem *l = (const EnumItem *) left;
1785         const EnumItem *r = (const EnumItem *) right;
1786
1787         if (l->enum_oid < r->enum_oid)
1788                 return -1;
1789         else if (l->enum_oid > r->enum_oid)
1790                 return 1;
1791         else
1792                 return 0;
1793 }