]> granicus.if.org Git - postgresql/blob - src/backend/access/common/indextuple.c
Update copyrights for 2013
[postgresql] / src / backend / access / common / indextuple.c
1 /*-------------------------------------------------------------------------
2  *
3  * indextuple.c
4  *         This file contains index tuple accessor and mutator routines,
5  *         as well as various tuple utilities.
6  *
7  * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
8  * Portions Copyright (c) 1994, Regents of the University of California
9  *
10  *
11  * IDENTIFICATION
12  *        src/backend/access/common/indextuple.c
13  *
14  *-------------------------------------------------------------------------
15  */
16
17 #include "postgres.h"
18
19 #include "access/heapam.h"
20 #include "access/itup.h"
21 #include "access/tuptoaster.h"
22
23
24 /* ----------------------------------------------------------------
25  *                                index_ tuple interface routines
26  * ----------------------------------------------------------------
27  */
28
/* ----------------
 *		index_form_tuple
 *
 *		Construct an index tuple (palloc'd) from the given values/isnull
 *		arrays, laid out per tupleDescriptor.  Errors out if the column
 *		count exceeds INDEX_MAX_KEYS or the result size won't fit in the
 *		t_info size field.  With TOAST_INDEX_HACK, out-of-line varlena
 *		values are fetched and oversized ones compressed in-line, so the
 *		tuple does not depend on outside TOAST storage.
 * ----------------
 */
IndexTuple
index_form_tuple(TupleDesc tupleDescriptor,
				 Datum *values,
				 bool *isnull)
{
	char	   *tp;				/* tuple pointer */
	IndexTuple	tuple;			/* return tuple */
	Size		size,
				data_size,
				hoff;
	int			i;
	unsigned short infomask = 0;
	bool		hasnull = false;
	uint16		tupmask = 0;
	int			numberOfAttributes = tupleDescriptor->natts;

#ifdef TOAST_INDEX_HACK
	/* working copies of values[] with detoasted/compressed replacements */
	Datum		untoasted_values[INDEX_MAX_KEYS];
	bool		untoasted_free[INDEX_MAX_KEYS];	/* true if we must pfree it */
#endif

	if (numberOfAttributes > INDEX_MAX_KEYS)
		ereport(ERROR,
				(errcode(ERRCODE_TOO_MANY_COLUMNS),
				 errmsg("number of index columns (%d) exceeds limit (%d)",
						numberOfAttributes, INDEX_MAX_KEYS)));

#ifdef TOAST_INDEX_HACK
	for (i = 0; i < numberOfAttributes; i++)
	{
		Form_pg_attribute att = tupleDescriptor->attrs[i];

		untoasted_values[i] = values[i];
		untoasted_free[i] = false;

		/* Do nothing if value is NULL or not of varlena type */
		if (isnull[i] || att->attlen != -1)
			continue;

		/*
		 * If value is stored EXTERNAL, must fetch it so we are not depending
		 * on outside storage.	This should be improved someday.
		 */
		if (VARATT_IS_EXTERNAL(DatumGetPointer(values[i])))
		{
			untoasted_values[i] =
				PointerGetDatum(heap_tuple_fetch_attr((struct varlena *)
												DatumGetPointer(values[i])));
			untoasted_free[i] = true;
		}

		/*
		 * If value is above size target, and is of a compressible datatype,
		 * try to compress it in-line.
		 */
		if (!VARATT_IS_EXTENDED(DatumGetPointer(untoasted_values[i])) &&
		VARSIZE(DatumGetPointer(untoasted_values[i])) > TOAST_INDEX_TARGET &&
			(att->attstorage == 'x' || att->attstorage == 'm'))
		{
			Datum		cvalue = toast_compress_datum(untoasted_values[i]);

			if (DatumGetPointer(cvalue) != NULL)
			{
				/* successful compression */
				if (untoasted_free[i])
					pfree(DatumGetPointer(untoasted_values[i]));
				untoasted_values[i] = cvalue;
				untoasted_free[i] = true;
			}
		}
	}
#endif

	/* Scan for any null attribute; presence affects header size and mask */
	for (i = 0; i < numberOfAttributes; i++)
	{
		if (isnull[i])
		{
			hasnull = true;
			break;
		}
	}

	if (hasnull)
		infomask |= INDEX_NULL_MASK;

	/* header size depends on whether a null bitmap is included */
	hoff = IndexInfoFindDataOffset(infomask);
#ifdef TOAST_INDEX_HACK
	data_size = heap_compute_data_size(tupleDescriptor,
									   untoasted_values, isnull);
#else
	data_size = heap_compute_data_size(tupleDescriptor,
									   values, isnull);
#endif
	size = hoff + data_size;
	size = MAXALIGN(size);		/* be conservative */

	/* palloc0 so alignment padding bytes are zeroed */
	tp = (char *) palloc0(size);
	tuple = (IndexTuple) tp;

	heap_fill_tuple(tupleDescriptor,
#ifdef TOAST_INDEX_HACK
					untoasted_values,
#else
					values,
#endif
					isnull,
					(char *) tp + hoff,
					data_size,
					&tupmask,
					(hasnull ? (bits8 *) tp + sizeof(IndexTupleData) : NULL));

#ifdef TOAST_INDEX_HACK
	/* free any short-lived detoasted/compressed copies made above */
	for (i = 0; i < numberOfAttributes; i++)
	{
		if (untoasted_free[i])
			pfree(DatumGetPointer(untoasted_values[i]));
	}
#endif

	/*
	 * We do this because heap_fill_tuple wants to initialize a "tupmask"
	 * which is used for HeapTuples, but we want an indextuple infomask. The
	 * only relevant info is the "has variable attributes" field. We have
	 * already set the hasnull bit above.
	 */
	if (tupmask & HEAP_HASVARWIDTH)
		infomask |= INDEX_VAR_MASK;

	/*
	 * Here we make sure that the size will fit in the field reserved for it
	 * in t_info.
	 */
	if ((size & INDEX_SIZE_MASK) != size)
		ereport(ERROR,
				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
				 errmsg("index row requires %lu bytes, maximum size is %lu",
						(unsigned long) size,
						(unsigned long) INDEX_SIZE_MASK)));

	/* low-order bits of t_info hold the tuple size */
	infomask |= size;

	/*
	 * initialize metadata
	 */
	tuple->t_info = infomask;
	return tuple;
}
180
/* ----------------
 *		nocache_index_getattr
 *
 *		This gets called from index_getattr() macro, and only in cases
 *		where we can't use cacheoffset and the value is not null.
 *
 *		Returns the Datum for 1-based attribute attnum of index tuple tup.
 *
 *		This caches attribute offsets in the attribute descriptor.
 *
 *		An alternative way to speed things up would be to cache offsets
 *		with the tuple, but that seems more difficult unless you take
 *		the storage hit of actually putting those offsets into the
 *		tuple you send to disk.  Yuck.
 *
 *		This scheme will be slightly slower than that, but should
 *		perform well for queries which hit large #'s of tuples.  After
 *		you cache the offsets once, examining all the other tuples using
 *		the same attribute descriptor will go much quicker. -cim 5/4/91
 * ----------------
 */
Datum
nocache_index_getattr(IndexTuple tup,
					  int attnum,
					  TupleDesc tupleDesc)
{
	Form_pg_attribute *att = tupleDesc->attrs;
	char	   *tp;				/* ptr to data part of tuple */
	bits8	   *bp = NULL;		/* ptr to null bitmap in tuple */
	bool		slow = false;	/* do we have to walk attrs? */
	int			data_off;		/* tuple data offset */
	int			off;			/* current offset within data */

	/* ----------------
	 *	 Three cases:
	 *
	 *	 1: No nulls and no variable-width attributes.
	 *	 2: Has a null or a var-width AFTER att.
	 *	 3: Has nulls or var-widths BEFORE att.
	 * ----------------
	 */

	/* start of data area (accounts for null bitmap, if any) */
	data_off = IndexInfoFindDataOffset(tup->t_info);

	attnum--;					/* now 0-based */

	if (IndexTupleHasNulls(tup))
	{
		/*
		 * there's a null somewhere in the tuple
		 *
		 * check to see if desired att is null
		 */

		/* XXX "knows" t_bits are just after fixed tuple header! */
		bp = (bits8 *) ((char *) tup + sizeof(IndexTupleData));

		/*
		 * Now check to see if any preceding bits are null...
		 */
		{
			int			byte = attnum >> 3;
			int			finalbit = attnum & 0x07;

			/* check for nulls "before" final bit of last byte */
			if ((~bp[byte]) & ((1 << finalbit) - 1))
				slow = true;
			else
			{
				/* check for nulls in any "earlier" bytes */
				int			i;

				for (i = 0; i < byte; i++)
				{
					if (bp[i] != 0xFF)
					{
						slow = true;
						break;
					}
				}
			}
		}
	}

	tp = (char *) tup + data_off;

	if (!slow)
	{
		/*
		 * If we get here, there are no nulls up to and including the target
		 * attribute.  If we have a cached offset, we can use it.
		 */
		if (att[attnum]->attcacheoff >= 0)
		{
			return fetchatt(att[attnum],
							tp + att[attnum]->attcacheoff);
		}

		/*
		 * Otherwise, check for non-fixed-length attrs up to and including
		 * target.		If there aren't any, it's safe to cheaply initialize the
		 * cached offsets for these attrs.
		 */
		if (IndexTupleHasVarwidths(tup))
		{
			int			j;

			for (j = 0; j <= attnum; j++)
			{
				if (att[j]->attlen <= 0)
				{
					slow = true;
					break;
				}
			}
		}
	}

	if (!slow)
	{
		int			natts = tupleDesc->natts;
		int			j = 1;

		/*
		 * If we get here, we have a tuple with no nulls or var-widths up to
		 * and including the target attribute, so we can use the cached offset
		 * ... only we don't have it yet, or we'd not have got here.  Since
		 * it's cheap to compute offsets for fixed-width columns, we take the
		 * opportunity to initialize the cached offsets for *all* the leading
		 * fixed-width columns, in hope of avoiding future visits to this
		 * routine.
		 */
		att[0]->attcacheoff = 0;

		/* we might have set some offsets in the slow path previously */
		while (j < natts && att[j]->attcacheoff > 0)
			j++;

		off = att[j - 1]->attcacheoff + att[j - 1]->attlen;

		for (; j < natts; j++)
		{
			if (att[j]->attlen <= 0)
				break;			/* can't cache past a var-width column */

			off = att_align_nominal(off, att[j]->attalign);

			att[j]->attcacheoff = off;

			off += att[j]->attlen;
		}

		/* target attr must have been reached, since it's fixed-width */
		Assert(j > attnum);

		off = att[attnum]->attcacheoff;
	}
	else
	{
		bool		usecache = true;
		int			i;

		/*
		 * Now we know that we have to walk the tuple CAREFULLY.  But we still
		 * might be able to cache some offsets for next time.
		 *
		 * Note - This loop is a little tricky.  For each non-null attribute,
		 * we have to first account for alignment padding before the attr,
		 * then advance over the attr based on its length.		Nulls have no
		 * storage and no alignment padding either.  We can use/set
		 * attcacheoff until we reach either a null or a var-width attribute.
		 */
		off = 0;
		for (i = 0;; i++)		/* loop exit is at "break" */
		{
			if (IndexTupleHasNulls(tup) && att_isnull(i, bp))
			{
				usecache = false;
				continue;		/* this cannot be the target att */
			}

			/* If we know the next offset, we can skip the rest */
			if (usecache && att[i]->attcacheoff >= 0)
				off = att[i]->attcacheoff;
			else if (att[i]->attlen == -1)
			{
				/*
				 * We can only cache the offset for a varlena attribute if the
				 * offset is already suitably aligned, so that there would be
				 * no pad bytes in any case: then the offset will be valid for
				 * either an aligned or unaligned value.
				 */
				if (usecache &&
					off == att_align_nominal(off, att[i]->attalign))
					att[i]->attcacheoff = off;
				else
				{
					off = att_align_pointer(off, att[i]->attalign, -1,
											tp + off);
					usecache = false;
				}
			}
			else
			{
				/* not varlena, so safe to use att_align_nominal */
				off = att_align_nominal(off, att[i]->attalign);

				if (usecache)
					att[i]->attcacheoff = off;
			}

			if (i == attnum)
				break;

			off = att_addlength_pointer(off, att[i]->attlen, tp + off);

			if (usecache && att[i]->attlen <= 0)
				usecache = false;
		}
	}

	return fetchatt(att[attnum], tp + off);
}
401
402 /*
403  * Convert an index tuple into Datum/isnull arrays.
404  *
405  * The caller must allocate sufficient storage for the output arrays.
406  * (INDEX_MAX_KEYS entries should be enough.)
407  */
408 void
409 index_deform_tuple(IndexTuple tup, TupleDesc tupleDescriptor,
410                                    Datum *values, bool *isnull)
411 {
412         int                     i;
413
414         /* Assert to protect callers who allocate fixed-size arrays */
415         Assert(tupleDescriptor->natts <= INDEX_MAX_KEYS);
416
417         for (i = 0; i < tupleDescriptor->natts; i++)
418         {
419                 values[i] = index_getattr(tup, i + 1, tupleDescriptor, &isnull[i]);
420         }
421 }
422
423 /*
424  * Create a palloc'd copy of an index tuple.
425  */
426 IndexTuple
427 CopyIndexTuple(IndexTuple source)
428 {
429         IndexTuple      result;
430         Size            size;
431
432         size = IndexTupleSize(source);
433         result = (IndexTuple) palloc(size);
434         memcpy(result, source, size);
435         return result;
436 }