/*-------------------------------------------------------------------------
 *
 * indextuple.c
 *	   This file contains index tuple accessor and mutator routines,
 *	   as well as various tuple utilities.
 *
 * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/access/common/indextuple.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/heapam.h"
#include "access/itup.h"
#include "access/tuptoaster.h"
#include "utils/rel.h"
/* ----------------------------------------------------------------
 *				  index_ tuple interface routines
 * ----------------------------------------------------------------
 */
33 * This shouldn't leak any memory; otherwise, callers such as
34 * tuplesort_putindextuplevalues() will be very unhappy.
38 index_form_tuple(TupleDesc tupleDescriptor,
42 char *tp; /* tuple pointer */
43 IndexTuple tuple; /* return tuple */
48 unsigned short infomask = 0;
51 int numberOfAttributes = tupleDescriptor->natts;
53 #ifdef TOAST_INDEX_HACK
54 Datum untoasted_values[INDEX_MAX_KEYS];
55 bool untoasted_free[INDEX_MAX_KEYS];
58 if (numberOfAttributes > INDEX_MAX_KEYS)
60 (errcode(ERRCODE_TOO_MANY_COLUMNS),
61 errmsg("number of index columns (%d) exceeds limit (%d)",
62 numberOfAttributes, INDEX_MAX_KEYS)));
64 #ifdef TOAST_INDEX_HACK
65 for (i = 0; i < numberOfAttributes; i++)
67 Form_pg_attribute att = tupleDescriptor->attrs[i];
69 untoasted_values[i] = values[i];
70 untoasted_free[i] = false;
72 /* Do nothing if value is NULL or not of varlena type */
73 if (isnull[i] || att->attlen != -1)
77 * If value is stored EXTERNAL, must fetch it so we are not depending
78 * on outside storage. This should be improved someday.
80 if (VARATT_IS_EXTERNAL(DatumGetPointer(values[i])))
83 PointerGetDatum(heap_tuple_fetch_attr((struct varlena *)
84 DatumGetPointer(values[i])));
85 untoasted_free[i] = true;
89 * If value is above size target, and is of a compressible datatype,
90 * try to compress it in-line.
92 if (!VARATT_IS_EXTENDED(DatumGetPointer(untoasted_values[i])) &&
93 VARSIZE(DatumGetPointer(untoasted_values[i])) > TOAST_INDEX_TARGET &&
94 (att->attstorage == 'x' || att->attstorage == 'm'))
96 Datum cvalue = toast_compress_datum(untoasted_values[i]);
98 if (DatumGetPointer(cvalue) != NULL)
100 /* successful compression */
101 if (untoasted_free[i])
102 pfree(DatumGetPointer(untoasted_values[i]));
103 untoasted_values[i] = cvalue;
104 untoasted_free[i] = true;
110 for (i = 0; i < numberOfAttributes; i++)
120 infomask |= INDEX_NULL_MASK;
122 hoff = IndexInfoFindDataOffset(infomask);
123 #ifdef TOAST_INDEX_HACK
124 data_size = heap_compute_data_size(tupleDescriptor,
125 untoasted_values, isnull);
127 data_size = heap_compute_data_size(tupleDescriptor,
130 size = hoff + data_size;
131 size = MAXALIGN(size); /* be conservative */
133 tp = (char *) palloc0(size);
134 tuple = (IndexTuple) tp;
136 heap_fill_tuple(tupleDescriptor,
137 #ifdef TOAST_INDEX_HACK
146 (hasnull ? (bits8 *) tp + sizeof(IndexTupleData) : NULL));
148 #ifdef TOAST_INDEX_HACK
149 for (i = 0; i < numberOfAttributes; i++)
151 if (untoasted_free[i])
152 pfree(DatumGetPointer(untoasted_values[i]));
157 * We do this because heap_fill_tuple wants to initialize a "tupmask"
158 * which is used for HeapTuples, but we want an indextuple infomask. The
159 * only relevant info is the "has variable attributes" field. We have
160 * already set the hasnull bit above.
162 if (tupmask & HEAP_HASVARWIDTH)
163 infomask |= INDEX_VAR_MASK;
165 /* Also assert we got rid of external attributes */
166 #ifdef TOAST_INDEX_HACK
167 Assert((tupmask & HEAP_HASEXTERNAL) == 0);
171 * Here we make sure that the size will fit in the field reserved for it
174 if ((size & INDEX_SIZE_MASK) != size)
176 (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
177 errmsg("index row requires %zu bytes, maximum size is %zu",
178 size, (Size) INDEX_SIZE_MASK)));
183 * initialize metadata
185 tuple->t_info = infomask;
190 * nocache_index_getattr
192 * This gets called from index_getattr() macro, and only in cases
193 * where we can't use cacheoffset and the value is not null.
195 * This caches attribute offsets in the attribute descriptor.
197 * An alternative way to speed things up would be to cache offsets
198 * with the tuple, but that seems more difficult unless you take
199 * the storage hit of actually putting those offsets into the
200 * tuple you send to disk. Yuck.
202 * This scheme will be slightly slower than that, but should
203 * perform well for queries which hit large #'s of tuples. After
204 * you cache the offsets once, examining all the other tuples using
205 * the same attribute descriptor will go much quicker. -cim 5/4/91
209 nocache_index_getattr(IndexTuple tup,
213 Form_pg_attribute *att = tupleDesc->attrs;
214 char *tp; /* ptr to data part of tuple */
215 bits8 *bp = NULL; /* ptr to null bitmap in tuple */
216 bool slow = false; /* do we have to walk attrs? */
217 int data_off; /* tuple data offset */
218 int off; /* current offset within data */
223 * 1: No nulls and no variable-width attributes.
224 * 2: Has a null or a var-width AFTER att.
225 * 3: Has nulls or var-widths BEFORE att.
229 data_off = IndexInfoFindDataOffset(tup->t_info);
233 if (IndexTupleHasNulls(tup))
236 * there's a null somewhere in the tuple
238 * check to see if desired att is null
241 /* XXX "knows" t_bits are just after fixed tuple header! */
242 bp = (bits8 *) ((char *) tup + sizeof(IndexTupleData));
245 * Now check to see if any preceding bits are null...
248 int byte = attnum >> 3;
249 int finalbit = attnum & 0x07;
251 /* check for nulls "before" final bit of last byte */
252 if ((~bp[byte]) & ((1 << finalbit) - 1))
256 /* check for nulls in any "earlier" bytes */
259 for (i = 0; i < byte; i++)
271 tp = (char *) tup + data_off;
276 * If we get here, there are no nulls up to and including the target
277 * attribute. If we have a cached offset, we can use it.
279 if (att[attnum]->attcacheoff >= 0)
281 return fetchatt(att[attnum],
282 tp + att[attnum]->attcacheoff);
286 * Otherwise, check for non-fixed-length attrs up to and including
287 * target. If there aren't any, it's safe to cheaply initialize the
288 * cached offsets for these attrs.
290 if (IndexTupleHasVarwidths(tup))
294 for (j = 0; j <= attnum; j++)
296 if (att[j]->attlen <= 0)
307 int natts = tupleDesc->natts;
311 * If we get here, we have a tuple with no nulls or var-widths up to
312 * and including the target attribute, so we can use the cached offset
313 * ... only we don't have it yet, or we'd not have got here. Since
314 * it's cheap to compute offsets for fixed-width columns, we take the
315 * opportunity to initialize the cached offsets for *all* the leading
316 * fixed-width columns, in hope of avoiding future visits to this
319 att[0]->attcacheoff = 0;
321 /* we might have set some offsets in the slow path previously */
322 while (j < natts && att[j]->attcacheoff > 0)
325 off = att[j - 1]->attcacheoff + att[j - 1]->attlen;
327 for (; j < natts; j++)
329 if (att[j]->attlen <= 0)
332 off = att_align_nominal(off, att[j]->attalign);
334 att[j]->attcacheoff = off;
336 off += att[j]->attlen;
341 off = att[attnum]->attcacheoff;
345 bool usecache = true;
349 * Now we know that we have to walk the tuple CAREFULLY. But we still
350 * might be able to cache some offsets for next time.
352 * Note - This loop is a little tricky. For each non-null attribute,
353 * we have to first account for alignment padding before the attr,
354 * then advance over the attr based on its length. Nulls have no
355 * storage and no alignment padding either. We can use/set
356 * attcacheoff until we reach either a null or a var-width attribute.
359 for (i = 0;; i++) /* loop exit is at "break" */
361 if (IndexTupleHasNulls(tup) && att_isnull(i, bp))
364 continue; /* this cannot be the target att */
367 /* If we know the next offset, we can skip the rest */
368 if (usecache && att[i]->attcacheoff >= 0)
369 off = att[i]->attcacheoff;
370 else if (att[i]->attlen == -1)
373 * We can only cache the offset for a varlena attribute if the
374 * offset is already suitably aligned, so that there would be
375 * no pad bytes in any case: then the offset will be valid for
376 * either an aligned or unaligned value.
379 off == att_align_nominal(off, att[i]->attalign))
380 att[i]->attcacheoff = off;
383 off = att_align_pointer(off, att[i]->attalign, -1,
390 /* not varlena, so safe to use att_align_nominal */
391 off = att_align_nominal(off, att[i]->attalign);
394 att[i]->attcacheoff = off;
400 off = att_addlength_pointer(off, att[i]->attlen, tp + off);
402 if (usecache && att[i]->attlen <= 0)
407 return fetchatt(att[attnum], tp + off);
411 * Convert an index tuple into Datum/isnull arrays.
413 * The caller must allocate sufficient storage for the output arrays.
414 * (INDEX_MAX_KEYS entries should be enough.)
417 index_deform_tuple(IndexTuple tup, TupleDesc tupleDescriptor,
418 Datum *values, bool *isnull)
422 /* Assert to protect callers who allocate fixed-size arrays */
423 Assert(tupleDescriptor->natts <= INDEX_MAX_KEYS);
425 for (i = 0; i < tupleDescriptor->natts; i++)
427 values[i] = index_getattr(tup, i + 1, tupleDescriptor, &isnull[i]);
432 * Create a palloc'd copy of an index tuple.
435 CopyIndexTuple(IndexTuple source)
440 size = IndexTupleSize(source);
441 result = (IndexTuple) palloc(size);
442 memcpy(result, source, size);
447 * Reform index tuple. Truncate nonkey (INCLUDING) attributes.
450 index_truncate_tuple(Relation idxrel, IndexTuple olditup)
452 TupleDesc itupdesc = RelationGetDescr(idxrel);
453 Datum values[INDEX_MAX_KEYS];
454 bool isnull[INDEX_MAX_KEYS];
456 int indnatts = IndexRelationGetNumberOfAttributes(idxrel);
457 int indnkeyatts = IndexRelationGetNumberOfKeyAttributes(idxrel);
459 Assert(indnatts <= INDEX_MAX_KEYS);
460 Assert(indnkeyatts > 0);
461 Assert(indnkeyatts < indnatts);
463 index_deform_tuple(olditup, itupdesc, values, isnull);
465 /* form new tuple that will contain only key attributes */
466 itupdesc->natts = indnkeyatts;
467 newitup = index_form_tuple(itupdesc, values, isnull);
468 newitup->t_tid = olditup->t_tid;
470 itupdesc->natts = indnatts;
472 Assert(IndexTupleSize(newitup) <= IndexTupleSize(olditup));