1 /*-------------------------------------------------------------------------
4 * general index access method routines
6 * Portions Copyright (c) 1996-2011, PostgreSQL Global Development Group
7 * Portions Copyright (c) 1994, Regents of the University of California
11 * src/backend/access/index/indexam.c
14 * index_open - open an index relation by relation OID
15 * index_close - close an index relation
16 * index_beginscan - start a scan of an index with amgettuple
17 * index_beginscan_bitmap - start a scan of an index with amgetbitmap
18 * index_rescan - restart a scan of an index
19 * index_endscan - end a scan
20 * index_insert - insert an index tuple into a relation
21 * index_markpos - mark a scan position
22 * index_restrpos - restore a scan position
23 * index_getnext - get the next tuple from a scan
24 * index_getbitmap - get all tuples from a scan
25 * index_bulk_delete - bulk deletion of index tuples
26 * index_vacuum_cleanup - post-deletion cleanup of an index
27 * index_getprocid - get a support procedure OID
28 * index_getprocinfo - get a support procedure's lookup info
31 * This file contains the index_ routines which used
32 * to be a scattered collection of stuff in access/genam.
36 * Scans are implemented as follows:
38 * `0' represents an invalid item pointer.
39 * `-' represents an unknown item pointer.
40 *		`X' represents a known item pointer.
41 * `+' represents known or invalid item pointers.
42 * `*' represents any item pointers.
44 * State is represented by a triple of these symbols in the order of
45 * previous, current, next. Note that the case of reverse scans works
49 * (1) + + - + 0 0 (if the next item pointer is invalid)
50 * (2) + X - (otherwise)
51 * (3) * 0 0 * 0 0 (no change)
52 * (4) + X 0 X 0 0 (shift)
53 * (5) * + X + X - (shift, add unknown)
55 * All other states cannot occur.
57 * Note: It would be possible to cache the status of the previous and
58 * next item pointer using the flags.
60 *-------------------------------------------------------------------------
65 #include "access/relscan.h"
66 #include "access/transam.h"
67 #include "access/xact.h"
69 #include "storage/bufmgr.h"
70 #include "storage/lmgr.h"
71 #include "storage/predicate.h"
72 #include "utils/relcache.h"
73 #include "utils/snapmgr.h"
74 #include "utils/tqual.h"
77 /* ----------------------------------------------------------------
78 * macros used in index_ routines
79 * ----------------------------------------------------------------
81 #define RELATION_CHECKS \
83 AssertMacro(RelationIsValid(indexRelation)), \
84 AssertMacro(PointerIsValid(indexRelation->rd_am)) \
89 AssertMacro(IndexScanIsValid(scan)), \
90 AssertMacro(RelationIsValid(scan->indexRelation)), \
91 AssertMacro(PointerIsValid(scan->indexRelation->rd_am)) \
94 #define GET_REL_PROCEDURE(pname) \
96 procedure = &indexRelation->rd_aminfo->pname; \
97 if (!OidIsValid(procedure->fn_oid)) \
99 RegProcedure procOid = indexRelation->rd_am->pname; \
100 if (!RegProcedureIsValid(procOid)) \
101 elog(ERROR, "invalid %s regproc", CppAsString(pname)); \
102 fmgr_info_cxt(procOid, procedure, indexRelation->rd_indexcxt); \
106 #define GET_SCAN_PROCEDURE(pname) \
108 procedure = &scan->indexRelation->rd_aminfo->pname; \
109 if (!OidIsValid(procedure->fn_oid)) \
111 RegProcedure procOid = scan->indexRelation->rd_am->pname; \
112 if (!RegProcedureIsValid(procOid)) \
113 elog(ERROR, "invalid %s regproc", CppAsString(pname)); \
114 fmgr_info_cxt(procOid, procedure, scan->indexRelation->rd_indexcxt); \
118 static IndexScanDesc index_beginscan_internal(Relation indexRelation,
119 int nkeys, int norderbys);
122 /* ----------------------------------------------------------------
123 * index_ interface functions
124 * ----------------------------------------------------------------
128 * index_open - open an index relation by relation OID
130 * If lockmode is not "NoLock", the specified kind of lock is
131 * obtained on the index. (Generally, NoLock should only be
132 * used if the caller knows it has some appropriate lock on the
135 * An error is raised if the index does not exist.
137 * This is a convenience routine adapted for indexscan use.
138 * Some callers may prefer to use relation_open directly.
/*
 * index_open - open an index relation by relation OID.
 *
 * Opens the relation at the caller-requested lock level (NoLock acquires
 * no lock) and verifies it really is an index, raising a
 * WRONG_OBJECT_TYPE error otherwise.  Returns the opened Relation.
 *
 * NOTE(review): the extraction dropped the local declaration of "r",
 * the ereport(ERROR, ...) call head, and the trailing "return r;".
 */
142 index_open(Oid relationId, LOCKMODE lockmode)
146 	r = relation_open(relationId, lockmode);
/* Reject anything that is not an index with a clean user-facing error */
148 	if (r->rd_rel->relkind != RELKIND_INDEX)
150 				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
151 				 errmsg("\"%s\" is not an index",
152 						RelationGetRelationName(r))));
158 * index_close - close an index relation
160 * If lockmode is not "NoLock", we then release the specified lock.
162 * Note that it is often sensible to hold a lock beyond index_close;
163 * in that case, the lock is released automatically at xact end.
/*
 * index_close - close an index relation.
 *
 * Drops the relcache reference; if lockmode is not NoLock, also releases
 * the lock of that strength.  The LockRelId is copied out BEFORE
 * RelationClose, because the relcache entry may be freed by the close.
 */
167 index_close(Relation relation, LOCKMODE lockmode)
169 	LockRelId	relid = relation->rd_lockInfo.lockRelId;
171 	Assert(lockmode >= NoLock && lockmode < MAX_LOCKMODES);
173 	/* The relcache does the real work... */
174 	RelationClose(relation);
176 	if (lockmode != NoLock)
177 		UnlockRelationId(&relid, lockmode);
181 * index_insert - insert an index tuple into a relation
/*
 * index_insert - insert an index tuple into a relation.
 *
 * Looks up the AM's aminsert procedure and delegates all the work to it,
 * returning its boolean result (per the visible FunctionCall6 call).
 * If the AM does not do its own predicate locking (ampredlocks false),
 * check for serializable conflicts on the whole index here first.
 *
 * NOTE(review): the extraction dropped the "values"/"isnull" parameter
 * lines from the signature and the second argument of
 * CheckForSerializableConflictIn — confirm against the full source.
 */
185 index_insert(Relation indexRelation,
188 			 ItemPointer heap_t_ctid,
189 			 Relation heapRelation,
190 			 IndexUniqueCheck checkUnique)
195 	GET_REL_PROCEDURE(aminsert);
197 	if (!(indexRelation->rd_am->ampredlocks))
198 		CheckForSerializableConflictIn(indexRelation,
203 	 * have the am's insert proc do all the work.
205 	return DatumGetBool(FunctionCall6(procedure,
206 									  PointerGetDatum(indexRelation),
207 									  PointerGetDatum(values),
208 									  PointerGetDatum(isnull),
209 									  PointerGetDatum(heap_t_ctid),
210 									  PointerGetDatum(heapRelation),
211 									  Int32GetDatum((int32) checkUnique)));
215 * index_beginscan - start a scan of an index with amgettuple
217 * Caller must be holding suitable locks on the heap and the index.
/*
 * index_beginscan - start a scan of an index with amgettuple.
 *
 * Thin wrapper over index_beginscan_internal that additionally records
 * the heap relation and snapshot in the scan descriptor, since
 * amgettuple-style scans fetch heap tuples themselves.
 *
 * NOTE(review): the extraction dropped the "Snapshot snapshot" parameter
 * line and the trailing "return scan;".
 */
220 index_beginscan(Relation heapRelation,
221 				Relation indexRelation,
223 				int nkeys, int norderbys)
227 	scan = index_beginscan_internal(indexRelation, nkeys, norderbys);
230 	 * Save additional parameters into the scandesc.  Everything else was set
231 	 * up by RelationGetIndexScan.
233 	scan->heapRelation = heapRelation;
234 	scan->xs_snapshot = snapshot;
240 * index_beginscan_bitmap - start a scan of an index with amgetbitmap
242 * As above, caller had better be holding some lock on the parent heap
243 * relation, even though it's not explicitly mentioned here.
/*
 * index_beginscan_bitmap - start a scan of an index with amgetbitmap.
 *
 * Like index_beginscan, but passes norderbys = 0 (bitmap scans cannot
 * use ordering operators) and records only the snapshot — bitmap scans
 * never touch the heap themselves, so no heapRelation is stored.
 *
 * NOTE(review): the extraction dropped the snapshot/nkeys parameter
 * lines and the trailing "return scan;".
 */
246 index_beginscan_bitmap(Relation indexRelation,
252 	scan = index_beginscan_internal(indexRelation, nkeys, 0);
255 	 * Save additional parameters into the scandesc.  Everything else was set
256 	 * up by RelationGetIndexScan.
258 	scan->xs_snapshot = snapshot;
264 * index_beginscan_internal --- common code for index_beginscan variants
/*
 * index_beginscan_internal --- common code for index_beginscan variants.
 *
 * Acquires a predicate lock on the whole index when the AM doesn't
 * handle predicate locking itself, pins the relcache entry for the
 * duration of the scan (released in index_endscan), and asks the AM's
 * ambeginscan procedure to build and return the IndexScanDesc.
 */
267 index_beginscan_internal(Relation indexRelation,
268 						 int nkeys, int norderbys)
274 	GET_REL_PROCEDURE(ambeginscan);
276 	if (!(indexRelation->rd_am->ampredlocks))
277 		PredicateLockRelation(indexRelation);
280 	 * We hold a reference count to the relcache entry throughout the scan.
282 	RelationIncrementReferenceCount(indexRelation);
285 	 * Tell the AM to open a scan.
287 	scan = (IndexScanDesc)
288 		DatumGetPointer(FunctionCall3(procedure,
289 									  PointerGetDatum(indexRelation),
290 									  Int32GetDatum(nkeys),
291 									  Int32GetDatum(norderbys)));
297 * index_rescan - (re)start a scan of an index
299 * During a restart, the caller may specify a new set of scankeys and/or
300 * orderbykeys; but the number of keys cannot differ from what index_beginscan
301 * was told. (Later we might relax that to "must not exceed", but currently
302 * the index AMs tend to assume that scan->numberOfKeys is what to believe.)
303 * To restart the scan without changing keys, pass NULL for the key arrays.
304 * (Of course, keys *must* be passed on the first call, unless
305 * scan->numberOfKeys is zero.)
/*
 * index_rescan - (re)start a scan of an index (see comment above).
 *
 * Key counts must match what index_beginscan was told (asserted below).
 * Before handing off to the AM's amrescan, we drop any heap-page pin
 * left over from the previous pass and reset the HOT-chain resume state
 * and kill flag, so stale state can't leak into the new scan.
 */
309 index_rescan(IndexScanDesc scan,
310 			 ScanKey keys, int nkeys,
311 			 ScanKey orderbys, int norderbys)
316 	GET_SCAN_PROCEDURE(amrescan);
318 	Assert(nkeys == scan->numberOfKeys);
319 	Assert(norderbys == scan->numberOfOrderBys);
321 	/* Release any held pin on a heap page */
322 	if (BufferIsValid(scan->xs_cbuf))
324 		ReleaseBuffer(scan->xs_cbuf);
325 		scan->xs_cbuf = InvalidBuffer;
/* Forget any partially-walked HOT chain from the previous pass */
328 	scan->xs_next_hot = InvalidOffsetNumber;
330 	scan->kill_prior_tuple = false; /* for safety */
332 	FunctionCall5(procedure,
333 				  PointerGetDatum(scan),
334 				  PointerGetDatum(keys),
335 				  Int32GetDatum(nkeys),
336 				  PointerGetDatum(orderbys),
337 				  Int32GetDatum(norderbys));
341 * index_endscan - end a scan
/*
 * index_endscan - end a scan.
 *
 * Teardown mirrors index_beginscan: release any heap-page pin, let the
 * AM's amendscan clean up its private state, and drop the relcache
 * refcount taken by index_beginscan_internal.
 *
 * NOTE(review): the extraction dropped the final statement freeing the
 * scan descriptor itself after the "Release the scan data structure"
 * comment.
 */
345 index_endscan(IndexScanDesc scan)
350 	GET_SCAN_PROCEDURE(amendscan);
352 	/* Release any held pin on a heap page */
353 	if (BufferIsValid(scan->xs_cbuf))
355 		ReleaseBuffer(scan->xs_cbuf);
356 		scan->xs_cbuf = InvalidBuffer;
359 	/* End the AM's scan */
360 	FunctionCall1(procedure, PointerGetDatum(scan));
362 	/* Release index refcount acquired by index_beginscan */
363 	RelationDecrementReferenceCount(scan->indexRelation);
365 	/* Release the scan data structure itself */
370 * index_markpos - mark a scan position
/*
 * index_markpos - mark a scan position.
 *
 * Pure delegation: the AM's ammarkpos procedure records whatever
 * internal state it needs to restore the position later.
 */
374 index_markpos(IndexScanDesc scan)
379 	GET_SCAN_PROCEDURE(ammarkpos);
381 	FunctionCall1(procedure, PointerGetDatum(scan));
385 * index_restrpos - restore a scan position
387 * NOTE: this only restores the internal scan state of the index AM.
388 * The current result tuple (scan->xs_ctup) doesn't change. See comments
389 * for ExecRestrPos().
391 * NOTE: in the presence of HOT chains, mark/restore only works correctly
392 * if the scan's snapshot is MVCC-safe; that ensures that there's at most one
393 * returnable tuple in each HOT chain, and so restoring the prior state at the
394 * granularity of the index AM is sufficient. Since the only current user
395 * of mark/restore functionality is nodeMergejoin.c, this effectively means
396 * that merge-join plans only work for MVCC snapshots. This could be fixed
397 * if necessary, but for now it seems unimportant.
/*
 * index_restrpos - restore a scan position (see restrictions in the
 * comment block above: only valid for MVCC-safe snapshots, asserted here).
 *
 * Resets the HOT-chain resume state and kill flag before delegating to
 * the AM's amrestrpos, so state from the abandoned scan path can't leak.
 */
401 index_restrpos(IndexScanDesc scan)
405 	Assert(IsMVCCSnapshot(scan->xs_snapshot));
408 	GET_SCAN_PROCEDURE(amrestrpos);
410 	scan->xs_next_hot = InvalidOffsetNumber;
412 	scan->kill_prior_tuple = false; /* for safety */
414 	FunctionCall1(procedure, PointerGetDatum(scan));
418 * index_getnext - get the next heap tuple from a scan
420 * The result is the next heap tuple satisfying the scan keys and the
421 * snapshot, or NULL if no more matching tuples exist. On success,
422 * the buffer containing the heap tuple is pinned (the pin will be dropped
423 * at the next index_getnext or index_endscan).
425 * Note: caller must check scan->xs_recheck, and perform rechecking of the
426 * scan keys if required. We do not do that here because we don't have
427 * enough information to do it efficiently in the general case.
/*
 * index_getnext - get the next heap tuple from a scan (contract in the
 * comment block above).
 *
 * Structure (as visible here): an outer loop asks the index AM for the
 * next TID via amgettuple, then an inner loop walks the HOT chain on the
 * heap page starting at that TID, returning the first chain member
 * visible under scan->xs_snapshot.  On return the heap page stays
 * pinned (dropped on the next call or at endscan); xs_next_hot /
 * xs_prev_xmax carry the resume point if a non-MVCC snapshot may accept
 * further chain members.  If the whole chain is dead, xs_hot_dead tells
 * the AM (via kill_prior_tuple) to forget the index entry.
 *
 * NOTE(review): this extraction has dropped many physical lines (local
 * declarations, braces, loop heads, "return heapTuple;", several
 * statement continuations) — do not edit logic without the full source.
 */
431 index_getnext(IndexScanDesc scan, ScanDirection direction)
433 	HeapTuple	heapTuple = &scan->xs_ctup;
434 	ItemPointer tid = &heapTuple->t_self;
438 	GET_SCAN_PROCEDURE(amgettuple);
440 	Assert(TransactionIdIsValid(RecentGlobalXmin));
443 	 * We always reset xs_hot_dead; if we are here then either we are just
444 	 * starting the scan, or we previously returned a visible tuple, and in
445 	 * either case it's inappropriate to kill the prior index entry.
447 	scan->xs_hot_dead = false;
455 		if (scan->xs_next_hot != InvalidOffsetNumber)
458 			 * We are resuming scan of a HOT chain after having returned an
459 			 * earlier member.	Must still hold pin on current heap page.
461 			Assert(BufferIsValid(scan->xs_cbuf));
462 			Assert(ItemPointerGetBlockNumber(tid) ==
463 				   BufferGetBlockNumber(scan->xs_cbuf));
464 			Assert(TransactionIdIsValid(scan->xs_prev_xmax));
465 			offnum = scan->xs_next_hot;
466 			at_chain_start = false;
467 			scan->xs_next_hot = InvalidOffsetNumber;
475 			 * If we scanned a whole HOT chain and found only dead tuples,
476 			 * tell index AM to kill its entry for that TID.  We do not do this
477 			 * when in recovery because it may violate MVCC to do so.  See
478 			 * comments in RelationGetIndexScan().
480 			if (!scan->xactStartedInRecovery)
481 				scan->kill_prior_tuple = scan->xs_hot_dead;
484 			 * The AM's gettuple proc finds the next index entry matching the
485 			 * scan keys, and puts the TID in xs_ctup.t_self (ie, *tid).  It
486 			 * should also set scan->xs_recheck, though we pay no attention to
489 			found = DatumGetBool(FunctionCall2(procedure,
490 											   PointerGetDatum(scan),
491 											   Int32GetDatum(direction)));
493 			/* Reset kill flag immediately for safety */
494 			scan->kill_prior_tuple = false;
496 			/* If we're out of index entries, break out of outer loop */
500 			pgstat_count_index_tuples(scan->indexRelation, 1);
502 			/* Switch to correct buffer if we don't have it already */
503 			prev_buf = scan->xs_cbuf;
504 			scan->xs_cbuf = ReleaseAndReadBuffer(scan->xs_cbuf,
506 												 ItemPointerGetBlockNumber(tid));
509 			 * Prune page, but only if we weren't already on this page
511 			if (prev_buf != scan->xs_cbuf)
512 				heap_page_prune_opt(scan->heapRelation, scan->xs_cbuf,
515 			/* Prepare to scan HOT chain starting at index-referenced offnum */
516 			offnum = ItemPointerGetOffsetNumber(tid);
517 			at_chain_start = true;
519 			/* We don't know what the first tuple's xmin should be */
520 			scan->xs_prev_xmax = InvalidTransactionId;
522 			/* Initialize flag to detect if all entries are dead */
523 			scan->xs_hot_dead = true;
526 		/* Obtain share-lock on the buffer so we can examine visibility */
527 		LockBuffer(scan->xs_cbuf, BUFFER_LOCK_SHARE);
529 		dp = (Page) BufferGetPage(scan->xs_cbuf);
531 		/* Scan through possible multiple members of HOT-chain */
538 			/* check for bogus TID */
539 			if (offnum < FirstOffsetNumber ||
540 				offnum > PageGetMaxOffsetNumber(dp))
543 			lp = PageGetItemId(dp, offnum);
545 			/* check for unused, dead, or redirected items */
546 			if (!ItemIdIsNormal(lp))
548 				/* We should only see a redirect at start of chain */
549 				if (ItemIdIsRedirected(lp) && at_chain_start)
551 					/* Follow the redirect */
552 					offnum = ItemIdGetRedirect(lp);
553 					at_chain_start = false;
556 				/* else must be end of chain */
561 			 * We must initialize all of *heapTuple (ie, scan->xs_ctup) since
562 			 * it is returned to the executor on success.
564 			heapTuple->t_data = (HeapTupleHeader) PageGetItem(dp, lp);
565 			heapTuple->t_len = ItemIdGetLength(lp);
566 			ItemPointerSetOffsetNumber(tid, offnum);
567 			heapTuple->t_tableOid = RelationGetRelid(scan->heapRelation);
568 			ctid = &heapTuple->t_data->t_ctid;
571 			 * Shouldn't see a HEAP_ONLY tuple at chain start.  (This test
572 			 * should be unnecessary, since the chain root can't be removed
573 			 * while we have pin on the index entry, but let's make it
576 			if (at_chain_start && HeapTupleIsHeapOnly(heapTuple))
580 			 * The xmin should match the previous xmax value, else chain is
581 			 * broken.	(Note: this test is not optional because it protects
582 			 * us against the case where the prior chain member's xmax aborted
583 			 * since we looked at it.)
585 			if (TransactionIdIsValid(scan->xs_prev_xmax) &&
586 				!TransactionIdEquals(scan->xs_prev_xmax,
587 								  HeapTupleHeaderGetXmin(heapTuple->t_data)))
590 			/* If it's visible per the snapshot, we must return it */
591 			valid = HeapTupleSatisfiesVisibility(heapTuple, scan->xs_snapshot,
594 			CheckForSerializableConflictOut(valid, scan->heapRelation,
595 											heapTuple, scan->xs_cbuf);
600 				 * If the snapshot is MVCC, we know that it could accept at
601 				 * most one member of the HOT chain, so we can skip examining
602 				 * any more members.  Otherwise, check for continuation of the
603 				 * HOT-chain, and set state for next time.
605 				if (IsMVCCSnapshot(scan->xs_snapshot)
606 					&& !IsolationIsSerializable())
607 					scan->xs_next_hot = InvalidOffsetNumber;
608 				else if (HeapTupleIsHotUpdated(heapTuple))
610 					Assert(ItemPointerGetBlockNumber(ctid) ==
611 						   ItemPointerGetBlockNumber(tid));
612 					scan->xs_next_hot = ItemPointerGetOffsetNumber(ctid);
613 					scan->xs_prev_xmax = HeapTupleHeaderGetXmax(heapTuple->t_data);
616 					scan->xs_next_hot = InvalidOffsetNumber;
618 				PredicateLockTuple(scan->heapRelation, heapTuple);
620 				LockBuffer(scan->xs_cbuf, BUFFER_LOCK_UNLOCK);
622 				pgstat_count_heap_fetch(scan->indexRelation);
628 			 * If we can't see it, maybe no one else can either.  Check to see
629 			 * if the tuple is dead to all transactions.  If we find that all
630 			 * the tuples in the HOT chain are dead, we'll signal the index AM
631 			 * to not return that TID on future indexscans.
633 			if (scan->xs_hot_dead &&
634 				HeapTupleSatisfiesVacuum(heapTuple->t_data, RecentGlobalXmin,
635 										 scan->xs_cbuf) != HEAPTUPLE_DEAD)
636 				scan->xs_hot_dead = false;
639 			 * Check to see if HOT chain continues past this tuple; if so
640 			 * fetch the next offnum (we don't bother storing it into
641 			 * xs_next_hot, but must store xs_prev_xmax), and loop around.
643 			if (HeapTupleIsHotUpdated(heapTuple))
645 				Assert(ItemPointerGetBlockNumber(ctid) ==
646 					   ItemPointerGetBlockNumber(tid));
647 				offnum = ItemPointerGetOffsetNumber(ctid);
648 				at_chain_start = false;
649 				scan->xs_prev_xmax = HeapTupleHeaderGetXmax(heapTuple->t_data);
652 				break;			/* end of chain */
653 		}						/* loop over a single HOT chain */
655 		LockBuffer(scan->xs_cbuf, BUFFER_LOCK_UNLOCK);
657 		/* Loop around to ask index AM for another TID */
658 		scan->xs_next_hot = InvalidOffsetNumber;
661 	/* Release any held pin on a heap page */
662 	if (BufferIsValid(scan->xs_cbuf))
664 		ReleaseBuffer(scan->xs_cbuf);
665 		scan->xs_cbuf = InvalidBuffer;
668 	return NULL;				/* failure exit */
672 * index_getbitmap - get all tuples at once from an index scan
674 * Adds the TIDs of all heap tuples satisfying the scan keys to a bitmap.
675 * Since there's no interlock between the index scan and the eventual heap
676 * access, this is only safe to use with MVCC-based snapshots: the heap
677 * item slot could have been replaced by a newer tuple by the time we get
680 * Returns the number of matching tuples found. (Note: this might be only
681 * approximate, so it should only be used for statistical purposes.)
/*
 * index_getbitmap - get all tuples at once from an index scan (contract
 * in the comment block above).
 *
 * Delegates to the AM's amgetbitmap, which adds matching TIDs directly
 * to *bitmap and returns the (possibly approximate) tuple count as an
 * int64 Datum; that count is reported to pgstat and returned.
 *
 * NOTE(review): the extraction dropped local declarations (d, ntids),
 * the #endif matching the #ifndef, and the trailing "return ntids;".
 */
685 index_getbitmap(IndexScanDesc scan, TIDBitmap *bitmap)
692 	GET_SCAN_PROCEDURE(amgetbitmap);
694 	/* just make sure this is false... */
695 	scan->kill_prior_tuple = false;
698 	 * have the am's getbitmap proc do all the work.
700 	d = FunctionCall2(procedure,
701 					  PointerGetDatum(scan),
702 					  PointerGetDatum(bitmap));
704 	ntids = DatumGetInt64(d);
706 	/* If int8 is pass-by-ref, must free the result to avoid memory leak */
707 #ifndef USE_FLOAT8_BYVAL
708 	pfree(DatumGetPointer(d));
711 	pgstat_count_index_tuples(scan->indexRelation, ntids);
717 * index_bulk_delete - do mass deletion of index entries
719 * callback routine tells whether a given main-heap tuple is
722 * return value is an optional palloc'd struct of statistics
/*
 * index_bulk_delete - do mass deletion of index entries.
 *
 * The callback decides, per heap TID, whether the corresponding index
 * entries should be removed.  All real work is done by the AM's
 * ambulkdelete procedure; its optional palloc'd statistics struct is
 * captured in "result".
 *
 * NOTE(review): the extraction dropped the trailing "return result;".
 */
725 IndexBulkDeleteResult *
726 index_bulk_delete(IndexVacuumInfo *info,
727 				  IndexBulkDeleteResult *stats,
728 				  IndexBulkDeleteCallback callback,
729 				  void *callback_state)
731 	Relation	indexRelation = info->index;
733 	IndexBulkDeleteResult *result;
736 	GET_REL_PROCEDURE(ambulkdelete);
738 	result = (IndexBulkDeleteResult *)
739 		DatumGetPointer(FunctionCall4(procedure,
740 									  PointerGetDatum(info),
741 									  PointerGetDatum(stats),
742 									  PointerGetDatum((Pointer) callback),
743 									  PointerGetDatum(callback_state)));
749 * index_vacuum_cleanup - do post-deletion cleanup of an index
751 * return value is an optional palloc'd struct of statistics
/*
 * index_vacuum_cleanup - do post-deletion cleanup of an index.
 *
 * Delegates to the AM's amvacuumcleanup; "stats" carries the result of
 * any preceding index_bulk_delete calls, and the AM may return an
 * updated (or freshly palloc'd) statistics struct.
 *
 * NOTE(review): the extraction dropped the trailing "return result;".
 */
754 IndexBulkDeleteResult *
755 index_vacuum_cleanup(IndexVacuumInfo *info,
756 					 IndexBulkDeleteResult *stats)
758 	Relation	indexRelation = info->index;
760 	IndexBulkDeleteResult *result;
763 	GET_REL_PROCEDURE(amvacuumcleanup);
765 	result = (IndexBulkDeleteResult *)
766 		DatumGetPointer(FunctionCall2(procedure,
767 									  PointerGetDatum(info),
768 									  PointerGetDatum(stats)));
776 * Index access methods typically require support routines that are
777 * not directly the implementation of any WHERE-clause query operator
778 * and so cannot be kept in pg_amop. Instead, such routines are kept
779 * in pg_amproc. These registered procedure OIDs are assigned numbers
780 * according to a convention established by the access method.
781 * The general index code doesn't know anything about the routines
782 * involved; it just builds an ordered list of them for
783 * each attribute on which an index is defined.
785 * As of Postgres 8.3, support routines within an operator family
786 * are further subdivided by the "left type" and "right type" of the
787 * query operator(s) that they support. The "default" functions for a
788 * particular indexed attribute are those with both types equal to
789 * the index opclass' opcintype (note that this is subtly different
790 * from the indexed attribute's own type: it may be a binary-compatible
791 * type instead). Only the default functions are stored in relcache
792 * entries --- access methods can use the syscache to look up non-default
795 * This routine returns the requested default procedure OID for a
796 * particular indexed attribute.
/*
 * index_getprocid - get the OID of the default support procedure
 * (attnum, procnum) for this index (see the long comment block above).
 *
 * Support OIDs are cached in rd_support as a flat array laid out
 * attribute-major: entry index = amsupport * (attnum-1) + (procnum-1).
 * Both attnum and procnum are 1-based, hence the -1 adjustments.
 *
 * NOTE(review): the extraction dropped the attnum/procnum parameter
 * lines from the signature and some local declarations.
 */
800 index_getprocid(Relation irel,
808 	nproc = irel->rd_am->amsupport;
810 	Assert(procnum > 0 && procnum <= (uint16) nproc);
812 	procindex = (nproc * (attnum - 1)) + (procnum - 1);
814 	loc = irel->rd_support;
818 	return loc[procindex];
824 * This routine allows index AMs to keep fmgr lookup info for
825 * support procs in the relcache. As above, only the "default"
826 * functions for any particular indexed attribute are cached.
828 * Note: the return value points into cached data that will be lost during
829 * any relcache rebuild! Therefore, either use the callinfo right away,
830 * or save it only after having acquired some type of lock on the index rel.
/*
 * index_getprocinfo - get cached FmgrInfo for a default support
 * procedure (see caveats in the comment block above: the result points
 * into relcache storage and can vanish on a relcache rebuild).
 *
 * Uses the same attribute-major indexing as index_getprocid into the
 * rd_supportinfo array, lazily populating the FmgrInfo on first use in
 * the index's private memory context, and stamping it with the
 * attribute's collation.
 *
 * NOTE(review): the function's tail ("return locinfo;" and closing
 * brace) lies beyond this extraction; only the visible portion is
 * annotated here.
 */
834 index_getprocinfo(Relation irel,
842 	nproc = irel->rd_am->amsupport;
844 	Assert(procnum > 0 && procnum <= (uint16) nproc);
846 	procindex = (nproc * (attnum - 1)) + (procnum - 1);
848 	locinfo = irel->rd_supportinfo;
850 	Assert(locinfo != NULL);
852 	locinfo += procindex;
854 	/* Initialize the lookup info if first time through */
855 	if (locinfo->fn_oid == InvalidOid)
857 		RegProcedure *loc = irel->rd_support;
862 		procId = loc[procindex];
865 		 * Complain if function was not found during IndexSupportInitialize.
866 		 * This should not happen unless the system tables contain bogus
867 		 * entries for the index opclass.  (If an AM wants to allow a support
868 		 * function to be optional, it can use index_getprocid.)
870 		if (!RegProcedureIsValid(procId))
871 			elog(ERROR, "missing support function %d for attribute %d of index \"%s\"",
872 				 procnum, attnum, RelationGetRelationName(irel));
874 		fmgr_info_cxt(procId, locinfo, irel->rd_indexcxt);
875 		fmgr_info_set_collation(irel->rd_indcollation[attnum-1], locinfo);