/*-------------------------------------------------------------------------
 *
 * genam.h
 *	  POSTGRES generalized index access method definitions.
 *
 *
 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * src/include/access/genam.h
 *
 *-------------------------------------------------------------------------
 */
#ifndef GENAM_H
#define GENAM_H
#include "access/sdir.h"
#include "access/skey.h"
#include "nodes/tidbitmap.h"
#include "storage/lockdefs.h"
#include "utils/relcache.h"
#include "utils/snapshot.h"

/* We don't want this file to depend on execnodes.h. */
struct IndexInfo;

/*
 * Struct for statistics returned by ambuild
 */
typedef struct IndexBuildResult
{
	double		heap_tuples;	/* # of tuples seen in parent table */
	double		index_tuples;	/* # of tuples inserted into index */
} IndexBuildResult;
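
/*
 * Illustration only, not part of this header: a hypothetical AM's ambuild
 * callback would typically palloc this struct and fill it from counters
 * kept in its build state.  "foo_build" and FooBuildState are invented
 * names used only for this sketch.
 *
 *	IndexBuildResult *
 *	foo_build(Relation heap, Relation index, struct IndexInfo *indexInfo)
 *	{
 *		FooBuildState buildstate;
 *		IndexBuildResult *result;
 *
 *		... scan the heap, inserting index entries and counting them ...
 *
 *		result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult));
 *		result->heap_tuples = buildstate.heap_tuples_seen;
 *		result->index_tuples = buildstate.index_tuples_inserted;
 *		return result;
 *	}
 */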

/*
 * Struct for input arguments passed to ambulkdelete and amvacuumcleanup
 *
 * num_heap_tuples is accurate only when estimated_count is false;
 * otherwise it's just an estimate (currently, the estimate is the
 * prior value of the relation's pg_class.reltuples field).  It will
 * always just be an estimate during ambulkdelete.
 */
typedef struct IndexVacuumInfo
{
	Relation	index;			/* the index being vacuumed */
	bool		analyze_only;	/* ANALYZE (without any actual vacuum) */
	bool		estimated_count;	/* num_heap_tuples is an estimate */
	int			message_level;	/* ereport level for progress messages */
	double		num_heap_tuples;	/* tuples remaining in heap */
	BufferAccessStrategy strategy;	/* access strategy for reads */
} IndexVacuumInfo;

/*
 * Struct for statistics returned by ambulkdelete and amvacuumcleanup
 *
 * This struct is normally allocated by the first ambulkdelete call and then
 * passed along through subsequent ones until amvacuumcleanup; however,
 * amvacuumcleanup must be prepared to allocate it in the case where no
 * ambulkdelete calls were made (because no tuples needed deletion).
 * Note that an index AM could choose to return a larger struct
 * of which this is just the first field; this provides a way for ambulkdelete
 * to communicate additional private data to amvacuumcleanup.
 *
 * Note: pages_removed is the amount by which the index physically shrank,
 * if any (ie the change in its total size on disk).  pages_deleted and
 * pages_free refer to free space within the index file.  Some index AMs
 * may compute num_index_tuples by reference to num_heap_tuples, in which
 * case they should copy the estimated_count field from IndexVacuumInfo.
 */
typedef struct IndexBulkDeleteResult
{
	BlockNumber num_pages;		/* pages remaining in index */
	BlockNumber pages_removed;	/* # removed during vacuum operation */
	bool		estimated_count;	/* num_index_tuples is an estimate */
	double		num_index_tuples;	/* tuples remaining */
	double		tuples_removed; /* # removed during vacuum operation */
	BlockNumber pages_deleted;	/* # unused pages in index */
	BlockNumber pages_free;		/* # pages available for reuse */
} IndexBulkDeleteResult;
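
/*
 * Illustration only: an AM that needs to carry private state from
 * ambulkdelete to amvacuumcleanup can return a larger struct in which an
 * IndexBulkDeleteResult is the first field, for example (FooBulkDeleteState
 * and last_block_scanned are invented names):
 *
 *	typedef struct FooBulkDeleteState
 *	{
 *		IndexBulkDeleteResult stats;
 *		BlockNumber last_block_scanned;
 *	} FooBulkDeleteState;
 *
 * ambulkdelete pallocs a zeroed FooBulkDeleteState on its first call and
 * returns &state->stats; amvacuumcleanup casts the pointer back to reach
 * the private field.  An AM that derives num_index_tuples from
 * num_heap_tuples should also copy estimated_count from the
 * IndexVacuumInfo, roughly:
 *
 *	stats->num_index_tuples = info->num_heap_tuples;
 *	stats->estimated_count = info->estimated_count;
 */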

/* Typedef for callback function to determine if a tuple is bulk-deletable */
typedef bool (*IndexBulkDeleteCallback) (ItemPointer itemptr, void *state);
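
/*
 * Illustration only: the callback passed to index_bulk_delete() usually
 * just probes the caller's collection of dead heap TIDs.  The names below
 * (dead_tid_reaped, DeadTidList, dead_tid_list_contains) are invented.
 *
 *	static bool
 *	dead_tid_reaped(ItemPointer itemptr, void *state)
 *	{
 *		DeadTidList *dead = (DeadTidList *) state;
 *
 *		return dead_tid_list_contains(dead, itemptr);
 *	}
 */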

/* struct definitions appear in relscan.h */
typedef struct IndexScanDescData *IndexScanDesc;
typedef struct SysScanDescData *SysScanDesc;

typedef struct ParallelIndexScanDescData *ParallelIndexScanDesc;

/*
 * Enumeration specifying the type of uniqueness check to perform in
 * index_insert().
 *
 * UNIQUE_CHECK_YES is the traditional Postgres immediate check, possibly
 * blocking to see if a conflicting transaction commits.
 *
 * For deferrable unique constraints, UNIQUE_CHECK_PARTIAL is specified at
 * insertion time.  The index AM should test if the tuple is unique, but
 * should not throw error, block, or prevent the insertion if the tuple
 * appears not to be unique.  We'll recheck later when it is time for the
 * constraint to be enforced.  The AM must return true if the tuple is
 * known unique, false if it is possibly non-unique.  In the "true" case
 * it is safe to omit the later recheck.
 *
 * When it is time to recheck the deferred constraint, a pseudo-insertion
 * call is made with UNIQUE_CHECK_EXISTING.  The tuple is already in the
 * index in this case, so it should not be inserted again.  Rather, just
 * check for conflicting live tuples (possibly blocking).
 */
typedef enum IndexUniqueCheck
{
	UNIQUE_CHECK_NO,			/* Don't do any uniqueness checking */
	UNIQUE_CHECK_YES,			/* Enforce uniqueness at insertion time */
	UNIQUE_CHECK_PARTIAL,		/* Test uniqueness, but no error */
	UNIQUE_CHECK_EXISTING		/* Check if existing tuple is unique */
} IndexUniqueCheck;
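
/*
 * Illustration only: an AM's aminsert typically dispatches on checkUnique
 * along these lines (foo_check_unique and foo_do_insert are invented
 * helpers).  With UNIQUE_CHECK_PARTIAL it must neither error nor block;
 * with UNIQUE_CHECK_EXISTING it only looks for conflicting live tuples and
 * inserts nothing new.
 *
 *	bool		is_unique = true;
 *
 *	if (checkUnique != UNIQUE_CHECK_NO)
 *		is_unique = foo_check_unique(indexRelation, values, isnull,
 *									 heap_t_ctid, checkUnique);
 *	if (checkUnique != UNIQUE_CHECK_EXISTING)
 *		foo_do_insert(indexRelation, values, isnull, heap_t_ctid);
 *	return is_unique;
 */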


/*
 * generalized index_ interface routines (in indexam.c)
 */

/*
 * IndexScanIsValid
 *		True iff the index scan is valid.
 */
#define IndexScanIsValid(scan) PointerIsValid(scan)

extern Relation index_open(Oid relationId, LOCKMODE lockmode);
extern void index_close(Relation relation, LOCKMODE lockmode);

extern bool index_insert(Relation indexRelation,
			 Datum *values, bool *isnull,
			 ItemPointer heap_t_ctid,
			 Relation heapRelation,
			 IndexUniqueCheck checkUnique,
			 struct IndexInfo *indexInfo);

extern IndexScanDesc index_beginscan(Relation heapRelation,
				Relation indexRelation,
				Snapshot snapshot,
				int nkeys, int norderbys);
extern IndexScanDesc index_beginscan_bitmap(Relation indexRelation,
					   Snapshot snapshot,
					   int nkeys);
extern void index_rescan(IndexScanDesc scan,
			 ScanKey keys, int nkeys,
			 ScanKey orderbys, int norderbys);
extern void index_endscan(IndexScanDesc scan);
extern void index_markpos(IndexScanDesc scan);
extern void index_restrpos(IndexScanDesc scan);
extern Size index_parallelscan_estimate(Relation indexrel, Snapshot snapshot);
extern void index_parallelscan_initialize(Relation heaprel, Relation indexrel,
							  Snapshot snapshot, ParallelIndexScanDesc target);
extern void index_parallelrescan(IndexScanDesc scan);
extern IndexScanDesc index_beginscan_parallel(Relation heaprel,
						 Relation indexrel, int nkeys, int norderbys,
						 ParallelIndexScanDesc pscan);
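
/*
 * Illustration only: a simplified view of how a parallel index scan is set
 * up.  Allocation of the dynamic shared memory holding the
 * ParallelIndexScanDesc is elided.
 *
 * Leader, while setting up shared memory:
 *	Size		size = index_parallelscan_estimate(indexrel, snapshot);
 *	... allocate "size" bytes of shared memory; let pscan point at them ...
 *	index_parallelscan_initialize(heaprel, indexrel, snapshot, pscan);
 *
 * Each participant (leader and workers) then opens its own scan with
 *	scan = index_beginscan_parallel(heaprel, indexrel, nkeys, norderbys,
 *									pscan);
 * and proceeds as for an ordinary scan (see the sketch following
 * index_getbitmap() below); index_parallelrescan() resets the shared
 * state when the whole scan must restart.
 */
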
extern ItemPointer index_getnext_tid(IndexScanDesc scan,
				  ScanDirection direction);
extern HeapTuple index_fetch_heap(IndexScanDesc scan);
extern HeapTuple index_getnext(IndexScanDesc scan, ScanDirection direction);
extern int64 index_getbitmap(IndexScanDesc scan, TIDBitmap *bitmap);
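
/*
 * Illustration only: a minimal non-parallel scan of one index, assuming
 * "scankeys" holds nkeys initialized ScanKey entries.  index_getnext() is
 * roughly index_getnext_tid() plus index_fetch_heap(), returning only
 * heap tuples visible to the scan's snapshot.
 *
 *	IndexScanDesc scan;
 *	HeapTuple	tuple;
 *
 *	scan = index_beginscan(heapRelation, indexRelation, snapshot,
 *						   nkeys, 0);
 *	index_rescan(scan, scankeys, nkeys, NULL, 0);
 *	while ((tuple = index_getnext(scan, ForwardScanDirection)) != NULL)
 *	{
 *		... use tuple before fetching the next one ...
 *	}
 *	index_endscan(scan);
 */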

extern IndexBulkDeleteResult *index_bulk_delete(IndexVacuumInfo *info,
				  IndexBulkDeleteResult *stats,
				  IndexBulkDeleteCallback callback,
				  void *callback_state);
extern IndexBulkDeleteResult *index_vacuum_cleanup(IndexVacuumInfo *info,
					 IndexBulkDeleteResult *stats);
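
/*
 * Illustration only: VACUUM-style callers drive these in two phases,
 * roughly as below.  "ivinfo" is a filled-in IndexVacuumInfo;
 * dead_tid_reaped and dead_tids stand for the caller's dead-TID
 * bookkeeping.
 *
 *	IndexBulkDeleteResult *stats = NULL;
 *
 *	... each time a batch of dead heap TIDs has been collected ...
 *	stats = index_bulk_delete(&ivinfo, stats, dead_tid_reaped,
 *							  (void *) dead_tids);
 *
 *	... once, after the last batch ...
 *	stats = index_vacuum_cleanup(&ivinfo, stats);
 *	if (stats != NULL)
 *		... report stats->num_index_tuples etc., then pfree(stats) ...
 */
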
extern bool index_can_return(Relation indexRelation, int attno);
extern RegProcedure index_getprocid(Relation irel, AttrNumber attnum,
				 uint16 procnum);
extern FmgrInfo *index_getprocinfo(Relation irel, AttrNumber attnum,
				  uint16 procnum);

/*
 * index access method support routines (in genam.c)
 */
extern IndexScanDesc RelationGetIndexScan(Relation indexRelation,
					 int nkeys, int norderbys);
extern void IndexScanEnd(IndexScanDesc scan);
extern char *BuildIndexValueDescription(Relation indexRelation,
							Datum *values, bool *isnull);

/*
 * heap-or-index access to system catalogs (in genam.c)
 */
extern SysScanDesc systable_beginscan(Relation heapRelation,
				   Oid indexId,
				   bool indexOK,
				   Snapshot snapshot,
				   int nkeys, ScanKey key);
extern HeapTuple systable_getnext(SysScanDesc sysscan);
extern bool systable_recheck_tuple(SysScanDesc sysscan, HeapTuple tup);
extern void systable_endscan(SysScanDesc sysscan);
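
/*
 * Illustration only: a typical catalog lookup.  The specific catalog,
 * index, and key shown here (pg_class by relname) are just an example;
 * passing NULL for the snapshot uses a fresh catalog snapshot.
 *
 *	Relation	rel = heap_open(RelationRelationId, AccessShareLock);
 *	ScanKeyData key[1];
 *	SysScanDesc scan;
 *	HeapTuple	tuple;
 *
 *	ScanKeyInit(&key[0],
 *				Anum_pg_class_relname,
 *				BTEqualStrategyNumber, F_NAMEEQ,
 *				CStringGetDatum("my_table"));
 *	scan = systable_beginscan(rel, ClassNameNspIndexId, true,
 *							  NULL, 1, key);
 *	while (HeapTupleIsValid(tuple = systable_getnext(scan)))
 *	{
 *		... inspect tuple, e.g. via GETSTRUCT() ...
 *	}
 *	systable_endscan(scan);
 *	heap_close(rel, AccessShareLock);
 */
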
extern SysScanDesc systable_beginscan_ordered(Relation heapRelation,
					   Relation indexRelation,
					   Snapshot snapshot,
					   int nkeys, ScanKey key);
extern HeapTuple systable_getnext_ordered(SysScanDesc sysscan,
						  ScanDirection direction);
extern void systable_endscan_ordered(SysScanDesc sysscan);

#endif							/* GENAM_H */