/*-------------------------------------------------------------------------
 *
 * analyze.c
 *	  the Postgres statistics generator
 *
 * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/commands/analyze.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include <math.h>

#include "access/multixact.h"
#include "access/transam.h"
#include "access/tupconvert.h"
#include "access/tuptoaster.h"
#include "access/visibilitymap.h"
#include "access/xact.h"
#include "catalog/catalog.h"
#include "catalog/index.h"
#include "catalog/indexing.h"
#include "catalog/pg_collation.h"
#include "catalog/pg_inherits_fn.h"
#include "catalog/pg_namespace.h"
#include "commands/dbcommands.h"
#include "commands/tablecmds.h"
#include "commands/vacuum.h"
#include "executor/executor.h"
#include "foreign/fdwapi.h"
#include "miscadmin.h"
#include "nodes/nodeFuncs.h"
#include "parser/parse_oper.h"
#include "parser/parse_relation.h"
#include "pgstat.h"
#include "postmaster/autovacuum.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "storage/proc.h"
#include "storage/procarray.h"
#include "utils/acl.h"
#include "utils/attoptcache.h"
#include "utils/datum.h"
#include "utils/guc.h"
#include "utils/lsyscache.h"
#include "utils/memutils.h"
#include "utils/pg_rusage.h"
#include "utils/sampling.h"
#include "utils/sortsupport.h"
#include "utils/syscache.h"
#include "utils/timestamp.h"
#include "utils/tqual.h"
/* Per-index data for ANALYZE */
typedef struct AnlIndexData
{
    IndexInfo  *indexInfo;      /* BuildIndexInfo result */
    double      tupleFract;     /* fraction of rows for partial index */
    VacAttrStats **vacattrstats;    /* index attrs to analyze */
    int         attr_cnt;
} AnlIndexData;

/* Default statistics target (GUC parameter) */
int         default_statistics_target = 100;

/* A few variables that don't seem worth passing around as parameters */
static MemoryContext anl_context = NULL;
static BufferAccessStrategy vac_strategy;
static void do_analyze_rel(Relation onerel, int options,
               VacuumParams *params, List *va_cols,
               AcquireSampleRowsFunc acquirefunc, BlockNumber relpages,
               bool inh, bool in_outer_xact, int elevel);
static void compute_index_stats(Relation onerel, double totalrows,
                    AnlIndexData *indexdata, int nindexes,
                    HeapTuple *rows, int numrows,
                    MemoryContext col_context);
static VacAttrStats *examine_attribute(Relation onerel, int attnum,
                  Node *index_expr);
static int acquire_sample_rows(Relation onerel, int elevel,
                    HeapTuple *rows, int targrows,
                    double *totalrows, double *totaldeadrows);
static int  compare_rows(const void *a, const void *b);
static int acquire_inherited_sample_rows(Relation onerel, int elevel,
                              HeapTuple *rows, int targrows,
                              double *totalrows, double *totaldeadrows);
static void update_attstats(Oid relid, bool inh,
                int natts, VacAttrStats **vacattrstats);
static Datum std_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull);
static Datum ind_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull);
/*
 *	analyze_rel() -- analyze one relation
 */
void
analyze_rel(Oid relid, RangeVar *relation, int options,
            VacuumParams *params, List *va_cols, bool in_outer_xact,
            BufferAccessStrategy bstrategy)
{
    Relation    onerel;
    int         elevel;
    AcquireSampleRowsFunc acquirefunc = NULL;
    BlockNumber relpages = 0;

    /* Select logging level */
    if (options & VACOPT_VERBOSE)
        elevel = INFO;
    else
        elevel = DEBUG2;

    /* Set up static variables */
    vac_strategy = bstrategy;

    /*
     * Check for user-requested abort.
     */
    CHECK_FOR_INTERRUPTS();

    /*
     * Open the relation, getting ShareUpdateExclusiveLock to ensure that two
     * ANALYZEs don't run on it concurrently.  (This also locks out a
     * concurrent VACUUM, which doesn't matter much at the moment but might
     * matter if we ever try to accumulate stats on dead tuples.)  If the rel
     * has been dropped since we last saw it, we don't need to process it.
     */
    if (!(options & VACOPT_NOWAIT))
        onerel = try_relation_open(relid, ShareUpdateExclusiveLock);
    else if (ConditionalLockRelationOid(relid, ShareUpdateExclusiveLock))
        onerel = try_relation_open(relid, NoLock);
    else
    {
        onerel = NULL;
        if (IsAutoVacuumWorkerProcess() && params->log_min_duration >= 0)
            ereport(LOG,
                    (errcode(ERRCODE_LOCK_NOT_AVAILABLE),
                     errmsg("skipping analyze of \"%s\" --- lock not available",
                            relation->relname)));
    }
    if (!onerel)
        return;
    /*
     * Check permissions --- this should match vacuum's check!
     */
    if (!(pg_class_ownercheck(RelationGetRelid(onerel), GetUserId()) ||
          (pg_database_ownercheck(MyDatabaseId, GetUserId()) && !onerel->rd_rel->relisshared)))
    {
        /* No need for a WARNING if we already complained during VACUUM */
        if (!(options & VACOPT_VACUUM))
        {
            if (onerel->rd_rel->relisshared)
                ereport(WARNING,
                        (errmsg("skipping \"%s\" --- only superuser can analyze it",
                                RelationGetRelationName(onerel))));
            else if (onerel->rd_rel->relnamespace == PG_CATALOG_NAMESPACE)
                ereport(WARNING,
                        (errmsg("skipping \"%s\" --- only superuser or database owner can analyze it",
                                RelationGetRelationName(onerel))));
            else
                ereport(WARNING,
                        (errmsg("skipping \"%s\" --- only table or database owner can analyze it",
                                RelationGetRelationName(onerel))));
        }
        relation_close(onerel, ShareUpdateExclusiveLock);
        return;
    }
    /*
     * Silently ignore tables that are temp tables of other backends ---
     * trying to analyze these is rather pointless, since their contents are
     * probably not up-to-date on disk.  (We don't throw a warning here; it
     * would just lead to chatter during a database-wide ANALYZE.)
     */
    if (RELATION_IS_OTHER_TEMP(onerel))
    {
        relation_close(onerel, ShareUpdateExclusiveLock);
        return;
    }

    /*
     * We can ANALYZE any table except pg_statistic. See update_attstats
     */
    if (RelationGetRelid(onerel) == StatisticRelationId)
    {
        relation_close(onerel, ShareUpdateExclusiveLock);
        return;
    }
    /*
     * Check that it's a plain table, materialized view, or foreign table; we
     * used to do this in get_rel_oids() but seems safer to check after we've
     * locked the relation.
     */
    if (onerel->rd_rel->relkind == RELKIND_RELATION ||
        onerel->rd_rel->relkind == RELKIND_MATVIEW)
    {
        /* Regular table, so we'll use the regular row acquisition function */
        acquirefunc = acquire_sample_rows;
        /* Also get regular table's size */
        relpages = RelationGetNumberOfBlocks(onerel);
    }
    else if (onerel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
    {
        /*
         * For a foreign table, call the FDW's hook function to see whether it
         * supports analysis.
         */
        FdwRoutine *fdwroutine;
        bool        ok = false;

        fdwroutine = GetFdwRoutineForRelation(onerel, false);

        if (fdwroutine->AnalyzeForeignTable != NULL)
            ok = fdwroutine->AnalyzeForeignTable(onerel,
                                                 &acquirefunc,
                                                 &relpages);

        if (!ok)
        {
            ereport(WARNING,
                    (errmsg("skipping \"%s\" --- cannot analyze this foreign table",
                            RelationGetRelationName(onerel))));
            relation_close(onerel, ShareUpdateExclusiveLock);
            return;
        }
    }
    else
    {
        /* No need for a WARNING if we already complained during VACUUM */
        if (!(options & VACOPT_VACUUM))
            ereport(WARNING,
                    (errmsg("skipping \"%s\" --- cannot analyze non-tables or special system tables",
                            RelationGetRelationName(onerel))));
        relation_close(onerel, ShareUpdateExclusiveLock);
        return;
    }
    /*
     * OK, let's do it.  First let other backends know I'm in ANALYZE.
     */
    LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
    MyPgXact->vacuumFlags |= PROC_IN_ANALYZE;
    LWLockRelease(ProcArrayLock);

    /*
     * Do the normal non-recursive ANALYZE.
     */
    do_analyze_rel(onerel, options, params, va_cols, acquirefunc, relpages,
                   false, in_outer_xact, elevel);

    /*
     * If there are child tables, do recursive ANALYZE.
     */
    if (onerel->rd_rel->relhassubclass)
        do_analyze_rel(onerel, options, params, va_cols, acquirefunc, relpages,
                       true, in_outer_xact, elevel);

    /*
     * Close source relation now, but keep lock so that no one deletes it
     * before we commit.  (If someone did, they'd fail to clean up the entries
     * we made in pg_statistic.  Also, releasing the lock before commit would
     * expose us to concurrent-update failures in update_attstats.)
     */
    relation_close(onerel, NoLock);

    /*
     * Reset my PGXACT flag.  Note: we need this here, and not in vacuum_rel,
     * because the vacuum flag is cleared by the end-of-xact code.
     */
    LWLockAcquire(ProcArrayLock, LW_EXCLUSIVE);
    MyPgXact->vacuumFlags &= ~PROC_IN_ANALYZE;
    LWLockRelease(ProcArrayLock);
}
/*
 *	do_analyze_rel() -- analyze one relation, recursively or not
 *
 * Note that "acquirefunc" is only relevant for the non-inherited case.
 * For the inherited case, acquire_inherited_sample_rows() determines the
 * appropriate acquirefunc for each child table.
 */
static void
do_analyze_rel(Relation onerel, int options, VacuumParams *params,
               List *va_cols, AcquireSampleRowsFunc acquirefunc,
               BlockNumber relpages, bool inh, bool in_outer_xact,
               int elevel)
{
    int         attr_cnt,
                tcnt,
                i,
                ind;
    Relation   *Irel;
    int         nindexes;
    bool        hasindex;
    VacAttrStats **vacattrstats;
    AnlIndexData *indexdata;
    int         targrows,
                numrows;
    double      totalrows,
                totaldeadrows;
    HeapTuple  *rows;
    PGRUsage    ru0;
    TimestampTz starttime = 0;
    MemoryContext caller_context;
    Oid         save_userid;
    int         save_sec_context;
    int         save_nestlevel;
320 (errmsg("analyzing \"%s.%s\" inheritance tree",
321 get_namespace_name(RelationGetNamespace(onerel)),
322 RelationGetRelationName(onerel))));
325 (errmsg("analyzing \"%s.%s\"",
326 get_namespace_name(RelationGetNamespace(onerel)),
327 RelationGetRelationName(onerel))));
330 * Set up a working context so that we can easily free whatever junk gets
333 anl_context = AllocSetContextCreate(CurrentMemoryContext,
335 ALLOCSET_DEFAULT_MINSIZE,
336 ALLOCSET_DEFAULT_INITSIZE,
337 ALLOCSET_DEFAULT_MAXSIZE);
338 caller_context = MemoryContextSwitchTo(anl_context);
341 * Switch to the table owner's userid, so that any index functions are run
342 * as that user. Also lock down security-restricted operations and
343 * arrange to make GUC variable changes local to this command.
345 GetUserIdAndSecContext(&save_userid, &save_sec_context);
346 SetUserIdAndSecContext(onerel->rd_rel->relowner,
347 save_sec_context | SECURITY_RESTRICTED_OPERATION);
348 save_nestlevel = NewGUCNestLevel();
350 /* measure elapsed time iff autovacuum logging requires it */
351 if (IsAutoVacuumWorkerProcess() && params->log_min_duration >= 0)
353 pg_rusage_init(&ru0);
354 if (params->log_min_duration > 0)
355 starttime = GetCurrentTimestamp();
    /*
     * Determine which columns to analyze
     *
     * Note that system attributes are never analyzed.
     */
    if (va_cols != NIL)
    {
        ListCell   *le;

        vacattrstats = (VacAttrStats **) palloc(list_length(va_cols) *
                                                sizeof(VacAttrStats *));
        tcnt = 0;
        foreach(le, va_cols)
        {
            char       *col = strVal(lfirst(le));

            i = attnameAttNum(onerel, col, false);
            if (i == InvalidAttrNumber)
                ereport(ERROR,
                        (errcode(ERRCODE_UNDEFINED_COLUMN),
                         errmsg("column \"%s\" of relation \"%s\" does not exist",
                                col, RelationGetRelationName(onerel))));
            vacattrstats[tcnt] = examine_attribute(onerel, i, NULL);
            if (vacattrstats[tcnt] != NULL)
                tcnt++;
        }
        attr_cnt = tcnt;
    }
    else
    {
        attr_cnt = onerel->rd_att->natts;
        vacattrstats = (VacAttrStats **)
            palloc(attr_cnt * sizeof(VacAttrStats *));
        tcnt = 0;
        for (i = 1; i <= attr_cnt; i++)
        {
            vacattrstats[tcnt] = examine_attribute(onerel, i, NULL);
            if (vacattrstats[tcnt] != NULL)
                tcnt++;
        }
        attr_cnt = tcnt;
    }
    /*
     * Open all indexes of the relation, and see if there are any analyzable
     * columns in the indexes.  We do not analyze index columns if there was
     * an explicit column list in the ANALYZE command, however.  If we are
     * doing a recursive scan, we don't want to touch the parent's indexes at
     * all.
     */
    if (!inh)
        vac_open_indexes(onerel, AccessShareLock, &nindexes, &Irel);
    else
    {
        Irel = NULL;
        nindexes = 0;
    }
    hasindex = (nindexes > 0);
    indexdata = NULL;
    if (hasindex)
    {
        indexdata = (AnlIndexData *) palloc0(nindexes * sizeof(AnlIndexData));
        for (ind = 0; ind < nindexes; ind++)
        {
            AnlIndexData *thisdata = &indexdata[ind];
            IndexInfo  *indexInfo;

            thisdata->indexInfo = indexInfo = BuildIndexInfo(Irel[ind]);
            thisdata->tupleFract = 1.0; /* fix later if partial */
            if (indexInfo->ii_Expressions != NIL && va_cols == NIL)
            {
                ListCell   *indexpr_item = list_head(indexInfo->ii_Expressions);

                thisdata->vacattrstats = (VacAttrStats **)
                    palloc(indexInfo->ii_NumIndexAttrs * sizeof(VacAttrStats *));
                tcnt = 0;
                for (i = 0; i < indexInfo->ii_NumIndexAttrs; i++)
                {
                    int         keycol = indexInfo->ii_KeyAttrNumbers[i];

                    if (keycol == 0)
                    {
                        /* Found an index expression */
                        Node       *indexkey;

                        if (indexpr_item == NULL)   /* shouldn't happen */
                            elog(ERROR, "too few entries in indexprs list");
                        indexkey = (Node *) lfirst(indexpr_item);
                        indexpr_item = lnext(indexpr_item);
                        thisdata->vacattrstats[tcnt] =
                            examine_attribute(Irel[ind], i + 1, indexkey);
                        if (thisdata->vacattrstats[tcnt] != NULL)
                            tcnt++;
                    }
                }
                thisdata->attr_cnt = tcnt;
            }
        }
    }
    /*
     * Determine how many rows we need to sample, using the worst case from
     * all analyzable columns.  We use a lower bound of 100 rows to avoid
     * possible overflow in Vitter's algorithm.  (Note: that will also be the
     * target in the corner case where there are no analyzable columns.)
     */
    targrows = 100;
    for (i = 0; i < attr_cnt; i++)
    {
        if (targrows < vacattrstats[i]->minrows)
            targrows = vacattrstats[i]->minrows;
    }
    for (ind = 0; ind < nindexes; ind++)
    {
        AnlIndexData *thisdata = &indexdata[ind];

        for (i = 0; i < thisdata->attr_cnt; i++)
        {
            if (targrows < thisdata->vacattrstats[i]->minrows)
                targrows = thisdata->vacattrstats[i]->minrows;
        }
    }

    /*
     * Acquire the sample rows
     */
    rows = (HeapTuple *) palloc(targrows * sizeof(HeapTuple));
    if (inh)
        numrows = acquire_inherited_sample_rows(onerel, elevel,
                                                rows, targrows,
                                                &totalrows, &totaldeadrows);
    else
        numrows = (*acquirefunc) (onerel, elevel,
                                  rows, targrows,
                                  &totalrows, &totaldeadrows);
    /*
     * Compute the statistics.  Temporary results during the calculations for
     * each column are stored in a child context.  The calc routines are
     * responsible to make sure that whatever they store into the VacAttrStats
     * structure is allocated in anl_context.
     */
    if (numrows > 0)
    {
        MemoryContext col_context,
                    old_context;

        col_context = AllocSetContextCreate(anl_context,
                                            "Analyze Column",
                                            ALLOCSET_DEFAULT_MINSIZE,
                                            ALLOCSET_DEFAULT_INITSIZE,
                                            ALLOCSET_DEFAULT_MAXSIZE);
        old_context = MemoryContextSwitchTo(col_context);

        for (i = 0; i < attr_cnt; i++)
        {
            VacAttrStats *stats = vacattrstats[i];
            AttributeOpts *aopt;

            stats->rows = rows;
            stats->tupDesc = onerel->rd_att;
            (*stats->compute_stats) (stats,
                                     std_fetch_func,
                                     numrows,
                                     totalrows);

            /*
             * If the appropriate flavor of the n_distinct option is
             * specified, override with the corresponding value.
             */
            aopt = get_attribute_options(onerel->rd_id, stats->attr->attnum);
            if (aopt != NULL)
            {
                float8      n_distinct;

                n_distinct = inh ? aopt->n_distinct_inherited : aopt->n_distinct;
                if (n_distinct != 0.0)
                    stats->stadistinct = n_distinct;
            }

            MemoryContextResetAndDeleteChildren(col_context);
        }

        if (hasindex)
            compute_index_stats(onerel, totalrows,
                                indexdata, nindexes,
                                rows, numrows,
                                col_context);

        MemoryContextSwitchTo(old_context);
        MemoryContextDelete(col_context);

        /*
         * Emit the completed stats rows into pg_statistic, replacing any
         * previous statistics for the target columns.  (If there are stats in
         * pg_statistic for columns we didn't process, we leave them alone.)
         */
        update_attstats(RelationGetRelid(onerel), inh,
                        attr_cnt, vacattrstats);

        for (ind = 0; ind < nindexes; ind++)
        {
            AnlIndexData *thisdata = &indexdata[ind];

            update_attstats(RelationGetRelid(Irel[ind]), false,
                            thisdata->attr_cnt, thisdata->vacattrstats);
        }
    }
    /*
     * Update pages/tuples stats in pg_class ... but not if we're doing
     * inherited stats.
     */
    if (!inh)
    {
        BlockNumber relallvisible;

        visibilitymap_count(onerel, &relallvisible, NULL);

        vac_update_relstats(onerel,
                            relpages,
                            totalrows,
                            relallvisible,
                            hasindex,
                            InvalidTransactionId,
                            InvalidMultiXactId,
                            in_outer_xact);
    }

    /*
     * Same for indexes. Vacuum always scans all indexes, so if we're part of
     * VACUUM ANALYZE, don't overwrite the accurate count already inserted by
     * VACUUM.
     */
    if (!inh && !(options & VACOPT_VACUUM))
    {
        for (ind = 0; ind < nindexes; ind++)
        {
            AnlIndexData *thisdata = &indexdata[ind];
            double      totalindexrows;

            totalindexrows = ceil(thisdata->tupleFract * totalrows);
            vac_update_relstats(Irel[ind],
                                RelationGetNumberOfBlocks(Irel[ind]),
                                totalindexrows,
                                0,
                                false,
                                InvalidTransactionId,
                                InvalidMultiXactId,
                                in_outer_xact);
        }
    }

    /*
     * Report ANALYZE to the stats collector, too.  However, if doing
     * inherited stats we shouldn't report, because the stats collector only
     * tracks per-table stats.
     */
    if (!inh)
        pgstat_report_analyze(onerel, totalrows, totaldeadrows);
    /* If this isn't part of VACUUM ANALYZE, let index AMs do cleanup */
    if (!(options & VACOPT_VACUUM))
    {
        for (ind = 0; ind < nindexes; ind++)
        {
            IndexBulkDeleteResult *stats;
            IndexVacuumInfo ivinfo;

            ivinfo.index = Irel[ind];
            ivinfo.analyze_only = true;
            ivinfo.estimated_count = true;
            ivinfo.message_level = elevel;
            ivinfo.num_heap_tuples = onerel->rd_rel->reltuples;
            ivinfo.strategy = vac_strategy;

            stats = index_vacuum_cleanup(&ivinfo, NULL);

            if (stats)
                pfree(stats);
        }
    }

    /* Done with indexes */
    vac_close_indexes(nindexes, Irel, NoLock);

    /* Log the action if appropriate */
    if (IsAutoVacuumWorkerProcess() && params->log_min_duration >= 0)
    {
        if (params->log_min_duration == 0 ||
            TimestampDifferenceExceeds(starttime, GetCurrentTimestamp(),
                                       params->log_min_duration))
            ereport(LOG,
                    (errmsg("automatic analyze of table \"%s.%s.%s\" system usage: %s",
                            get_database_name(MyDatabaseId),
                            get_namespace_name(RelationGetNamespace(onerel)),
                            RelationGetRelationName(onerel),
                            pg_rusage_show(&ru0))));
    }

    /* Roll back any GUC changes executed by index functions */
    AtEOXact_GUC(false, save_nestlevel);

    /* Restore userid and security context */
    SetUserIdAndSecContext(save_userid, save_sec_context);

    /* Restore current context and release memory */
    MemoryContextSwitchTo(caller_context);
    MemoryContextDelete(anl_context);
    anl_context = NULL;
}
/*
 * Compute statistics about indexes of a relation
 */
static void
compute_index_stats(Relation onerel, double totalrows,
                    AnlIndexData *indexdata, int nindexes,
                    HeapTuple *rows, int numrows,
                    MemoryContext col_context)
{
    MemoryContext ind_context,
                old_context;
    Datum       values[INDEX_MAX_KEYS];
    bool        isnull[INDEX_MAX_KEYS];
    int         ind,
                i;

    ind_context = AllocSetContextCreate(anl_context,
                                        "Analyze Index",
                                        ALLOCSET_DEFAULT_MINSIZE,
                                        ALLOCSET_DEFAULT_INITSIZE,
                                        ALLOCSET_DEFAULT_MAXSIZE);
    old_context = MemoryContextSwitchTo(ind_context);
    for (ind = 0; ind < nindexes; ind++)
    {
        AnlIndexData *thisdata = &indexdata[ind];
        IndexInfo  *indexInfo = thisdata->indexInfo;
        int         attr_cnt = thisdata->attr_cnt;
        TupleTableSlot *slot;
        EState     *estate;
        ExprContext *econtext;
        List       *predicate;
        Datum      *exprvals;
        bool       *exprnulls;
        int         numindexrows,
                    tcnt,
                    rowno;
        double      totalindexrows;

        /* Ignore index if no columns to analyze and not partial */
        if (attr_cnt == 0 && indexInfo->ii_Predicate == NIL)
            continue;

        /*
         * Need an EState for evaluation of index expressions and
         * partial-index predicates.  Create it in the per-index context to be
         * sure it gets cleaned up at the bottom of the loop.
         */
        estate = CreateExecutorState();
        econtext = GetPerTupleExprContext(estate);
        /* Need a slot to hold the current heap tuple, too */
        slot = MakeSingleTupleTableSlot(RelationGetDescr(onerel));

        /* Arrange for econtext's scan tuple to be the tuple under test */
        econtext->ecxt_scantuple = slot;

        /* Set up execution state for predicate. */
        predicate = (List *)
            ExecPrepareExpr((Expr *) indexInfo->ii_Predicate,
                            estate);

        /* Compute and save index expression values */
        exprvals = (Datum *) palloc(numrows * attr_cnt * sizeof(Datum));
        exprnulls = (bool *) palloc(numrows * attr_cnt * sizeof(bool));
        numindexrows = 0;
        tcnt = 0;
        for (rowno = 0; rowno < numrows; rowno++)
        {
            HeapTuple   heapTuple = rows[rowno];

            vacuum_delay_point();

            /*
             * Reset the per-tuple context each time, to reclaim any cruft
             * left behind by evaluating the predicate or index expressions.
             */
            ResetExprContext(econtext);

            /* Set up for predicate or expression evaluation */
            ExecStoreTuple(heapTuple, slot, InvalidBuffer, false);

            /* If index is partial, check predicate */
            if (predicate != NIL)
            {
                if (!ExecQual(predicate, econtext, false))
                    continue;
            }
            numindexrows++;

            if (attr_cnt > 0)
            {
                /*
                 * Evaluate the index row to compute expression values. We
                 * could do this by hand, but FormIndexDatum is convenient.
                 */
                FormIndexDatum(indexInfo,
                               slot,
                               estate,
                               values,
                               isnull);

                /*
                 * Save just the columns we care about.  We copy the values
                 * into ind_context from the estate's per-tuple context.
                 */
                for (i = 0; i < attr_cnt; i++)
                {
                    VacAttrStats *stats = thisdata->vacattrstats[i];
                    int         attnum = stats->attr->attnum;

                    if (isnull[attnum - 1])
                    {
                        exprvals[tcnt] = (Datum) 0;
                        exprnulls[tcnt] = true;
                    }
                    else
                    {
                        exprvals[tcnt] = datumCopy(values[attnum - 1],
                                                   stats->attrtype->typbyval,
                                                   stats->attrtype->typlen);
                        exprnulls[tcnt] = false;
                    }
                    tcnt++;
                }
            }
        }

        /*
         * Having counted the number of rows that pass the predicate in the
         * sample, we can estimate the total number of rows in the index.
         */
        thisdata->tupleFract = (double) numindexrows / (double) numrows;
        totalindexrows = ceil(thisdata->tupleFract * totalrows);
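        /*
         * (Illustration, editor's addition rather than original source: if
         * 60 of 240 sampled rows satisfy a partial index's predicate,
         * tupleFract is 0.25, and with totalrows = 1,000,000 the index is
         * estimated to contain ceil(0.25 * 1000000) = 250000 rows.)
         */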
        /*
         * Now we can compute the statistics for the expression columns.
         */
        if (numindexrows > 0)
        {
            MemoryContextSwitchTo(col_context);
            for (i = 0; i < attr_cnt; i++)
            {
                VacAttrStats *stats = thisdata->vacattrstats[i];
                AttributeOpts *aopt =
                    get_attribute_options(stats->attr->attrelid,
                                          stats->attr->attnum);

                stats->exprvals = exprvals + i;
                stats->exprnulls = exprnulls + i;
                stats->rowstride = attr_cnt;
                (*stats->compute_stats) (stats,
                                         ind_fetch_func,
                                         numindexrows,
                                         totalindexrows);

                /*
                 * If the n_distinct option is specified, it overrides the
                 * above computation.  For indices, we always use just
                 * n_distinct, not n_distinct_inherited.
                 */
                if (aopt != NULL && aopt->n_distinct != 0.0)
                    stats->stadistinct = aopt->n_distinct;

                MemoryContextResetAndDeleteChildren(col_context);
            }
        }

        /* And clean up */
        MemoryContextSwitchTo(ind_context);

        ExecDropSingleTupleTableSlot(slot);
        FreeExecutorState(estate);
        MemoryContextResetAndDeleteChildren(ind_context);
    }

    MemoryContextSwitchTo(old_context);
    MemoryContextDelete(ind_context);
}
/*
 * examine_attribute -- pre-analysis of a single column
 *
 * Determine whether the column is analyzable; if so, create and initialize
 * a VacAttrStats struct for it.  If not, return NULL.
 *
 * If index_expr isn't NULL, then we're trying to analyze an expression index,
 * and index_expr is the expression tree representing the column's data.
 */
static VacAttrStats *
examine_attribute(Relation onerel, int attnum, Node *index_expr)
{
    Form_pg_attribute attr = onerel->rd_att->attrs[attnum - 1];
    HeapTuple   typtuple;
    VacAttrStats *stats;
    int         i;
    bool        ok;

    /* Never analyze dropped columns */
    if (attr->attisdropped)
        return NULL;

    /* Don't analyze column if user has specified not to */
    if (attr->attstattarget == 0)
        return NULL;

    /*
     * Create the VacAttrStats struct.  Note that we only have a copy of the
     * fixed fields of the pg_attribute tuple.
     */
    stats = (VacAttrStats *) palloc0(sizeof(VacAttrStats));
    stats->attr = (Form_pg_attribute) palloc(ATTRIBUTE_FIXED_PART_SIZE);
    memcpy(stats->attr, attr, ATTRIBUTE_FIXED_PART_SIZE);

    /*
     * When analyzing an expression index, believe the expression tree's type
     * not the column datatype --- the latter might be the opckeytype storage
     * type of the opclass, which is not interesting for our purposes.  (Note:
     * if we did anything with non-expression index columns, we'd need to
     * figure out where to get the correct type info from, but for now that's
     * not a problem.)  It's not clear whether anyone will care about the
     * typmod, but we store that too just in case.
     */
    if (index_expr)
    {
        stats->attrtypid = exprType(index_expr);
        stats->attrtypmod = exprTypmod(index_expr);
    }
    else
    {
        stats->attrtypid = attr->atttypid;
        stats->attrtypmod = attr->atttypmod;
    }

    typtuple = SearchSysCacheCopy1(TYPEOID,
                                   ObjectIdGetDatum(stats->attrtypid));
    if (!HeapTupleIsValid(typtuple))
        elog(ERROR, "cache lookup failed for type %u", stats->attrtypid);
    stats->attrtype = (Form_pg_type) GETSTRUCT(typtuple);
    stats->anl_context = anl_context;
    stats->tupattnum = attnum;

    /*
     * The fields describing the stats->stavalues[n] element types default to
     * the type of the data being analyzed, but the type-specific typanalyze
     * function can change them if it wants to store something else.
     */
    for (i = 0; i < STATISTIC_NUM_SLOTS; i++)
    {
        stats->statypid[i] = stats->attrtypid;
        stats->statyplen[i] = stats->attrtype->typlen;
        stats->statypbyval[i] = stats->attrtype->typbyval;
        stats->statypalign[i] = stats->attrtype->typalign;
    }

    /*
     * Call the type-specific typanalyze function.  If none is specified, use
     * std_typanalyze().
     */
    if (OidIsValid(stats->attrtype->typanalyze))
        ok = DatumGetBool(OidFunctionCall1(stats->attrtype->typanalyze,
                                           PointerGetDatum(stats)));
    else
        ok = std_typanalyze(stats);

    if (!ok || stats->compute_stats == NULL || stats->minrows <= 0)
    {
        heap_freetuple(typtuple);
        pfree(stats->attr);
        pfree(stats);
        return NULL;
    }

    return stats;
}
/*
 * acquire_sample_rows -- acquire a random sample of rows from the table
 *
 * Selected rows are returned in the caller-allocated array rows[], which
 * must have at least targrows entries.
 * The actual number of rows selected is returned as the function result.
 * We also estimate the total numbers of live and dead rows in the table,
 * and return them into *totalrows and *totaldeadrows, respectively.
 *
 * The returned list of tuples is in order by physical position in the table.
 * (We will rely on this later to derive correlation estimates.)
 *
 * As of May 2004 we use a new two-stage method:  Stage one selects up
 * to targrows random blocks (or all blocks, if there aren't so many).
 * Stage two scans these blocks and uses the Vitter algorithm to create
 * a random sample of targrows rows (or less, if there are less in the
 * sample of blocks).  The two stages are executed simultaneously: each
 * block is processed as soon as stage one returns its number, and while
 * the rows are read stage two controls which ones are to be inserted
 * into the sample.
 *
 * Although every row has an equal chance of ending up in the final
 * sample, this sampling method is not perfect: not every possible
 * sample has an equal chance of being selected.  For large relations
 * the number of different blocks represented by the sample tends to be
 * too small.  We can live with that for now.  Improvements are welcome.
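 *
 * (Illustrative numbers, an editor's addition rather than part of the
 * original comment: with the default statistics target of 100, targrows
 * is 300 * 100 = 30000, so stage one draws at most 30000 block numbers
 * and stage two maintains a 30000-row reservoir while those blocks are
 * scanned.)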
 *
 * An important property of this sampling method is that because we do
 * look at a statistically unbiased set of blocks, we should get
 * unbiased estimates of the average numbers of live and dead rows per
 * block.  The previous sampling method put too much credence in the row
 * density near the start of the table.
 */
static int
acquire_sample_rows(Relation onerel, int elevel,
                    HeapTuple *rows, int targrows,
                    double *totalrows, double *totaldeadrows)
{
    int         numrows = 0;    /* # rows now in reservoir */
    double      samplerows = 0; /* total # rows collected */
    double      liverows = 0;   /* # live rows seen */
    double      deadrows = 0;   /* # dead rows seen */
    double      rowstoskip = -1;    /* -1 means not set yet */
    BlockNumber totalblocks;
    TransactionId OldestXmin;
    BlockSamplerData bs;
    ReservoirStateData rstate;

    Assert(targrows > 0);

    totalblocks = RelationGetNumberOfBlocks(onerel);

    /* Need a cutoff xmin for HeapTupleSatisfiesVacuum */
    OldestXmin = GetOldestXmin(onerel, true);

    /* Prepare for sampling block numbers */
    BlockSampler_Init(&bs, totalblocks, targrows, random());
    /* Prepare for sampling rows */
    reservoir_init_selection_state(&rstate, targrows);

    /* Outer loop over blocks to sample */
    while (BlockSampler_HasMore(&bs))
    {
        BlockNumber targblock = BlockSampler_Next(&bs);
        Buffer      targbuffer;
        Page        targpage;
        OffsetNumber targoffset,
                    maxoffset;

        vacuum_delay_point();

        /*
         * We must maintain a pin on the target page's buffer to ensure that
         * the maxoffset value stays good (else concurrent VACUUM might delete
         * tuples out from under us).  Hence, pin the page until we are done
         * looking at it.  We also choose to hold sharelock on the buffer
         * throughout --- we could release and re-acquire sharelock for each
         * tuple, but since we aren't doing much work per tuple, the extra
         * lock traffic is probably better avoided.
         */
        targbuffer = ReadBufferExtended(onerel, MAIN_FORKNUM, targblock,
                                        RBM_NORMAL, vac_strategy);
        LockBuffer(targbuffer, BUFFER_LOCK_SHARE);
        targpage = BufferGetPage(targbuffer);
        maxoffset = PageGetMaxOffsetNumber(targpage);
        /* Inner loop over all tuples on the selected page */
        for (targoffset = FirstOffsetNumber; targoffset <= maxoffset; targoffset++)
        {
            ItemId      itemid;
            HeapTupleData targtuple;
            bool        sample_it = false;

            itemid = PageGetItemId(targpage, targoffset);

            /*
             * We ignore unused and redirect line pointers.  DEAD line
             * pointers should be counted as dead, because we need vacuum to
             * run to get rid of them.  Note that this rule agrees with the
             * way that heap_page_prune() counts things.
             */
            if (!ItemIdIsNormal(itemid))
            {
                if (ItemIdIsDead(itemid))
                    deadrows += 1;
                continue;
            }

            ItemPointerSet(&targtuple.t_self, targblock, targoffset);

            targtuple.t_tableOid = RelationGetRelid(onerel);
            targtuple.t_data = (HeapTupleHeader) PageGetItem(targpage, itemid);
            targtuple.t_len = ItemIdGetLength(itemid);
            switch (HeapTupleSatisfiesVacuum(&targtuple,
                                             OldestXmin,
                                             targbuffer))
            {
                case HEAPTUPLE_LIVE:
                    sample_it = true;
                    liverows += 1;
                    break;

                case HEAPTUPLE_DEAD:
                case HEAPTUPLE_RECENTLY_DEAD:
                    /* Count dead and recently-dead rows */
                    deadrows += 1;
                    break;

                case HEAPTUPLE_INSERT_IN_PROGRESS:

                    /*
                     * Insert-in-progress rows are not counted.  We assume
                     * that when the inserting transaction commits or aborts,
                     * it will send a stats message to increment the proper
                     * count.  This works right only if that transaction ends
                     * after we finish analyzing the table; if things happen
                     * in the other order, its stats update will be
                     * overwritten by ours.  However, the error will be large
                     * only if the other transaction runs long enough to
                     * insert many tuples, so assuming it will finish after us
                     * is the safer option.
                     *
                     * A special case is that the inserting transaction might
                     * be our own.  In this case we should count and sample
                     * the row, to accommodate users who load a table and
                     * analyze it in one transaction.  (pgstat_report_analyze
                     * has to adjust the numbers we send to the stats
                     * collector to make this come out right.)
                     */
                    if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetXmin(targtuple.t_data)))
                    {
                        sample_it = true;
                        liverows += 1;
                    }
                    break;

                case HEAPTUPLE_DELETE_IN_PROGRESS:

                    /*
                     * We count delete-in-progress rows as still live, using
                     * the same reasoning given above; but we don't bother to
                     * include them in the sample.
                     *
                     * If the delete was done by our own transaction, however,
                     * we must count the row as dead to make
                     * pgstat_report_analyze's stats adjustments come out
                     * right.  (Note: this works out properly when the row was
                     * both inserted and deleted in our xact.)
                     */
                    if (TransactionIdIsCurrentTransactionId(HeapTupleHeaderGetUpdateXid(targtuple.t_data)))
                        deadrows += 1;
                    else
                        liverows += 1;
                    break;

                default:
                    elog(ERROR, "unexpected HeapTupleSatisfiesVacuum result");
                    break;
            }
            if (sample_it)
            {
                /*
                 * The first targrows sample rows are simply copied into the
                 * reservoir.  Then we start replacing tuples in the sample
                 * until we reach the end of the relation.  This algorithm is
                 * from Jeff Vitter's paper (see full citation below).  It
                 * works by repeatedly computing the number of tuples to skip
                 * before selecting a tuple, which replaces a randomly chosen
                 * element of the reservoir (current set of tuples).  At all
                 * times the reservoir is a true random sample of the tuples
                 * we've passed over so far, so when we fall off the end of
                 * the relation we're done.
                 */
                if (numrows < targrows)
                    rows[numrows++] = heap_copytuple(&targtuple);
                else
                {
                    /*
                     * t in Vitter's paper is the number of records already
                     * processed.  If we need to compute a new S value, we
                     * must use the not-yet-incremented value of samplerows as
                     * t.
                     */
                    if (rowstoskip < 0)
                        rowstoskip = reservoir_get_next_S(&rstate, samplerows, targrows);

                    if (rowstoskip <= 0)
                    {
                        /*
                         * Found a suitable tuple, so save it, replacing one
                         * old tuple at random
                         */
                        int         k = (int) (targrows * sampler_random_fract(rstate.randstate));

                        Assert(k >= 0 && k < targrows);
                        heap_freetuple(rows[k]);
                        rows[k] = heap_copytuple(&targtuple);
                    }

                    rowstoskip -= 1;
                }

                samplerows += 1;
            }
        }

        /* Now release the lock and pin on the page */
        UnlockReleaseBuffer(targbuffer);
    }
    /*
     * If we didn't find as many tuples as we wanted then we're done. No sort
     * is needed, since they're already in order.
     *
     * Otherwise we need to sort the collected tuples by position
     * (itempointer).  It's not worth worrying about corner cases where the
     * tuples are already sorted.
     */
    if (numrows == targrows)
        qsort((void *) rows, numrows, sizeof(HeapTuple), compare_rows);

    /*
     * Estimate total numbers of rows in relation.  For live rows, use
     * vac_estimate_reltuples; for dead rows, we have no source of old
     * information, so we have to assume the density is the same in unseen
     * pages as in the pages we scanned.
     */
    *totalrows = vac_estimate_reltuples(onerel, true,
                                        totalblocks,
                                        bs.m,
                                        liverows);
    if (bs.m > 0)
        *totaldeadrows = floor((deadrows / bs.m) * totalblocks + 0.5);
    else
        *totaldeadrows = 0.0;

    /*
     * Emit some interesting relation info
     */
    ereport(elevel,
            (errmsg("\"%s\": scanned %d of %u pages, "
                    "containing %.0f live rows and %.0f dead rows; "
                    "%d rows in sample, %.0f estimated total rows",
                    RelationGetRelationName(onerel),
                    bs.m, totalblocks,
                    liverows, deadrows,
                    numrows, *totalrows)));

    return numrows;
}
/*
 * qsort comparator for sorting rows[] array
 */
static int
compare_rows(const void *a, const void *b)
{
    HeapTuple   ha = *(const HeapTuple *) a;
    HeapTuple   hb = *(const HeapTuple *) b;
    BlockNumber ba = ItemPointerGetBlockNumber(&ha->t_self);
    OffsetNumber oa = ItemPointerGetOffsetNumber(&ha->t_self);
    BlockNumber bb = ItemPointerGetBlockNumber(&hb->t_self);
    OffsetNumber ob = ItemPointerGetOffsetNumber(&hb->t_self);

    if (ba < bb)
        return -1;
    if (ba > bb)
        return 1;
    if (oa < ob)
        return -1;
    if (oa > ob)
        return 1;
    return 0;
}
/*
 * acquire_inherited_sample_rows -- acquire sample rows from inheritance tree
 *
 * This has the same API as acquire_sample_rows, except that rows are
 * collected from all inheritance children as well as the specified table.
 * We fail and return zero if there are no inheritance children, or if all
 * children are foreign tables that don't support ANALYZE.
 */
static int
acquire_inherited_sample_rows(Relation onerel, int elevel,
                              HeapTuple *rows, int targrows,
                              double *totalrows, double *totaldeadrows)
{
    List       *tableOIDs;
    Relation   *rels;
    AcquireSampleRowsFunc *acquirefuncs;
    double     *relblocks;
    double      totalblocks;
    int         numrows,
                nrels,
                i;
    ListCell   *lc;

    /*
     * Find all members of inheritance set.  We only need AccessShareLock on
     * the children.
     */
    tableOIDs =
        find_all_inheritors(RelationGetRelid(onerel), AccessShareLock, NULL);

    /*
     * Check that there's at least one descendant, else fail.  This could
     * happen despite analyze_rel's relhassubclass check, if table once had a
     * child but no longer does.  In that case, we can clear the
     * relhassubclass field so as not to make the same mistake again later.
     * (This is safe because we hold ShareUpdateExclusiveLock.)
     */
    if (list_length(tableOIDs) < 2)
    {
        /* CCI because we already updated the pg_class row in this command */
        CommandCounterIncrement();
        SetRelationHasSubclass(RelationGetRelid(onerel), false);
        ereport(elevel,
                (errmsg("skipping analyze of \"%s.%s\" inheritance tree --- this inheritance tree contains no child tables",
                        get_namespace_name(RelationGetNamespace(onerel)),
                        RelationGetRelationName(onerel))));
        return 0;
    }
    /*
     * Identify acquirefuncs to use, and count blocks in all the relations.
     * The result could overflow BlockNumber, so we use double arithmetic.
     */
    rels = (Relation *) palloc(list_length(tableOIDs) * sizeof(Relation));
    acquirefuncs = (AcquireSampleRowsFunc *)
        palloc(list_length(tableOIDs) * sizeof(AcquireSampleRowsFunc));
    relblocks = (double *) palloc(list_length(tableOIDs) * sizeof(double));
    totalblocks = 0;
    nrels = 0;
    foreach(lc, tableOIDs)
    {
        Oid         childOID = lfirst_oid(lc);
        Relation    childrel;
        AcquireSampleRowsFunc acquirefunc = NULL;
        BlockNumber relpages = 0;

        /* We already got the needed lock */
        childrel = heap_open(childOID, NoLock);

        /* Ignore if temp table of another backend */
        if (RELATION_IS_OTHER_TEMP(childrel))
        {
            /* ... but release the lock on it */
            Assert(childrel != onerel);
            heap_close(childrel, AccessShareLock);
            continue;
        }

        /* Check table type (MATVIEW can't happen, but might as well allow) */
        if (childrel->rd_rel->relkind == RELKIND_RELATION ||
            childrel->rd_rel->relkind == RELKIND_MATVIEW)
        {
            /* Regular table, so use the regular row acquisition function */
            acquirefunc = acquire_sample_rows;
            relpages = RelationGetNumberOfBlocks(childrel);
        }
        else if (childrel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
        {
            /*
             * For a foreign table, call the FDW's hook function to see
             * whether it supports analysis.
             */
            FdwRoutine *fdwroutine;
            bool        ok = false;

            fdwroutine = GetFdwRoutineForRelation(childrel, false);

            if (fdwroutine->AnalyzeForeignTable != NULL)
                ok = fdwroutine->AnalyzeForeignTable(childrel,
                                                     &acquirefunc,
                                                     &relpages);

            if (!ok)
            {
                /* ignore, but release the lock on it */
                Assert(childrel != onerel);
                heap_close(childrel, AccessShareLock);
                continue;
            }
        }
        else
        {
            /* ignore, but release the lock on it */
            Assert(childrel != onerel);
            heap_close(childrel, AccessShareLock);
            continue;
        }

        /* OK, we'll process this child */
        rels[nrels] = childrel;
        acquirefuncs[nrels] = acquirefunc;
        relblocks[nrels] = (double) relpages;
        totalblocks += (double) relpages;
        nrels++;
    }
    /*
     * If we don't have at least two tables to consider, fail.
     */
    if (nrels < 2)
    {
        ereport(elevel,
                (errmsg("skipping analyze of \"%s.%s\" inheritance tree --- this inheritance tree contains no analyzable child tables",
                        get_namespace_name(RelationGetNamespace(onerel)),
                        RelationGetRelationName(onerel))));
        return 0;
    }
    /*
     * Now sample rows from each relation, proportionally to its fraction of
     * the total block count.  (This might be less than desirable if the child
     * rels have radically different free-space percentages, but it's not
     * clear that it's worth working harder.)
     */
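    /*
     * (Example, editor's addition: with targrows = 30000 and a child that
     * holds 250 of 1000 total blocks, childtargrows = rint(30000 * 250.0 /
     * 1000.0) = 7500 of the sample slots are assigned to that child.)
     */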
    numrows = 0;
    *totalrows = 0;
    *totaldeadrows = 0;
    for (i = 0; i < nrels; i++)
    {
        Relation    childrel = rels[i];
        AcquireSampleRowsFunc acquirefunc = acquirefuncs[i];
        double      childblocks = relblocks[i];

        if (childblocks > 0)
        {
            int         childtargrows;

            childtargrows = (int) rint(targrows * childblocks / totalblocks);
            /* Make sure we don't overrun due to roundoff error */
            childtargrows = Min(childtargrows, targrows - numrows);
            if (childtargrows > 0)
            {
                int         childrows;
                double      trows,
                            tdrows;

                /* Fetch a random sample of the child's rows */
                childrows = (*acquirefunc) (childrel, elevel,
                                            rows + numrows, childtargrows,
                                            &trows, &tdrows);

                /* We may need to convert from child's rowtype to parent's */
                if (childrows > 0 &&
                    !equalTupleDescs(RelationGetDescr(childrel),
                                     RelationGetDescr(onerel)))
                {
                    TupleConversionMap *map;

                    map = convert_tuples_by_name(RelationGetDescr(childrel),
                                                 RelationGetDescr(onerel),
                                 gettext_noop("could not convert row type"));
                    if (map != NULL)
                    {
                        int         j;

                        for (j = 0; j < childrows; j++)
                        {
                            HeapTuple   newtup;

                            newtup = do_convert_tuple(rows[numrows + j], map);
                            heap_freetuple(rows[numrows + j]);
                            rows[numrows + j] = newtup;
                        }
                        free_conversion_map(map);
                    }
                }

                /* And add to counts */
                numrows += childrows;
                *totalrows += trows;
                *totaldeadrows += tdrows;
            }
        }

        /*
         * Note: we cannot release the child-table locks, since we may have
         * pointers to their TOAST tables in the sampled rows.
         */
        heap_close(childrel, NoLock);
    }

    return numrows;
}
/*
 *	update_attstats() -- update attribute statistics for one relation
 *
 *		Statistics are stored in several places: the pg_class row for the
 *		relation has stats about the whole relation, and there is a
 *		pg_statistic row for each (non-system) attribute that has ever
 *		been analyzed.  The pg_class values are updated by VACUUM, not here.
 *
 *		pg_statistic rows are just added or updated normally.  This means
 *		that pg_statistic will probably contain some deleted rows at the
 *		completion of a vacuum cycle, unless it happens to get vacuumed last.
 *
 *		To keep things simple, we punt for pg_statistic, and don't try
 *		to compute or store rows for pg_statistic itself in pg_statistic.
 *		This could possibly be made to work, but it's not worth the trouble.
 *		Note analyze_rel() has seen to it that we won't come here when
 *		vacuuming pg_statistic itself.
 *
 *		Note: there would be a race condition here if two backends could
 *		ANALYZE the same table concurrently.  Presently, we lock that out
 *		by taking a self-exclusive lock on the relation in analyze_rel().
 */
static void
update_attstats(Oid relid, bool inh, int natts, VacAttrStats **vacattrstats)
{
    Relation    sd;
    int         attno;

    if (natts <= 0)
        return;                 /* nothing to do */

    sd = heap_open(StatisticRelationId, RowExclusiveLock);
    for (attno = 0; attno < natts; attno++)
    {
        VacAttrStats *stats = vacattrstats[attno];
        HeapTuple   stup,
                    oldtup;
        int         i,
                    k,
                    n;
        Datum       values[Natts_pg_statistic];
        bool        nulls[Natts_pg_statistic];
        bool        replaces[Natts_pg_statistic];

        /* Ignore attr if we weren't able to collect stats */
        if (!stats->stats_valid)
            continue;

        /*
         * Construct a new pg_statistic tuple
         */
        for (i = 0; i < Natts_pg_statistic; ++i)
        {
            nulls[i] = false;
            replaces[i] = true;
        }

        values[Anum_pg_statistic_starelid - 1] = ObjectIdGetDatum(relid);
        values[Anum_pg_statistic_staattnum - 1] = Int16GetDatum(stats->attr->attnum);
        values[Anum_pg_statistic_stainherit - 1] = BoolGetDatum(inh);
        values[Anum_pg_statistic_stanullfrac - 1] = Float4GetDatum(stats->stanullfrac);
        values[Anum_pg_statistic_stawidth - 1] = Int32GetDatum(stats->stawidth);
        values[Anum_pg_statistic_stadistinct - 1] = Float4GetDatum(stats->stadistinct);
        i = Anum_pg_statistic_stakind1 - 1;
        for (k = 0; k < STATISTIC_NUM_SLOTS; k++)
        {
            values[i++] = Int16GetDatum(stats->stakind[k]);     /* stakindN */
        }
        i = Anum_pg_statistic_staop1 - 1;
        for (k = 0; k < STATISTIC_NUM_SLOTS; k++)
        {
            values[i++] = ObjectIdGetDatum(stats->staop[k]);    /* staopN */
        }
        i = Anum_pg_statistic_stanumbers1 - 1;
        for (k = 0; k < STATISTIC_NUM_SLOTS; k++)
        {
            int         nnum = stats->numnumbers[k];

            if (nnum > 0)
            {
                Datum      *numdatums = (Datum *) palloc(nnum * sizeof(Datum));
                ArrayType  *arry;

                for (n = 0; n < nnum; n++)
                    numdatums[n] = Float4GetDatum(stats->stanumbers[k][n]);
                /* XXX knows more than it should about type float4: */
                arry = construct_array(numdatums, nnum,
                                       FLOAT4OID,
                                       sizeof(float4), FLOAT4PASSBYVAL, 'i');
                values[i++] = PointerGetDatum(arry);    /* stanumbersN */
            }
            else
            {
                nulls[i] = true;
                values[i++] = (Datum) 0;
            }
        }
        i = Anum_pg_statistic_stavalues1 - 1;
        for (k = 0; k < STATISTIC_NUM_SLOTS; k++)
        {
            if (stats->numvalues[k] > 0)
            {
                ArrayType  *arry;

                arry = construct_array(stats->stavalues[k],
                                       stats->numvalues[k],
                                       stats->statypid[k],
                                       stats->statyplen[k],
                                       stats->statypbyval[k],
                                       stats->statypalign[k]);
                values[i++] = PointerGetDatum(arry);    /* stavaluesN */
            }
            else
            {
                nulls[i] = true;
                values[i++] = (Datum) 0;
            }
        }

        /* Is there already a pg_statistic tuple for this attribute? */
        oldtup = SearchSysCache3(STATRELATTINH,
                                 ObjectIdGetDatum(relid),
                                 Int16GetDatum(stats->attr->attnum),
                                 BoolGetDatum(inh));

        if (HeapTupleIsValid(oldtup))
        {
            /* Yes, replace it */
            stup = heap_modify_tuple(oldtup,
                                     RelationGetDescr(sd),
                                     values,
                                     nulls,
                                     replaces);
            ReleaseSysCache(oldtup);
            simple_heap_update(sd, &stup->t_self, stup);
        }
        else
        {
            /* No, insert new tuple */
            stup = heap_form_tuple(RelationGetDescr(sd), values, nulls);
            simple_heap_insert(sd, stup);
        }

        /* update indexes too */
        CatalogUpdateIndexes(sd, stup);

        heap_freetuple(stup);
    }

    heap_close(sd, RowExclusiveLock);
}
/*
 * Standard fetch function for use by compute_stats subroutines.
 *
 * This exists to provide some insulation between compute_stats routines
 * and the actual storage of the sample data.
 */
static Datum
std_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull)
{
    int         attnum = stats->tupattnum;
    HeapTuple   tuple = stats->rows[rownum];
    TupleDesc   tupDesc = stats->tupDesc;

    return heap_getattr(tuple, attnum, tupDesc, isNull);
}
/*
 * Fetch function for analyzing index expressions.
 *
 * We have not bothered to construct index tuples, instead the data is
 * just in Datum arrays.
 */
static Datum
ind_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull)
{
    int         i;

    /* exprvals and exprnulls are already offset for proper column */
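    /*
     * (Illustration, editor's addition: with rowstride = attr_cnt = 2 and
     * this stats struct's exprvals already advanced to column 1, row 0
     * reads the flat array at index 1, row 1 at index 3, and so on.)
     */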
    i = rownum * stats->rowstride;
    *isNull = stats->exprnulls[i];
    return stats->exprvals[i];
}
/*==========================================================================
 *
 * Code below this point represents the "standard" type-specific statistics
 * analysis algorithms.  This code can be replaced on a per-data-type basis
 * by setting a nonzero value in pg_type.typanalyze.
 *
 *==========================================================================
 */
/*
 * To avoid consuming too much memory during analysis and/or too much space
 * in the resulting pg_statistic rows, we ignore varlena datums that are wider
 * than WIDTH_THRESHOLD (after detoasting!).  This is legitimate for MCV
 * and distinct-value calculations since a wide value is unlikely to be
 * duplicated at all, much less be a most-common value.  For the same reason,
 * ignoring wide values will not affect our estimates of histogram bin
 * boundaries very much.
 */
#define WIDTH_THRESHOLD  1024

#define swapInt(a,b)	do {int _tmp; _tmp=a; a=b; b=_tmp;} while(0)
#define swapDatum(a,b)	do {Datum _tmp; _tmp=a; a=b; b=_tmp;} while(0)
/*
 * Extra information used by the default analysis routines
 */
typedef struct
{
    Oid         eqopr;          /* '=' operator for datatype, if any */
    Oid         eqfunc;         /* and associated function */
    Oid         ltopr;          /* '<' operator for datatype, if any */
} StdAnalyzeData;

typedef struct
{
    Datum       value;          /* a data value */
    int         tupno;          /* position index for tuple it came from */
} ScalarItem;

typedef struct
{
    int         count;          /* # of duplicates */
    int         first;          /* values[] index of first occurrence */
} ScalarMCVItem;

typedef struct
{
    SortSupport ssup;
    int        *tupnoLink;
} CompareScalarsContext;
static void compute_trivial_stats(VacAttrStatsP stats,
                      AnalyzeAttrFetchFunc fetchfunc,
                      int samplerows,
                      double totalrows);
static void compute_distinct_stats(VacAttrStatsP stats,
                       AnalyzeAttrFetchFunc fetchfunc,
                       int samplerows,
                       double totalrows);
static void compute_scalar_stats(VacAttrStatsP stats,
                     AnalyzeAttrFetchFunc fetchfunc,
                     int samplerows,
                     double totalrows);
static int  compare_scalars(const void *a, const void *b, void *arg);
static int  compare_mcvs(const void *a, const void *b);
/*
 * std_typanalyze -- the default type-specific typanalyze function
 */
bool
std_typanalyze(VacAttrStats *stats)
{
    Form_pg_attribute attr = stats->attr;
    Oid         ltopr;
    Oid         eqopr;
    StdAnalyzeData *mystats;

    /* If the attstattarget column is negative, use the default value */
    /* NB: it is okay to scribble on stats->attr since it's a copy */
    if (attr->attstattarget < 0)
        attr->attstattarget = default_statistics_target;

    /* Look for default "<" and "=" operators for column's type */
    get_sort_group_operators(stats->attrtypid,
                             false, false, false,
                             &ltopr, &eqopr, NULL,
                             NULL);

    /* Save the operator info for compute_stats routines */
    mystats = (StdAnalyzeData *) palloc(sizeof(StdAnalyzeData));
    mystats->eqopr = eqopr;
    mystats->eqfunc = OidIsValid(eqopr) ? get_opcode(eqopr) : InvalidOid;
    mystats->ltopr = ltopr;
    stats->extra_data = mystats;
    /*
     * Determine which standard statistics algorithm to use
     */
    if (OidIsValid(eqopr) && OidIsValid(ltopr))
    {
        /* Seems to be a scalar datatype */
        stats->compute_stats = compute_scalar_stats;
        /*--------------------
         * The following choice of minrows is based on the paper
         * "Random sampling for histogram construction: how much is enough?"
         * by Surajit Chaudhuri, Rajeev Motwani and Vivek Narasayya, in
         * Proceedings of ACM SIGMOD International Conference on Management
         * of Data, 1998, Pages 436-447.  Their Corollary 1 to Theorem 5
         * says that for table size n, histogram size k, maximum relative
         * error in bin size f, and error probability gamma, the minimum
         * random sample size is
         *		r = 4 * k * ln(2*n/gamma) / f^2
         * Taking f = 0.5, gamma = 0.01, n = 10^6 rows, we obtain
         *		r = 305.82 * k
         * Note that because of the log function, the dependence on n is
         * quite weak; even at n = 10^12, a 300*k sample gives <= 0.66
         * bin size error with probability 0.99.  So there's no real need to
         * scale for n, which is a good thing because we don't necessarily
         * know it at this point.
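         *
         * (Editor's check of the arithmetic, not in the paper: with those
         * values, r = 4 * k * ln(2 * 10^6 / 0.01) / 0.5^2
         * = 16 * k * ln(2 * 10^8) ~= 16 * 19.11 * k ~= 305.8 * k, which the
         * assignment below rounds down to 300 * k.)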
         *--------------------
         */
        stats->minrows = 300 * attr->attstattarget;
    }
    else if (OidIsValid(eqopr))
    {
        /* We can still recognize distinct values */
        stats->compute_stats = compute_distinct_stats;
        /* Might as well use the same minrows as above */
        stats->minrows = 300 * attr->attstattarget;
    }
    else
    {
        /* Can't do much but the trivial stuff */
        stats->compute_stats = compute_trivial_stats;
        /* Might as well use the same minrows as above */
        stats->minrows = 300 * attr->attstattarget;
    }

    return true;
}
/*
 *	compute_trivial_stats() -- compute very basic column statistics
 *
 *	We use this when we cannot find an "=" operator for the datatype.
 *
 *	We determine the fraction of non-null rows and the average datum width.
 */
static void
compute_trivial_stats(VacAttrStatsP stats,
                      AnalyzeAttrFetchFunc fetchfunc,
                      int samplerows,
                      double totalrows)
{
    int         i;
    int         null_cnt = 0;
    int         nonnull_cnt = 0;
    double      total_width = 0;
    bool        is_varlena = (!stats->attrtype->typbyval &&
                              stats->attrtype->typlen == -1);
    bool        is_varwidth = (!stats->attrtype->typbyval &&
                               stats->attrtype->typlen < 0);

    for (i = 0; i < samplerows; i++)
    {
        Datum       value;
        bool        isnull;

        vacuum_delay_point();

        value = fetchfunc(stats, i, &isnull);

        /* Check for null/nonnull */
        if (isnull)
        {
            null_cnt++;
            continue;
        }
        nonnull_cnt++;
        /*
         * If it's a variable-width field, add up widths for average width
         * calculation.  Note that if the value is toasted, we use the toasted
         * width.  We don't bother with this calculation if it's a fixed-width
         * type.
         */
        if (is_varlena)
        {
            total_width += VARSIZE_ANY(DatumGetPointer(value));
        }
        else if (is_varwidth)
        {
            /* must be cstring */
            total_width += strlen(DatumGetCString(value)) + 1;
        }
    }
    /* We can only compute average width if we found some non-null values. */
    if (nonnull_cnt > 0)
    {
        stats->stats_valid = true;
        /* Do the simple null-frac and width stats */
        stats->stanullfrac = (double) null_cnt / (double) samplerows;
        if (is_varwidth)
            stats->stawidth = total_width / (double) nonnull_cnt;
        else
            stats->stawidth = stats->attrtype->typlen;
        stats->stadistinct = 0.0;       /* "unknown" */
    }
    else if (null_cnt > 0)
    {
        /* We found only nulls; assume the column is entirely null */
        stats->stats_valid = true;
        stats->stanullfrac = 1.0;
        if (is_varwidth)
            stats->stawidth = 0;    /* "unknown" */
        else
            stats->stawidth = stats->attrtype->typlen;
        stats->stadistinct = 0.0;       /* "unknown" */
    }
}
/*
 *	compute_distinct_stats() -- compute column statistics including ndistinct
 *
 *	We use this when we can find only an "=" operator for the datatype.
 *
 *	We determine the fraction of non-null rows, the average width, the
 *	most common values, and the (estimated) number of distinct values.
 *
 *	The most common values are determined by brute force: we keep a list
 *	of previously seen values, ordered by number of times seen, as we scan
 *	the samples.  A newly seen value is inserted just after the last
 *	multiply-seen value, causing the bottommost (oldest) singly-seen value
 *	to drop off the list.  The accuracy of this method, and also its cost,
 *	depend mainly on the length of the list we are willing to keep.
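 *
 *	(Example, editor's addition: scanning A B A C B A, the list evolves as
 *	A(1); A(1) B(1); A(2) B(1); A(2) C(1) B(1); A(2) B(2) C(1); and finally
 *	A(3) B(2) C(1) --- multiply-seen values stay ahead of singly-seen ones.)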
 */
static void
compute_distinct_stats(VacAttrStatsP stats,
                       AnalyzeAttrFetchFunc fetchfunc,
                       int samplerows,
                       double totalrows)
{
    int         i;
    int         null_cnt = 0;
    int         nonnull_cnt = 0;
    int         toowide_cnt = 0;
    double      total_width = 0;
    bool        is_varlena = (!stats->attrtype->typbyval &&
                              stats->attrtype->typlen == -1);
    bool        is_varwidth = (!stats->attrtype->typbyval &&
                               stats->attrtype->typlen < 0);
    FmgrInfo    f_cmpeq;
    typedef struct
    {
        Datum       value;
        int         count;
    } TrackItem;
    TrackItem  *track;
    int         track_cnt,
                track_max;
    int         num_mcv = stats->attr->attstattarget;
    StdAnalyzeData *mystats = (StdAnalyzeData *) stats->extra_data;

    /*
     * We track up to 2*n values for an n-element MCV list; but at least 10.
     */
    track_max = 2 * num_mcv;
    if (track_max < 10)
        track_max = 10;
    track = (TrackItem *) palloc(track_max * sizeof(TrackItem));
    track_cnt = 0;

    fmgr_info(mystats->eqfunc, &f_cmpeq);
    for (i = 0; i < samplerows; i++)
    {
        Datum       value;
        bool        isnull;
        bool        match;
        int         firstcount1,
                    j;

        vacuum_delay_point();

        value = fetchfunc(stats, i, &isnull);

        /* Check for null/nonnull */
        if (isnull)
        {
            null_cnt++;
            continue;
        }
        nonnull_cnt++;

        /*
         * If it's a variable-width field, add up widths for average width
         * calculation.  Note that if the value is toasted, we use the toasted
         * width.  We don't bother with this calculation if it's a fixed-width
         * type.
         */
        if (is_varlena)
        {
            total_width += VARSIZE_ANY(DatumGetPointer(value));

            /*
             * If the value is toasted, we want to detoast it just once to
             * avoid repeated detoastings and resultant excess memory usage
             * during the comparisons.  Also, check to see if the value is
             * excessively wide, and if so don't detoast at all --- just
             * ignore the value.
             */
            if (toast_raw_datum_size(value) > WIDTH_THRESHOLD)
            {
                toowide_cnt++;
                continue;
            }
            value = PointerGetDatum(PG_DETOAST_DATUM(value));
        }
        else if (is_varwidth)
        {
            /* must be cstring */
            total_width += strlen(DatumGetCString(value)) + 1;
        }
        /*
         * See if the value matches anything we're already tracking.
         */
        match = false;
        firstcount1 = track_cnt;
        for (j = 0; j < track_cnt; j++)
        {
            /* We always use the default collation for statistics */
            if (DatumGetBool(FunctionCall2Coll(&f_cmpeq,
                                               DEFAULT_COLLATION_OID,
                                               value, track[j].value)))
            {
                match = true;
                break;
            }
            if (j < firstcount1 && track[j].count == 1)
                firstcount1 = j;
        }

        if (match)
        {
            /* Found a match */
            track[j].count++;
            /* This value may now need to "bubble up" in the track list */
            while (j > 0 && track[j].count > track[j - 1].count)
            {
                swapDatum(track[j].value, track[j - 1].value);
                swapInt(track[j].count, track[j - 1].count);
                j--;
            }
        }
        else
        {
            /* No match.  Insert at head of count-1 list */
            if (track_cnt < track_max)
                track_cnt++;
            for (j = track_cnt - 1; j > firstcount1; j--)
            {
                track[j].value = track[j - 1].value;
                track[j].count = track[j - 1].count;
            }
            if (firstcount1 < track_cnt)
            {
                track[firstcount1].value = value;
                track[firstcount1].count = 1;
            }
        }
    }
    /* We can only compute real stats if we found some non-null values. */
    if (nonnull_cnt > 0)
    {
        int         nmultiple,
                    summultiple;

        stats->stats_valid = true;
        /* Do the simple null-frac and width stats */
        stats->stanullfrac = (double) null_cnt / (double) samplerows;
        if (is_varwidth)
            stats->stawidth = total_width / (double) nonnull_cnt;
        else
            stats->stawidth = stats->attrtype->typlen;

        /* Count the number of values we found multiple times */
        summultiple = 0;
        for (nmultiple = 0; nmultiple < track_cnt; nmultiple++)
        {
            if (track[nmultiple].count == 1)
                break;
            summultiple += track[nmultiple].count;
        }

        if (nmultiple == 0)
        {
            /* If we found no repeated values, assume it's a unique column */
            stats->stadistinct = -1.0;
        }
        else if (track_cnt < track_max && toowide_cnt == 0 &&
                 nmultiple == track_cnt)
        {
            /*
             * Our track list includes every value in the sample, and every
             * value appeared more than once.  Assume the column has just
             * these values.  (This case is meant to address columns with
             * small, fixed sets of possible values, such as boolean or enum
             * columns.  If there are any values that appear just once in the
             * sample, including too-wide values, we should assume that that's
             * not what we're dealing with.)
             */
            stats->stadistinct = track_cnt;
        }
        else
        {
            /*----------
             * Estimate the number of distinct values using the estimator
             * proposed by Haas and Stokes in IBM Research Report RJ 10025:
             *		n*d / (n - f1 + f1*n/N)
             * where f1 is the number of distinct values that occurred
             * exactly once in our sample of n rows (from a total of N),
             * and d is the total number of distinct values in the sample.
             * This is their Duj1 estimator; the other estimators they
             * recommend are considerably more complex, and are numerically
             * very unstable when n is much smaller than N.
             *
             * In this calculation, we consider only non-nulls.  We used to
             * include rows with null values in the n and N counts, but that
             * leads to inaccurate answers in columns with many nulls, and
             * it's intuitively bogus anyway considering the desired result is
             * the number of distinct non-null values.
             *
             * We assume (not very reliably!) that all the multiply-occurring
             * values are reflected in the final track[] list, and the other
             * nonnull values all appeared but once.  (XXX this usually
             * results in a drastic overestimate of ndistinct.  Can we do
             * any better?)
             *----------
             */
            int         f1 = nonnull_cnt - summultiple;
            int         d = f1 + nmultiple;
            double      n = samplerows - null_cnt;
            double      N = totalrows * (1.0 - stats->stanullfrac);
            double      stadistinct;

            /* N == 0 shouldn't happen, but just in case ... */
            if (N > 0)
                stadistinct = (n * d) / ((n - f1) + f1 * n / N);
            else
                stadistinct = 0;

            /* Clamp to sane range in case of roundoff error */
            if (stadistinct < d)
                stadistinct = d;
            if (stadistinct > N)
                stadistinct = N;
            /* And round to integer */
            stats->stadistinct = floor(stadistinct + 0.5);
        }
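        /*
         * (Worked example with made-up numbers, editor's addition: for
         * n = 30000 sampled non-null rows of which f1 = 10000 values were
         * seen exactly once, d = 15000 distinct values in the sample, and
         * N = 1,000,000 rows in the table,
         * Duj1 = 30000 * 15000 / (30000 - 10000 + 10000 * 30000 / 10^6)
         *      = 450,000,000 / 20,300 ~= 22167 distinct values.)
         */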
        /*
         * If we estimated the number of distinct values at more than 10% of
         * the total row count (a very arbitrary limit), then assume that
         * stadistinct should scale with the row count rather than be a fixed
         * value.
         */
        if (stats->stadistinct > 0.1 * totalrows)
            stats->stadistinct = -(stats->stadistinct / totalrows);
        /*
         * Decide how many values are worth storing as most-common values. If
         * we are able to generate a complete MCV list (all the values in the
         * sample will fit, and we think these are all the ones in the table),
         * then do so.  Otherwise, store only those values that are
         * significantly more common than the (estimated) average. We set the
         * threshold rather arbitrarily at 25% more than average, with at
         * least 2 instances in the sample.
         *
         * Note: the first of these cases is meant to address columns with
         * small, fixed sets of possible values, such as boolean or enum
         * columns.  If we can *completely* represent the column population by
         * an MCV list that will fit into the stats target, then we should do
         * so and thus provide the planner with complete information.  But if
         * the MCV list is not complete, it's generally worth being more
         * selective, and not just filling it all the way up to the stats
         * target.  So for an incomplete list, we try to take only MCVs that
         * are significantly more common than average.
         */
2141 if (track_cnt < track_max && toowide_cnt == 0 &&
2142 stats->stadistinct > 0 &&
2143 track_cnt <= num_mcv)
2145 /* Track list includes all values seen, and all will fit */
2146 num_mcv = track_cnt;
2150 double ndistinct_table = stats->stadistinct;
2154 /* Re-extract estimate of # distinct nonnull values in table */
2155 if (ndistinct_table < 0)
2156 ndistinct_table = -ndistinct_table * totalrows;
2157 /* estimate # occurrences in sample of a typical nonnull value */
2158 avgcount = (double) nonnull_cnt / ndistinct_table;
2159 /* set minimum threshold count to store a value */
2160 mincount = avgcount * 1.25;
2163 if (num_mcv > track_cnt)
2164 num_mcv = track_cnt;
2165 for (i = 0; i < num_mcv; i++)
2167 if (track[i].count < mincount)
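		/*
		 * For instance, with nonnull_cnt = 9000 and an ndistinct_table
		 * estimate of 300, a typical value is expected avgcount = 30 times
		 * in the sample, so mincount = 37.5 and only track[] entries seen
		 * at least 38 times survive as MCVs.
		 */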
		/* Generate MCV slot entry */
		if (num_mcv > 0)
		{
			MemoryContext old_context;
			Datum	   *mcv_values;
			float4	   *mcv_freqs;

			/* Must copy the target values into anl_context */
			old_context = MemoryContextSwitchTo(stats->anl_context);
			mcv_values = (Datum *) palloc(num_mcv * sizeof(Datum));
			mcv_freqs = (float4 *) palloc(num_mcv * sizeof(float4));
			for (i = 0; i < num_mcv; i++)
			{
				mcv_values[i] = datumCopy(track[i].value,
										  stats->attrtype->typbyval,
										  stats->attrtype->typlen);
				mcv_freqs[i] = (double) track[i].count / (double) samplerows;
			}
			MemoryContextSwitchTo(old_context);

			stats->stakind[0] = STATISTIC_KIND_MCV;
			stats->staop[0] = mystats->eqopr;
			stats->stanumbers[0] = mcv_freqs;
			stats->numnumbers[0] = num_mcv;
			stats->stavalues[0] = mcv_values;
			stats->numvalues[0] = num_mcv;

			/*
			 * Accept the defaults for stats->statypid and others. They have
			 * been set before we were called (see vacuum.h)
			 */
		}
	}
	else if (null_cnt > 0)
	{
		/* We found only nulls; assume the column is entirely null */
		stats->stats_valid = true;
		stats->stanullfrac = 1.0;
		if (is_varwidth)
			stats->stawidth = 0;	/* "unknown" */
		else
			stats->stawidth = stats->attrtype->typlen;
		stats->stadistinct = 0.0;	/* "unknown" */
	}

	/* We don't need to bother cleaning up any of our temporary palloc's */
}
/*
 *	compute_scalar_stats() -- compute column statistics
 *
 *	We use this when we can find "=" and "<" operators for the datatype.
 *
 *	We determine the fraction of non-null rows, the average width, the
 *	most common values, the (estimated) number of distinct values, the
 *	distribution histogram, and the correlation of physical to logical order.
 *
 *	The desired stats can be determined fairly easily after sorting the
 *	data values into order.
 */
static void
compute_scalar_stats(VacAttrStatsP stats,
					 AnalyzeAttrFetchFunc fetchfunc,
					 int samplerows,
					 double totalrows)
{
	int			i;
	int			null_cnt = 0;
	int			nonnull_cnt = 0;
	int			toowide_cnt = 0;
	double		total_width = 0;
	bool		is_varlena = (!stats->attrtype->typbyval &&
							  stats->attrtype->typlen == -1);
	bool		is_varwidth = (!stats->attrtype->typbyval &&
							   stats->attrtype->typlen < 0);
	double		corr_xysum;
	SortSupportData ssup;
	ScalarItem *values;
	int			values_cnt = 0;
	int		   *tupnoLink;
	ScalarMCVItem *track;
	int			track_cnt = 0;
	int			num_mcv = stats->attr->attstattarget;
	int			num_bins = stats->attr->attstattarget;
	StdAnalyzeData *mystats = (StdAnalyzeData *) stats->extra_data;

	values = (ScalarItem *) palloc(samplerows * sizeof(ScalarItem));
	tupnoLink = (int *) palloc(samplerows * sizeof(int));
	track = (ScalarMCVItem *) palloc(num_mcv * sizeof(ScalarMCVItem));

	memset(&ssup, 0, sizeof(ssup));
	ssup.ssup_cxt = CurrentMemoryContext;
	/* We always use the default collation for statistics */
	ssup.ssup_collation = DEFAULT_COLLATION_OID;
	ssup.ssup_nulls_first = false;

	/*
	 * For now, don't perform abbreviated key conversion, because full values
	 * are required for MCV slot generation.  Supporting that optimization
	 * would necessitate teaching compare_scalars() to call a tie-breaker.
	 */
	ssup.abbreviate = false;

	PrepareSortSupportFromOrderingOp(mystats->ltopr, &ssup);
	/* Initial scan to find sortable values */
	for (i = 0; i < samplerows; i++)
	{
		Datum		value;
		bool		isnull;

		vacuum_delay_point();

		value = fetchfunc(stats, i, &isnull);

		/* Check for null/nonnull */
		if (isnull)
		{
			null_cnt++;
			continue;
		}
		nonnull_cnt++;

		/*
		 * If it's a variable-width field, add up widths for average width
		 * calculation.  Note that if the value is toasted, we use the toasted
		 * width.  We don't bother with this calculation if it's a fixed-width
		 * type.
		 */
		if (is_varlena)
		{
			total_width += VARSIZE_ANY(DatumGetPointer(value));

			/*
			 * If the value is toasted, we want to detoast it just once to
			 * avoid repeated detoastings and resultant excess memory usage
			 * during the comparisons.  Also, check to see if the value is
			 * excessively wide, and if so don't detoast at all --- just
			 * ignore the value.
			 */
			if (toast_raw_datum_size(value) > WIDTH_THRESHOLD)
			{
				toowide_cnt++;
				continue;
			}
			value = PointerGetDatum(PG_DETOAST_DATUM(value));
		}
		else if (is_varwidth)
		{
			/* must be cstring */
			total_width += strlen(DatumGetCString(value)) + 1;
		}

		/* Add it to the list to be sorted */
		values[values_cnt].value = value;
		values[values_cnt].tupno = values_cnt;
		tupnoLink[values_cnt] = values_cnt;
		values_cnt++;
	}
	/* We can only compute real stats if we found some sortable values. */
	if (values_cnt > 0)
	{
		int			ndistinct,	/* # distinct values in sample */
					nmultiple,	/* # that appear multiple times */
					num_hist,
					dups_cnt;
		int			slot_idx = 0;
		CompareScalarsContext cxt;

		/* Sort the collected values */
		cxt.ssup = &ssup;
		cxt.tupnoLink = tupnoLink;
		qsort_arg((void *) values, values_cnt, sizeof(ScalarItem),
				  compare_scalars, (void *) &cxt);

		/*
		 * Now scan the values in order, find the most common ones, and also
		 * accumulate ordering-correlation statistics.
		 *
		 * To determine which are most common, we first have to count the
		 * number of duplicates of each value.  The duplicates are adjacent in
		 * the sorted list, so a brute-force approach is to compare successive
		 * datum values until we find two that are not equal.  However, that
		 * requires N-1 invocations of the datum comparison routine, which are
		 * completely redundant with work that was done during the sort.  (The
		 * sort algorithm must at some point have compared each pair of items
		 * that are adjacent in the sorted order; otherwise it could not know
		 * that it's ordered the pair correctly.)  We exploit this by having
		 * compare_scalars remember the highest tupno index that each
		 * ScalarItem has been found equal to.  At the end of the sort, a
		 * ScalarItem's tupnoLink will still point to itself if and only if it
		 * is the last item of its group of duplicates (since the group will
		 * be ordered by tupno).
		 */
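		/*
		 * Concretely: if the sorted values are A,A,B carrying tupnos 4, 7,
		 * and 2, the equality comparison of the two A's during the sort set
		 * tupnoLink[4] = 7, while tupnoLink[7] and tupnoLink[2] still point
		 * to themselves; so the scan below closes the A group at tupno 7
		 * (dups_cnt = 2) and the B group at tupno 2 (dups_cnt = 1).
		 */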
		corr_xysum = 0;
		ndistinct = 0;
		nmultiple = 0;
		dups_cnt = 0;
		for (i = 0; i < values_cnt; i++)
		{
			int			tupno = values[i].tupno;

			corr_xysum += ((double) i) * ((double) tupno);
			dups_cnt++;
			if (tupnoLink[tupno] == tupno)
			{
				/* Reached end of duplicates of this value */
				ndistinct++;
				if (dups_cnt > 1)
				{
					nmultiple++;
					if (track_cnt < num_mcv ||
						dups_cnt > track[track_cnt - 1].count)
					{
						/*
						 * Found a new item for the mcv list; find its
						 * position, bubbling down old items if needed. Loop
						 * invariant is that j points at an empty/replaceable
						 * slot.
						 */
						int			j;

						if (track_cnt < num_mcv)
							track_cnt++;
						for (j = track_cnt - 1; j > 0; j--)
						{
							if (dups_cnt <= track[j - 1].count)
								break;
							track[j].count = track[j - 1].count;
							track[j].first = track[j - 1].first;
						}
						track[j].count = dups_cnt;
						track[j].first = i + 1 - dups_cnt;
					}
				}
				dups_cnt = 0;
			}
		}
		stats->stats_valid = true;
		/* Do the simple null-frac and width stats */
		stats->stanullfrac = (double) null_cnt / (double) samplerows;
		if (is_varwidth)
			stats->stawidth = total_width / (double) nonnull_cnt;
		else
			stats->stawidth = stats->attrtype->typlen;

		if (nmultiple == 0)
		{
			/* If we found no repeated values, assume it's a unique column */
			stats->stadistinct = -1.0;
		}
		else if (toowide_cnt == 0 && nmultiple == ndistinct)
		{
			/*
			 * Every value in the sample appeared more than once.  Assume the
			 * column has just these values.  (This case is meant to address
			 * columns with small, fixed sets of possible values, such as
			 * boolean or enum columns.  If there are any values that appear
			 * just once in the sample, including too-wide values, we should
			 * assume that that's not what we're dealing with.)
			 */
			stats->stadistinct = ndistinct;
		}
		else
		{
			/*----------
			 * Estimate the number of distinct values using the estimator
			 * proposed by Haas and Stokes in IBM Research Report RJ 10025:
			 *		n*d / (n - f1 + f1*n/N)
			 * where f1 is the number of distinct values that occurred
			 * exactly once in our sample of n rows (from a total of N),
			 * and d is the total number of distinct values in the sample.
			 * This is their Duj1 estimator; the other estimators they
			 * recommend are considerably more complex, and are numerically
			 * very unstable when n is much smaller than N.
			 *
			 * In this calculation, we consider only non-nulls.  We used to
			 * include rows with null values in the n and N counts, but that
			 * leads to inaccurate answers in columns with many nulls, and
			 * it's intuitively bogus anyway considering the desired result is
			 * the number of distinct non-null values.
			 *
			 * Overwidth values are assumed to have been distinct.
			 *----------
			 */
			int			f1 = ndistinct - nmultiple + toowide_cnt;
			int			d = f1 + nmultiple;
			double		n = samplerows - null_cnt;
			double		N = totalrows * (1.0 - stats->stanullfrac);
			double		stadistinct;

			/* N == 0 shouldn't happen, but just in case ... */
			if (N > 0)
				stadistinct = (n * d) / ((n - f1) + f1 * n / N);
			else
				stadistinct = 0;

			/* Clamp to sane range in case of roundoff error */
			if (stadistinct < d)
				stadistinct = d;
			if (stadistinct > N)
				stadistinct = N;
			/* And round to integer */
			stats->stadistinct = floor(stadistinct + 0.5);
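			/*
			 * Note how over-width values enter via f1: e.g. ndistinct =
			 * 5000, nmultiple = 500, and toowide_cnt = 10 give f1 = 4510,
			 * since each too-wide value skipped by the scan above is taken
			 * to be one more value seen exactly once.
			 */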
		}

		/*
		 * If we estimated the number of distinct values at more than 10% of
		 * the total row count (a very arbitrary limit), then assume that
		 * stadistinct should scale with the row count rather than be a fixed
		 * value.
		 */
		if (stats->stadistinct > 0.1 * totalrows)
			stats->stadistinct = -(stats->stadistinct / totalrows);
		/*
		 * Decide how many values are worth storing as most-common values. If
		 * we are able to generate a complete MCV list (all the values in the
		 * sample will fit, and we think these are all the ones in the table),
		 * then do so.  Otherwise, store only those values that are
		 * significantly more common than the (estimated) average.  We set the
		 * threshold rather arbitrarily at 25% more than average, with at
		 * least 2 instances in the sample.  Also, we won't suppress values
		 * that have a frequency of at least 1/K where K is the intended
		 * number of histogram bins; such values might otherwise cause us to
		 * emit duplicate histogram bin boundaries.  (We might end up with
		 * duplicate histogram entries anyway, if the distribution is skewed;
		 * but we prefer to treat such values as MCVs if at all possible.)
		 *
		 * Note: the first of these cases is meant to address columns with
		 * small, fixed sets of possible values, such as boolean or enum
		 * columns.  If we can *completely* represent the column population by
		 * an MCV list that will fit into the stats target, then we should do
		 * so and thus provide the planner with complete information.  But if
		 * the MCV list is not complete, it's generally worth being more
		 * selective, and not just filling it all the way up to the stats
		 * target.  So for an incomplete list, we try to take only MCVs that
		 * are significantly more common than average.
		 */
		if (track_cnt == ndistinct && toowide_cnt == 0 &&
			stats->stadistinct > 0 &&
			track_cnt <= num_mcv)
		{
			/* Track list includes all values seen, and all will fit */
			num_mcv = track_cnt;
		}
		else
		{
			double		ndistinct_table = stats->stadistinct;
			double		avgcount,
						mincount,
						maxmincount;

			/* Re-extract estimate of # distinct nonnull values in table */
			if (ndistinct_table < 0)
				ndistinct_table = -ndistinct_table * totalrows;
			/* estimate # occurrences in sample of a typical nonnull value */
			avgcount = (double) nonnull_cnt / ndistinct_table;
			/* set minimum threshold count to store a value */
			mincount = avgcount * 1.25;
			if (mincount < 2)
				mincount = 2;
			/* don't let threshold exceed 1/K, however */
			maxmincount = (double) values_cnt / (double) num_bins;
			if (mincount > maxmincount)
				mincount = maxmincount;
			if (num_mcv > track_cnt)
				num_mcv = track_cnt;
			for (i = 0; i < num_mcv; i++)
			{
				if (track[i].count < mincount)
				{
					num_mcv = i;
					break;
				}
			}
		}
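		/*
		 * Example of the 1/K cap with hypothetical counts: values_cnt =
		 * 30000 and num_bins = 100 give maxmincount = 300.  If the
		 * 25%-over-average rule had demanded, say, mincount = 450, it is
		 * cut back to 300, so any value covering at least one histogram
		 * bin's worth of the sample still becomes an MCV.
		 */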
		/* Generate MCV slot entry */
		if (num_mcv > 0)
		{
			MemoryContext old_context;
			Datum	   *mcv_values;
			float4	   *mcv_freqs;

			/* Must copy the target values into anl_context */
			old_context = MemoryContextSwitchTo(stats->anl_context);
			mcv_values = (Datum *) palloc(num_mcv * sizeof(Datum));
			mcv_freqs = (float4 *) palloc(num_mcv * sizeof(float4));
			for (i = 0; i < num_mcv; i++)
			{
				mcv_values[i] = datumCopy(values[track[i].first].value,
										  stats->attrtype->typbyval,
										  stats->attrtype->typlen);
				mcv_freqs[i] = (double) track[i].count / (double) samplerows;
			}
			MemoryContextSwitchTo(old_context);

			stats->stakind[slot_idx] = STATISTIC_KIND_MCV;
			stats->staop[slot_idx] = mystats->eqopr;
			stats->stanumbers[slot_idx] = mcv_freqs;
			stats->numnumbers[slot_idx] = num_mcv;
			stats->stavalues[slot_idx] = mcv_values;
			stats->numvalues[slot_idx] = num_mcv;

			/*
			 * Accept the defaults for stats->statypid and others. They have
			 * been set before we were called (see vacuum.h)
			 */
			slot_idx++;
		}
		/*
		 * Generate a histogram slot entry if there are at least two distinct
		 * values not accounted for in the MCV list.  (This ensures the
		 * histogram won't collapse to empty or a singleton.)
		 */
		num_hist = ndistinct - num_mcv;
		if (num_hist > num_bins)
			num_hist = num_bins + 1;
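		/*
		 * E.g. with ndistinct = 500 and num_mcv = 100, num_hist starts at
		 * 400 and is capped to num_bins + 1 boundary values, which delimit
		 * exactly num_bins histogram bins.
		 */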
		if (num_hist >= 2)
		{
			MemoryContext old_context;
			Datum	   *hist_values;
			int			nvals;
			int			pos,
						posfrac,
						delta,
						deltafrac;

			/* Sort the MCV items into position order to speed next loop */
			qsort((void *) track, num_mcv,
				  sizeof(ScalarMCVItem), compare_mcvs);

			/*
			 * Collapse out the MCV items from the values[] array.
			 *
			 * Note we destroy the values[] array here... but we don't need it
			 * for anything more.  We do, however, still need values_cnt.
			 * nvals will be the number of remaining entries in values[].
			 */
			if (num_mcv > 0)
			{
				int			src,
							dest;
				int			j;

				src = dest = 0;
				j = 0;			/* index of next interesting MCV item */
				while (src < values_cnt)
				{
					int			ncopy;

					if (j < num_mcv)
					{
						int			first = track[j].first;

						if (src >= first)
						{
							/* advance past this MCV item */
							src = first + track[j].count;
							j++;
							continue;
						}
						ncopy = first - src;
					}
					else
						ncopy = values_cnt - src;
					memmove(&values[dest], &values[src],
							ncopy * sizeof(ScalarItem));
					src += ncopy;
					dest += ncopy;
				}
				nvals = dest;
			}
			else
				nvals = values_cnt;
			Assert(nvals >= num_hist);
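			/*
			 * For instance, with values_cnt = 10 and MCV items at positions
			 * 2..4 and 7..8 (track[0].first = 2, count = 3; track[1].first
			 * = 7, count = 2), the loop copies positions 0-1, 5-6, and 9
			 * down to the front of values[], leaving nvals = 5.
			 */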
			/* Must copy the target values into anl_context */
			old_context = MemoryContextSwitchTo(stats->anl_context);
			hist_values = (Datum *) palloc(num_hist * sizeof(Datum));

			/*
			 * The object of this loop is to copy the first and last values[]
			 * entries along with evenly-spaced values in between.  So the
			 * i'th value is values[(i * (nvals - 1)) / (num_hist - 1)].  But
			 * computing that subscript directly risks integer overflow when
			 * the stats target is more than a couple thousand.  Instead we
			 * add (nvals - 1) / (num_hist - 1) to pos at each step, tracking
			 * the integral and fractional parts of the sum separately.
			 */
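			/*
			 * For example, nvals = 1000 and num_hist = 101 give delta = 9
			 * and deltafrac = 99 (999 = 9*100 + 99); each step advances pos
			 * by 9 plus an accumulated 99/100, and after 100 steps the 99
			 * carries bring pos to exactly 999, the last entry.
			 */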
			delta = (nvals - 1) / (num_hist - 1);
			deltafrac = (nvals - 1) % (num_hist - 1);
			pos = posfrac = 0;

			for (i = 0; i < num_hist; i++)
			{
				hist_values[i] = datumCopy(values[pos].value,
										   stats->attrtype->typbyval,
										   stats->attrtype->typlen);
				pos += delta;
				posfrac += deltafrac;
				if (posfrac >= (num_hist - 1))
				{
					/* fractional part exceeds 1, carry to integer part */
					pos++;
					posfrac -= (num_hist - 1);
				}
			}

			MemoryContextSwitchTo(old_context);

			stats->stakind[slot_idx] = STATISTIC_KIND_HISTOGRAM;
			stats->staop[slot_idx] = mystats->ltopr;
			stats->stavalues[slot_idx] = hist_values;
			stats->numvalues[slot_idx] = num_hist;

			/*
			 * Accept the defaults for stats->statypid and others. They have
			 * been set before we were called (see vacuum.h)
			 */
			slot_idx++;
		}
		/* Generate a correlation entry if there are multiple values */
		if (values_cnt > 1)
		{
			MemoryContext old_context;
			float4	   *corrs;
			double		corr_xsum,
						corr_x2sum;

			/* Must copy the target values into anl_context */
			old_context = MemoryContextSwitchTo(stats->anl_context);
			corrs = (float4 *) palloc(sizeof(float4));
			MemoryContextSwitchTo(old_context);

			/*----------
			 * Since we know the x and y value sets are both
			 *		0, 1, ..., values_cnt-1
			 * we have sum(x) = sum(y) =
			 *		(values_cnt-1)*values_cnt / 2
			 * and sum(x^2) = sum(y^2) =
			 *		(values_cnt-1)*values_cnt*(2*values_cnt-1) / 6.
			 *----------
			 */
			corr_xsum = ((double) (values_cnt - 1)) *
				((double) values_cnt) / 2.0;
			corr_x2sum = ((double) (values_cnt - 1)) *
				((double) values_cnt) * (double) (2 * values_cnt - 1) / 6.0;

			/* And the correlation coefficient reduces to */
			corrs[0] = (values_cnt * corr_xysum - corr_xsum * corr_xsum) /
				(values_cnt * corr_x2sum - corr_xsum * corr_xsum);
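			/*
			 * That is Pearson's
			 *		r = (n*sum(xy) - sum(x)*sum(y)) /
			 *			sqrt((n*sum(x^2) - sum(x)^2) * (n*sum(y^2) - sum(y)^2))
			 * specialized to x and y having identical sums and sums of
			 * squares, so the square root collapses and only corr_xysum had
			 * to be accumulated during the scan.
			 */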
			stats->stakind[slot_idx] = STATISTIC_KIND_CORRELATION;
			stats->staop[slot_idx] = mystats->ltopr;
			stats->stanumbers[slot_idx] = corrs;
			stats->numnumbers[slot_idx] = 1;
			slot_idx++;
		}
	}
	else if (nonnull_cnt > 0)
	{
		/* We found some non-null values, but they were all too wide */
		Assert(nonnull_cnt == toowide_cnt);
		stats->stats_valid = true;
		/* Do the simple null-frac and width stats */
		stats->stanullfrac = (double) null_cnt / (double) samplerows;
		if (is_varwidth)
			stats->stawidth = total_width / (double) nonnull_cnt;
		else
			stats->stawidth = stats->attrtype->typlen;
		/* Assume all too-wide values are distinct, so it's a unique column */
		stats->stadistinct = -1.0;
	}
	else if (null_cnt > 0)
	{
		/* We found only nulls; assume the column is entirely null */
		stats->stats_valid = true;
		stats->stanullfrac = 1.0;
		if (is_varwidth)
			stats->stawidth = 0;	/* "unknown" */
		else
			stats->stawidth = stats->attrtype->typlen;
		stats->stadistinct = 0.0;	/* "unknown" */
	}

	/* We don't need to bother cleaning up any of our temporary palloc's */
}
/*
 * qsort_arg comparator for sorting ScalarItems
 *
 * Aside from sorting the items, we update the tupnoLink[] array
 * whenever two ScalarItems are found to contain equal datums.  The array
 * is indexed by tupno; for each ScalarItem, it contains the highest
 * tupno that that item's datum has been found to be equal to.  This allows
 * us to avoid additional comparisons in compute_scalar_stats().
 */
static int
compare_scalars(const void *a, const void *b, void *arg)
{
	Datum		da = ((const ScalarItem *) a)->value;
	int			ta = ((const ScalarItem *) a)->tupno;
	Datum		db = ((const ScalarItem *) b)->value;
	int			tb = ((const ScalarItem *) b)->tupno;
	CompareScalarsContext *cxt = (CompareScalarsContext *) arg;
	int			compare;

	compare = ApplySortComparator(da, false, db, false, cxt->ssup);
	if (compare != 0)
		return compare;

	/*
	 * The two datums are equal, so update cxt->tupnoLink[].
	 */
	if (cxt->tupnoLink[ta] < tb)
		cxt->tupnoLink[ta] = tb;
	if (cxt->tupnoLink[tb] < ta)
		cxt->tupnoLink[tb] = ta;

	/*
	 * For equal datums, sort by tupno
	 */
	return ta - tb;
}
/*
 * qsort comparator for sorting ScalarMCVItems by position
 */
static int
compare_mcvs(const void *a, const void *b)
{
	int			da = ((const ScalarMCVItem *) a)->first;
	int			db = ((const ScalarMCVItem *) b)->first;

	return da - db;
}
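
/*
 * Illustrative sketch, not used by the code above: the Duj1 computation
 * that appears inline twice in this file, pulled out as a standalone
 * helper so it is easy to test in isolation.  The name duj1_estimate is
 * hypothetical; f1, d, n, and N follow the comments above, and the body
 * mirrors the clamping and rounding the inline copies perform.
 */
#ifdef NOT_USED
static double
duj1_estimate(int f1, int d, double n, double N)
{
	double		stadistinct;

	/* N == 0 shouldn't happen, but just in case ... */
	if (N > 0)
		stadistinct = (n * d) / ((n - f1) + f1 * n / N);
	else
		stadistinct = 0;

	/* Clamp to sane range in case of roundoff error */
	if (stadistinct < d)
		stadistinct = d;
	if (stadistinct > N)
		stadistinct = N;

	/* And round to integer, as the callers above do */
	return floor(stadistinct + 0.5);
}
#endif   /* NOT_USED */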