1 /*-------------------------------------------------------------------------
4 * the postgres vacuum cleaner
6 * Copyright (c) 1994, Regents of the University of California
10 * $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.63 1998/02/26 04:31:03 momjian Exp $
12 *-------------------------------------------------------------------------
14 #include <sys/types.h>
24 #include <utils/portal.h>
25 #include <access/genam.h>
26 #include <access/heapam.h>
27 #include <access/xact.h>
28 #include <storage/bufmgr.h>
29 #include <access/transam.h>
30 #include <catalog/pg_index.h>
31 #include <catalog/index.h>
32 #include <catalog/catname.h>
33 #include <catalog/catalog.h>
34 #include <catalog/pg_class.h>
35 #include <catalog/pg_proc.h>
36 #include <catalog/pg_statistic.h>
37 #include <catalog/pg_type.h>
38 #include <catalog/pg_operator.h>
39 #include <parser/parse_oper.h>
40 #include <storage/smgr.h>
41 #include <storage/lmgr.h>
42 #include <utils/inval.h>
43 #include <utils/mcxt.h>
44 #include <utils/inval.h>
45 #include <utils/syscache.h>
46 #include <utils/builtins.h>
47 #include <commands/vacuum.h>
48 #include <storage/bufpage.h>
49 #include "storage/shmem.h"
50 #ifndef HAVE_GETRUSAGE
51 #include <rusagestub.h>
54 #include <sys/resource.h>
57 /* #include <port-protos.h> *//* Why? */
/* Defined in the buffer manager; drops a relation's buffers past `block`. */
extern int	BlowawayRelationBuffers(Relation rdesc, BlockNumber block);

/*
 * True while a vacuum is in progress.  Checked by the transaction manager
 * on transaction abort so that vc_abort() can remove the pg_vlock file
 * (see vc_init()/vc_shutdown() below).
 */
bool		VacuumRunning = false;

/* Portal providing palloc memory that survives the per-relation commits. */
static Portal vc_portal;

static int	MESSAGE_LEVEL;		/* message level: NOTICE or DEBUG, set in vacuum() */

/*
 * Simple swap helpers.  NOTE(review): these are bare-block macros, not
 * wrapped in do { } while (0), so they would misparse after an unbraced
 * `if` -- current call sites presumably avoid that; verify before reuse.
 */
#define swapLong(a,b) {long tmp; tmp=a; a=b; b=tmp;}
#define swapInt(a,b) {int tmp; tmp=a; a=b; b=tmp;}
#define swapDatum(a,b) {Datum tmp; tmp=a; a=b; b=tmp;}

/* An attribute's "=" comparator was resolved, so equality stats are usable. */
#define VacAttrStatsEqValid(stats) ( stats->f_cmpeq.fn_addr != NULL )
/* "<", ">" and an output function were all resolved: ordering stats usable. */
#define VacAttrStatsLtGtValid(stats) ( stats->f_cmplt.fn_addr != NULL && \
								stats->f_cmpgt.fn_addr != NULL && \
								RegProcedureIsValid(stats->outfunc) )
76 /* non-export function prototypes */
77 static void vc_init(void);
78 static void vc_shutdown(void);
79 static void vc_vacuum(NameData *VacRelP, bool analyze, List *va_cols);
80 static VRelList vc_getrels(NameData *VacRelP);
81 static void vc_vacone(Oid relid, bool analyze, List *va_cols);
82 static void vc_scanheap(VRelStats *vacrelstats, Relation onerel, VPageList Vvpl, VPageList Fvpl);
83 static void vc_rpfheap(VRelStats *vacrelstats, Relation onerel, VPageList Vvpl, VPageList Fvpl, int nindices, Relation *Irel);
84 static void vc_vacheap(VRelStats *vacrelstats, Relation onerel, VPageList vpl);
85 static void vc_vacpage(Page page, VPageDescr vpd);
86 static void vc_vaconeind(VPageList vpl, Relation indrel, int nhtups);
87 static void vc_scanoneind(Relation indrel, int nhtups);
88 static void vc_attrstats(Relation onerel, VRelStats *vacrelstats, HeapTuple htup);
89 static void vc_bucketcpy(AttributeTupleForm attr, Datum value, Datum *bucket, int16 *bucket_len);
90 static void vc_updstats(Oid relid, int npages, int ntups, bool hasindex, VRelStats *vacrelstats);
91 static void vc_delhilowstats(Oid relid, int attcnt, int *attnums);
92 static void vc_setpagelock(Relation rel, BlockNumber blkno);
93 static VPageDescr vc_tidreapped(ItemPointer itemptr, VPageList vpl);
94 static void vc_reappage(VPageList vpl, VPageDescr vpc);
95 static void vc_vpinsert(VPageList vpl, VPageDescr vpnew);
96 static void vc_free(VRelList vrl);
97 static void vc_getindices(Oid relid, int *nindices, Relation **Irel);
98 static void vc_clsindices(int nindices, Relation *Irel);
99 static void vc_mkindesc(Relation onerel, int nindices, Relation *Irel, IndDesc **Idesc);
100 static char *vc_find_eq(char *bot, int nelem, int size, char *elm, int (*compar) (char *, char *));
101 static int vc_cmp_blk(char *left, char *right);
102 static int vc_cmp_offno(char *left, char *right);
103 static bool vc_enough_space(VPageDescr vpd, Size len);
/*
 * vacuum() -- entry point for the VACUUM command.
 *
 * vacrel:  relation name, or NULL to vacuum the whole database.
 * verbose: when set, MESSAGE_LEVEL becomes NOTICE instead of DEBUG.
 * analyze: also collect attribute statistics.
 * va_spec: list of column names for VACUUM ANALYZE (must be empty otherwise).
 *
 * NOTE(review): this excerpt elides several lines of the original function.
 */
vacuum(char *vacrel, bool verbose, bool analyze, List *va_spec)
	PortalVariableMemory pmem;

	/*
	 * Create a portal for safe memory across transactions.  We need to
	 * palloc the name space for it because our hash function expects the
	 * name to be on a longword boundary.  CreatePortal copies the name to
	 * safe storage for us.
	 */
	pname = (char *) palloc(strlen(VACPNAME) + 1);
	strcpy(pname, VACPNAME);
	vc_portal = CreatePortal(pname);

		MESSAGE_LEVEL = NOTICE;
		MESSAGE_LEVEL = DEBUG;

	/* vacrel gets de-allocated on transaction commit, so copy it */
		strcpy(VacRel.data, vacrel);

	pmem = PortalGetVariableMemory(vc_portal);
	old = MemoryContextSwitchTo((MemoryContext) pmem);

	if (va_spec != NIL && !analyze)
		elog(ERROR, "Can't vacuum columns, only tables.  You can 'vacuum analyze' columns.");

		char	   *col = (char *) lfirst(le);

		/* copy each column name into portal memory so it survives commits */
		dest = (char *) palloc(strlen(col) + 1);
		va_cols = lappend(va_cols, dest);
	MemoryContextSwitchTo(old);

	/* initialize vacuum cleaner */

	/* vacuum the database: one named relation, or all relations */
		vc_vacuum(&VacRel, analyze, va_cols);
		vc_vacuum(NULL, analyze, NIL);

	PortalDestroy(&vc_portal);
/*
 * vc_init(), vc_shutdown() -- start up and shut down the vacuum cleaner.
 *
 * We run exactly one vacuum cleaner at a time.  We use the file system
 * to guarantee an exclusive lock on vacuuming, since a single vacuum
 * cleaner instantiation crosses transaction boundaries, and we'd lose
 * postgres-style locks at the end of every transaction.
 *
 * The strangeness with committing and starting transactions in the
 * init and shutdown routines is due to the fact that the vacuum cleaner
 * is invoked via a sql command, and so is already executing inside
 * a transaction.  We need to leave ourselves in a predictable state
 * on entry and exit to the vacuum cleaner.  We commit the transaction
 * started in PostgresMain() inside vc_init(), and start one in
 * vc_shutdown() to match the commit waiting for us back in
 * PostgresMain().
 */
	/* O_CREAT|O_EXCL makes creation of "pg_vlock" an atomic mutex acquire */
	if ((fd = open("pg_vlock", O_CREAT | O_EXCL, 0600)) < 0)
		elog(ERROR, "can't create lock file -- another vacuum cleaner running?");

	/*
	 * By here, exclusive open on the lock file succeeded.  If we abort
	 * for any reason during vacuuming, we need to remove the lock file.
	 * This global variable is checked in the transaction manager on xact
	 * abort, and the routine vc_abort() is called if necessary.
	 */
	VacuumRunning = true;

	/* matches the StartTransaction in PostgresMain() */
	CommitTransactionCommand();
	/* on entry, not in a transaction: release the filesystem vacuum mutex */
	if (unlink("pg_vlock") < 0)
		elog(ERROR, "vacuum: can't destroy lock file!");

	/* okay, we're done -- abort handling no longer needs to clean up */
	VacuumRunning = false;

	/* matches the CommitTransaction in PostgresMain() */
	StartTransactionCommand();
	/* on abort, remove the vacuum cleaner lock file (presumably vc_abort();
	 * the unlink itself is elided from this excerpt -- verify in full source) */
	VacuumRunning = false;
/*
 * vc_vacuum() -- vacuum the database.
 *
 * This routine builds a list of relations to vacuum, and then calls
 * code that vacuums them one at a time.  We are careful to vacuum each
 * relation in a separate transaction in order to avoid holding too many
 * locks at once.
 *
 * VacRelP: single relation to vacuum, or NULL for the whole database.
 */
vc_vacuum(NameData *VacRelP, bool analyze, List *va_cols)
	/* get list of relations */
	vrl = vc_getrels(VacRelP);

	/* whole-database ANALYZE: clear all old hi/low stats up front */
	if (analyze && VacRelP == NULL && vrl != NULL)
		vc_delhilowstats(InvalidOid, 0, NULL);

	/* vacuum each heap relation */
	for (cur = vrl; cur != (VRelList) NULL; cur = cur->vrl_next)
		vc_vacone(cur->vrl_relid, analyze, va_cols);
/*
 * vc_getrels() -- build a VRelList of relations to vacuum by scanning
 * pg_class.  If VacRelP is non-NULL the scan key matches that one name;
 * otherwise it matches all relations of relkind 'r'.  List cells are
 * allocated in the vacuum portal's memory so they survive the
 * per-relation transaction commits.
 *
 * NOTE(review): this excerpt elides several lines of the original function.
 */
vc_getrels(NameData *VacRelP)
	HeapScanDesc pgcscan;
	PortalVariableMemory portalmem;

	StartTransactionCommand();

		/* named-relation case: key on pg_class.relname */
		ScanKeyEntryInitialize(&pgckey, 0x0, Anum_pg_class_relname,
							   NameEqualRegProcedure,
							   PointerGetDatum(VacRelP->data));

		/* whole-database case: key on pg_class.relkind == 'r' */
		ScanKeyEntryInitialize(&pgckey, 0x0, Anum_pg_class_relkind,
							   CharacterEqualRegProcedure, CharGetDatum('r'));

	portalmem = PortalGetVariableMemory(vc_portal);
	vrl = cur = (VRelList) NULL;

	pgclass = heap_openr(RelationRelationName);
	pgcdesc = RelationGetTupleDescriptor(pgclass);

	pgcscan = heap_beginscan(pgclass, false, false, 1, &pgckey);

	while (HeapTupleIsValid(pgctup = heap_getnext(pgcscan, 0, &buf)))
		d = heap_getattr(pgctup, Anum_pg_class_relname, pgcdesc, &n);

		/*
		 * don't vacuum large objects for now - something breaks when we
		 * do.  Large-object relations are named "xinv<oid>"/"xinx<oid>".
		 */
		if ((strlen(rname) >= 5) && rname[0] == 'x' &&
			rname[1] == 'i' && rname[2] == 'n' &&
			(rname[3] == 'v' || rname[3] == 'x') &&
			rname[4] >= '0' && rname[4] <= '9')
			elog(NOTICE, "Rel %s: can't vacuum LargeObjects now",

		d = heap_getattr(pgctup, Anum_pg_class_relkind, pgcdesc, &n);
		rkind = DatumGetChar(d);

		/* skip system relations */
			elog(NOTICE, "Vacuum: can not process index and certain system tables");

		/* get a relation list entry for this guy */
		old = MemoryContextSwitchTo((MemoryContext) portalmem);
		if (vrl == (VRelList) NULL)
			vrl = cur = (VRelList) palloc(sizeof(VRelListData));
			cur->vrl_next = (VRelList) palloc(sizeof(VRelListData));
		MemoryContextSwitchTo(old);

		cur->vrl_relid = pgctup->t_oid;
		cur->vrl_next = (VRelList) NULL;

		/* wei hates it if you forget to do this */

		elog(NOTICE, "Vacuum: table not found");

	heap_endscan(pgcscan);

	CommitTransactionCommand();
/*
 * vc_vacone() -- vacuum one heap relation
 *
 * This routine vacuums a single heap, cleans out its indices, and
 * updates its npages and ntups statistics.
 *
 * Doing one heap at a time incurs extra overhead, since we need to
 * check that the heap exists again just before we vacuum it.  The
 * reason that we do this is so that vacuuming can be spread across
 * many small transactions.  Otherwise, two-phase locking would require
 * us to lock the entire database during one pass of the vacuum cleaner.
 *
 * NOTE(review): this excerpt elides several lines of the original function.
 */
vc_vacone(Oid relid, bool analyze, List *va_cols)
	HeapScanDesc pgcscan;
	VPageListData Vvpl;			/* List of pages to vacuum and/or clean */
	VPageListData Fvpl;			/* List of pages with space enough for re-use */
	VRelStats  *vacrelstats;

	StartTransactionCommand();

	/* re-look-up the relation's pg_class tuple by oid */
	ScanKeyEntryInitialize(&pgckey, 0x0, ObjectIdAttributeNumber,
						   ObjectIdEqualRegProcedure,
						   ObjectIdGetDatum(relid));

	pgclass = heap_openr(RelationRelationName);
	pgcdesc = RelationGetTupleDescriptor(pgclass);
	pgcscan = heap_beginscan(pgclass, false, false, 1, &pgckey);

	/*
	 * Race condition -- if the pg_class tuple has gone away since the
	 * last time we saw it, we don't need to vacuum it.
	 */
	if (!HeapTupleIsValid(pgctup = heap_getnext(pgcscan, 0, &pgcbuf)))
		heap_endscan(pgcscan);
		CommitTransactionCommand();

	/* now open the class and vacuum it */
	onerel = heap_open(relid);

	vacrelstats = (VRelStats *) palloc(sizeof(VRelStats));
	vacrelstats->relid = relid;
	vacrelstats->npages = vacrelstats->ntups = 0;
	vacrelstats->hasindex = false;
	/* statistics collection is skipped for system relations */
	if (analyze && !IsSystemRelationName((RelationGetRelationName(onerel))->data))
		AttributeTupleForm *attr;

		attr_cnt = onerel->rd_att->natts;
		attr = onerel->rd_att->attrs;

		/* explicit column list: map names to attribute numbers */
		if (length(va_cols) > attr_cnt)
			elog(ERROR, "vacuum: too many attributes specified for relation %s",
				 (RelationGetRelationName(onerel))->data);
		attnums = (int *) palloc(attr_cnt * sizeof(int));
			char	   *col = (char *) lfirst(le);

			for (i = 0; i < attr_cnt; i++)
				if (namestrcmp(&(attr[i]->attname), col) == 0)
			if (i < attr_cnt)	/* found */
				elog(ERROR, "vacuum: there is no attribute %s in %s",
					 col, (RelationGetRelationName(onerel))->data);

		vacrelstats->vacattrstats =
			(VacAttrStats *) palloc(attr_cnt * sizeof(VacAttrStats));

		/* initialize one VacAttrStats per analyzed attribute */
		for (i = 0; i < attr_cnt; i++)
			Operator	func_operator;
			OperatorTupleForm pgopform;

			stats = &vacrelstats->vacattrstats[i];
			stats->attr = palloc(ATTRIBUTE_TUPLE_SIZE);
			memmove(stats->attr, attr[((attnums) ? attnums[i] : i)], ATTRIBUTE_TUPLE_SIZE);
			stats->best = stats->guess1 = stats->guess2 = 0;
			stats->max = stats->min = 0;
			stats->best_len = stats->guess1_len = stats->guess2_len = 0;
			stats->max_len = stats->min_len = 0;
			stats->initialized = false;
			stats->best_cnt = stats->guess1_cnt = stats->guess1_hits = stats->guess2_hits = 0;
			stats->max_cnt = stats->min_cnt = stats->null_cnt = stats->nonnull_cnt = 0;

			/* resolve "=", "<", ">" comparators for the attribute's type;
			 * fn_addr stays NULL when an operator doesn't exist, which the
			 * VacAttrStats*Valid macros test later */
			func_operator = oper("=", stats->attr->atttypid, stats->attr->atttypid, true);
			if (func_operator != NULL)
				pgopform = (OperatorTupleForm) GETSTRUCT(func_operator);
				fmgr_info(pgopform->oprcode, &(stats->f_cmpeq));
				stats->f_cmpeq.fn_addr = NULL;

			func_operator = oper("<", stats->attr->atttypid, stats->attr->atttypid, true);
			if (func_operator != NULL)
				pgopform = (OperatorTupleForm) GETSTRUCT(func_operator);
				fmgr_info(pgopform->oprcode, &(stats->f_cmplt));
				stats->f_cmplt.fn_addr = NULL;

			func_operator = oper(">", stats->attr->atttypid, stats->attr->atttypid, true);
			if (func_operator != NULL)
				pgopform = (OperatorTupleForm) GETSTRUCT(func_operator);
				fmgr_info(pgopform->oprcode, &(stats->f_cmpgt));
				stats->f_cmpgt.fn_addr = NULL;

			/* look up the type's output function for printing hi/low values */
			pgttup = SearchSysCacheTuple(TYPOID,
										 ObjectIdGetDatum(stats->attr->atttypid),
			if (HeapTupleIsValid(pgttup))
				stats->outfunc = ((TypeTupleForm) GETSTRUCT(pgttup))->typoutput;
				stats->outfunc = InvalidOid;
		vacrelstats->va_natts = attr_cnt;
		/* delete old hi/low stats: all attrs, or just the listed ones */
		vc_delhilowstats(relid, ((attnums) ? attr_cnt : 0), attnums);
		vacrelstats->va_natts = 0;
		vacrelstats->vacattrstats = (VacAttrStats *) NULL;

	/* we require the relation to be locked until the indices are cleaned */
	RelationSetLockForWrite(onerel);

	Vvpl.vpl_npages = Fvpl.vpl_npages = 0;
	vc_scanheap(vacrelstats, onerel, &Vvpl, &Fvpl);

	/* Now open indices */
	Irel = (Relation *) NULL;
	vc_getindices(vacrelstats->relid, &nindices, &Irel);
		vacrelstats->hasindex = true;
		vacrelstats->hasindex = false;

	/* Clean/scan index relation(s) */
	if (Irel != (Relation *) NULL)
		if (Vvpl.vpl_npages > 0)
			for (i = 0; i < nindices; i++)
				vc_vaconeind(&Vvpl, Irel[i], vacrelstats->ntups);
		/* just scan indices to update statistic */
			for (i = 0; i < nindices; i++)
				vc_scanoneind(Irel[i], vacrelstats->ntups);

	if (Fvpl.vpl_npages > 0)	/* Try to shrink heap */
		vc_rpfheap(vacrelstats, onerel, &Vvpl, &Fvpl, nindices, Irel);
	if (Irel != (Relation *) NULL)
		vc_clsindices(nindices, Irel);
	if (Vvpl.vpl_npages > 0)	/* Clean pages from Vvpl list */
		vc_vacheap(vacrelstats, onerel, &Vvpl);

	/* ok - free Vvpl list of reapped pages */
	if (Vvpl.vpl_npages > 0)
		vpp = Vvpl.vpl_pgdesc;
		for (i = 0; i < Vvpl.vpl_npages; i++, vpp++)
		pfree(Vvpl.vpl_pgdesc);
	if (Fvpl.vpl_npages > 0)
		pfree(Fvpl.vpl_pgdesc);

	/* all done with this class */
	heap_endscan(pgcscan);

	/* update statistics in pg_class */
	vc_updstats(vacrelstats->relid, vacrelstats->npages, vacrelstats->ntups,
				vacrelstats->hasindex, vacrelstats);

	/* next command frees attribute stats */
	CommitTransactionCommand();
600 * vc_scanheap() -- scan an open heap relation
602 * This routine sets commit times, constructs Vvpl list of
603 * empty/uninitialized pages and pages with dead tuples and
604 * ~LP_USED line pointers, constructs Fvpl list of pages
605 * appropriate for purposes of shrinking and maintains statistics
606 * on the number of live tuples in a heap.
609 vc_scanheap(VRelStats *vacrelstats, Relation onerel,
610 VPageList Vvpl, VPageList Fvpl)
639 Size min_tlen = MAXTUPLEN;
641 int32 i /* , attr_cnt */ ;
644 bool do_shrinking = true;
646 getrusage(RUSAGE_SELF, &ru0);
648 nvac = ntups = nunused = ncrash = nempg = nnepg = nchpg = nemend = 0;
651 relname = (RelationGetRelationName(onerel))->data;
653 nblocks = RelationGetNumberOfBlocks(onerel);
655 vpc = (VPageDescr) palloc(sizeof(VPageDescrData) + MaxOffsetNumber * sizeof(OffsetNumber));
658 for (blkno = 0; blkno < nblocks; blkno++)
660 buf = ReadBuffer(onerel, blkno);
661 page = BufferGetPage(buf);
662 vpc->vpd_blkno = blkno;
667 elog(NOTICE, "Rel %s: Uninitialized page %u - fixing",
669 PageInit(page, BufferGetPageSize(buf), 0);
670 vpc->vpd_free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower;
671 frsize += (vpc->vpd_free - sizeof(ItemIdData));
674 vc_reappage(Vvpl, vpc);
679 if (PageIsEmpty(page))
681 vpc->vpd_free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower;
682 frsize += (vpc->vpd_free - sizeof(ItemIdData));
685 vc_reappage(Vvpl, vpc);
692 maxoff = PageGetMaxOffsetNumber(page);
693 for (offnum = FirstOffsetNumber;
695 offnum = OffsetNumberNext(offnum))
697 itemid = PageGetItemId(page, offnum);
700 * Collect un-used items too - it's possible to have indices
701 * pointing here after crash.
703 if (!ItemIdIsUsed(itemid))
705 vpc->vpd_voff[vpc->vpd_noff++] = offnum;
710 htup = (HeapTuple) PageGetItem(page, itemid);
713 if (!(htup->t_infomask & HEAP_XMIN_COMMITTED))
715 if (htup->t_infomask & HEAP_XMIN_INVALID)
719 if (TransactionIdDidAbort(htup->t_xmin))
721 else if (TransactionIdDidCommit(htup->t_xmin))
723 htup->t_infomask |= HEAP_XMIN_COMMITTED;
726 else if (!TransactionIdIsInProgress(htup->t_xmin))
730 * Not Aborted, Not Committed, Not in Progress -
731 * so it's from crashed process. - vadim 11/26/96
738 elog(NOTICE, "Rel %s: TID %u/%u: InsertTransactionInProgress %u - can't shrink relation",
739 relname, blkno, offnum, htup->t_xmin);
740 do_shrinking = false;
746 * here we are concerned about tuples with xmin committed and
747 * xmax unknown or committed
749 if (htup->t_infomask & HEAP_XMIN_COMMITTED &&
750 !(htup->t_infomask & HEAP_XMAX_INVALID))
752 if (htup->t_infomask & HEAP_XMAX_COMMITTED)
754 else if (TransactionIdDidAbort(htup->t_xmax))
756 htup->t_infomask |= HEAP_XMAX_INVALID;
759 else if (TransactionIdDidCommit(htup->t_xmax))
761 else if (!TransactionIdIsInProgress(htup->t_xmax))
765 * Not Aborted, Not Committed, Not in Progress - so it
766 * from crashed process. - vadim 06/02/97
768 htup->t_infomask |= HEAP_XMAX_INVALID;;
773 elog(NOTICE, "Rel %s: TID %u/%u: DeleteTransactionInProgress %u - can't shrink relation",
774 relname, blkno, offnum, htup->t_xmax);
775 do_shrinking = false;
780 * It's possibly! But from where it comes ? And should we fix
781 * it ? - vadim 11/28/96
783 itemptr = &(htup->t_ctid);
784 if (!ItemPointerIsValid(itemptr) ||
785 BlockIdGetBlockNumber(&(itemptr->ip_blkid)) != blkno)
787 elog(NOTICE, "Rel %s: TID %u/%u: TID IN TUPLEHEADER %u/%u IS NOT THE SAME. TUPGONE %d.",
788 relname, blkno, offnum,
789 BlockIdGetBlockNumber(&(itemptr->ip_blkid)),
790 itemptr->ip_posid, tupgone);
796 if (htup->t_len != itemid->lp_len)
798 elog(NOTICE, "Rel %s: TID %u/%u: TUPLE_LEN IN PAGEHEADER %u IS NOT THE SAME AS IN TUPLEHEADER %u. TUPGONE %d.",
799 relname, blkno, offnum,
800 itemid->lp_len, htup->t_len, tupgone);
802 if (!OidIsValid(htup->t_oid))
804 elog(NOTICE, "Rel %s: TID %u/%u: OID IS INVALID. TUPGONE %d.",
805 relname, blkno, offnum, tupgone);
812 if (tempPage == (Page) NULL)
816 pageSize = PageGetPageSize(page);
817 tempPage = (Page) palloc(pageSize);
818 memmove(tempPage, page, pageSize);
821 lpp = &(((PageHeader) tempPage)->pd_linp[offnum - 1]);
824 lpp->lp_flags &= ~LP_USED;
826 vpc->vpd_voff[vpc->vpd_noff++] = offnum;
834 if (htup->t_len < min_tlen)
835 min_tlen = htup->t_len;
836 if (htup->t_len > max_tlen)
837 max_tlen = htup->t_len;
838 vc_attrstats(onerel, vacrelstats, htup);
850 if (tempPage != (Page) NULL)
851 { /* Some tuples are gone */
852 PageRepairFragmentation(tempPage);
853 vpc->vpd_free = ((PageHeader) tempPage)->pd_upper - ((PageHeader) tempPage)->pd_lower;
854 frsize += vpc->vpd_free;
855 vc_reappage(Vvpl, vpc);
857 tempPage = (Page) NULL;
859 else if (vpc->vpd_noff > 0)
860 { /* there are only ~LP_USED line pointers */
861 vpc->vpd_free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower;
862 frsize += vpc->vpd_free;
863 vc_reappage(Vvpl, vpc);
875 /* save stats in the rel list for use later */
876 vacrelstats->ntups = ntups;
877 vacrelstats->npages = nblocks;
878 /* vacrelstats->natts = attr_cnt;*/
880 min_tlen = max_tlen = 0;
881 vacrelstats->min_tlen = min_tlen;
882 vacrelstats->max_tlen = max_tlen;
884 Vvpl->vpl_nemend = nemend;
885 Fvpl->vpl_nemend = nemend;
888 * Try to make Fvpl keeping in mind that we can't use free space of
889 * "empty" end-pages and last page if it reapped.
891 if (do_shrinking && Vvpl->vpl_npages - nemend > 0)
893 int nusf; /* blocks usefull for re-using */
895 nusf = Vvpl->vpl_npages - nemend;
896 if ((Vvpl->vpl_pgdesc[nusf - 1])->vpd_blkno == nblocks - nemend - 1)
899 for (i = 0; i < nusf; i++)
901 vp = Vvpl->vpl_pgdesc[i];
902 if (vc_enough_space(vp, min_tlen))
904 vc_vpinsert(Fvpl, vp);
905 frsusf += vp->vpd_free;
910 getrusage(RUSAGE_SELF, &ru1);
912 elog(MESSAGE_LEVEL, "Rel %s: Pages %u: Changed %u, Reapped %u, Empty %u, New %u; \
913 Tup %u: Vac %u, Crash %u, UnUsed %u, MinLen %u, MaxLen %u; Re-using: Free/Avail. Space %u/%u; EndEmpty/Avail. Pages %u/%u. Elapsed %u/%u sec.",
915 nblocks, nchpg, Vvpl->vpl_npages, nempg, nnepg,
916 ntups, nvac, ncrash, nunused, min_tlen, max_tlen,
917 frsize, frsusf, nemend, Fvpl->vpl_npages,
918 ru1.ru_stime.tv_sec - ru0.ru_stime.tv_sec,
919 ru1.ru_utime.tv_sec - ru0.ru_utime.tv_sec);
/*
 * vc_rpfheap() -- try to repair relation fragmentation
 *
 * This routine marks dead tuples as unused and tries to re-use dead space
 * by moving tuples (and inserting indices if needed). It constructs
 * Nvpl list of free-ed pages (moved tuples) and clean indices
 * for them after committing (in hack-manner - without losing locks
 * and freeing memory!) current transaction. It truncates relation
 * if some end-blocks are gone away.
 *
 * NOTE(review): this excerpt elides several lines of the original function;
 * the statement order here (move, logical delete, commit-XID hack, truncate)
 * is load-bearing -- do not reorder.
 */
vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
		   VPageList Vvpl, VPageList Fvpl, int nindices, Relation *Irel)
	OffsetNumber offnum = 0,
	TupleDesc	tupdesc = NULL;
	Datum	   *idatum = NULL;
	InsertIndexResult iresult;
	VPageDescr	ToVpd = NULL,

	getrusage(RUSAGE_SELF, &ru0);

	/* moved tuples are (re)inserted under our own XID/CID */
	myXID = GetCurrentTransactionId();
	myCID = GetCurrentCommandId();

	if (Irel != (Relation *) NULL)	/* preparation for index' inserts */
		vc_mkindesc(onerel, nindices, Irel, &Idesc);
		tupdesc = RelationGetTupleDescriptor(onerel);
		idatum = (Datum *) palloc(INDEX_MAX_KEYS * sizeof(*idatum));
		inulls = (char *) palloc(INDEX_MAX_KEYS * sizeof(*inulls));

	/* Fvpl = pages with reusable space, Fvplast/Fblklast track its tail;
	 * Vvpl = all reaped pages minus the empty end-pages */
	Fnpages = Fvpl->vpl_npages;
	Fvplast = Fvpl->vpl_pgdesc[Fnpages - 1];
	Fblklast = Fvplast->vpd_blkno;
	Assert(Vvpl->vpl_npages > Vvpl->vpl_nemend);
	Vnpages = Vvpl->vpl_npages - Vvpl->vpl_nemend;
	Vvplast = Vvpl->vpl_pgdesc[Vnpages - 1];
	Vblklast = Vvplast->vpd_blkno;
	Assert(Vblklast >= Fblklast);
	ToBuf = InvalidBuffer;

	vpc = (VPageDescr) palloc(sizeof(VPageDescrData) + MaxOffsetNumber * sizeof(OffsetNumber));
	vpc->vpd_nusd = vpc->vpd_noff = 0;

	/* walk the heap backwards from the last non-empty block, moving
	 * end-of-relation tuples into earlier free space */
	nblocks = vacrelstats->npages;
	for (blkno = nblocks - Vvpl->vpl_nemend - 1;; blkno--)
		/* if it's reapped page and it was used by me - quit */
		if (blkno == Fblklast && Fvplast->vpd_nusd > 0)

		buf = ReadBuffer(onerel, blkno);
		page = BufferGetPage(buf);

		isempty = PageIsEmpty(page);

		if (blkno == Vblklast)	/* it's reapped page */
			if (Vvplast->vpd_noff > 0)	/* there are dead tuples */
			{					/* on this page - clean */
				vc_vacpage(page, Vvplast);
			Assert(Vnpages > 0);
			/* get prev reapped page from Vvpl */
			Vvplast = Vvpl->vpl_pgdesc[Vnpages - 1];
			Vblklast = Vvplast->vpd_blkno;
			if (blkno == Fblklast)	/* this page in Fvpl too */
				Assert(Fnpages > 0);
				Assert(Fvplast->vpd_nusd == 0);
				/* get prev reapped page from Fvpl */
				Fvplast = Fvpl->vpl_pgdesc[Fnpages - 1];
				Fblklast = Fvplast->vpd_blkno;
			Assert(Fblklast <= Vblklast);

		vpc->vpd_blkno = blkno;
		maxoff = PageGetMaxOffsetNumber(page);
		for (offnum = FirstOffsetNumber;
			 offnum = OffsetNumberNext(offnum))
			itemid = PageGetItemId(page, offnum);

			if (!ItemIdIsUsed(itemid))

			htup = (HeapTuple) PageGetItem(page, itemid);

			/* try to find new page for this tuple */
			if (ToBuf == InvalidBuffer ||
				!vc_enough_space(ToVpd, tlen))
				if (ToBuf != InvalidBuffer)
					ToBuf = InvalidBuffer;

					/*
					 * If no one tuple can't be added to this page -
					 * remove page from Fvpl. - vadim 11/27/96
					 *
					 * But we can't remove last page - this is our
					 * "show-stopper" !!! - vadim 02/25/98
					 */
					if (ToVpd != Fvplast &&
						!vc_enough_space(ToVpd, vacrelstats->min_tlen))
						Assert(Fnpages > ToVpI + 1);
						memmove(Fvpl->vpl_pgdesc + ToVpI,
								Fvpl->vpl_pgdesc + ToVpI + 1,
								sizeof(VPageDescr *) * (Fnpages - ToVpI - 1));
						Assert(Fvplast == Fvpl->vpl_pgdesc[Fnpages - 1]);

				/* linear search Fvpl for the first page with enough room */
				for (i = 0; i < Fnpages; i++)
					if (vc_enough_space(Fvpl->vpl_pgdesc[i], tlen))
					break;		/* can't move item anywhere */
				ToVpd = Fvpl->vpl_pgdesc[ToVpI];
				ToBuf = ReadBuffer(onerel, ToVpd->vpd_blkno);
				ToPage = BufferGetPage(ToBuf);
				/* if this page was not used before - clean it */
				if (!PageIsEmpty(ToPage) && ToVpd->vpd_nusd == 0)
					vc_vacpage(ToPage, ToVpd);

			/* copy the tuple, then stamp the copy as inserted by us */
			newtup = (HeapTuple) palloc(tlen);
			memmove((char *) newtup, (char *) htup, tlen);

			/* store transaction information */
			TransactionIdStore(myXID, &(newtup->t_xmin));
			newtup->t_cmin = myCID;
			StoreInvalidTransactionId(&(newtup->t_xmax));
			/* set xmin to unknown and xmax to invalid */
			newtup->t_infomask &= ~(HEAP_XACT_MASK);
			newtup->t_infomask |= HEAP_XMAX_INVALID;

			/* add tuple to the page */
			newoff = PageAddItem(ToPage, (Item) newtup, tlen,
								 InvalidOffsetNumber, LP_USED);
			if (newoff == InvalidOffsetNumber)
failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
					 tlen, ToVpd->vpd_blkno, ToVpd->vpd_free,
					 ToVpd->vpd_nusd, ToVpd->vpd_noff);
			newitemid = PageGetItemId(ToPage, newoff);
			newtup = (HeapTuple) PageGetItem(ToPage, newitemid);
			ItemPointerSet(&(newtup->t_ctid), ToVpd->vpd_blkno, newoff);

			/* now logically delete end-tuple */
			TransactionIdStore(myXID, &(htup->t_xmax));
			htup->t_cmax = myCID;
			/* set xmax to unknown */
			htup->t_infomask &= ~(HEAP_XMAX_INVALID | HEAP_XMAX_COMMITTED);

			ToVpd->vpd_free = ((PageHeader) ToPage)->pd_upper - ((PageHeader) ToPage)->pd_lower;
			vpc->vpd_voff[vpc->vpd_noff++] = offnum;

			/* insert index' tuples if needed */
			if (Irel != (Relation *) NULL)
				for (i = 0, idcur = Idesc; i < nindices; i++, idcur++)
									   (AttrNumber *) &(idcur->tform->indkey[0]),
					iresult = index_insert(
		}						/* walk along page */

		if (vpc->vpd_noff > 0)	/* some tuples were moved */
			vc_reappage(&Nvpl, vpc);

		if (offnum <= maxoff)
			break;				/* some item(s) left */

	}							/* walk along relation */

	blkno++;					/* new number of blocks */

	if (ToBuf != InvalidBuffer)

	/*
	 * We have to commit our tuple' movings before we'll truncate
	 * relation, but we shouldn't lose our locks. And so - quick hack:
	 * flush buffers and record status of current transaction as
	 * committed, and continue. - vadim 11/13/96
	 */
	FlushBufferPool(!TransactionFlushEnabled());
	TransactionIdCommit(myXID);
	FlushBufferPool(!TransactionFlushEnabled());

	/*
	 * Clean uncleaned reapped pages from Vvpl list and set xmin committed
	 * for inserted tuples
	 */
	for (i = 0, vpp = Vvpl->vpl_pgdesc; i < Vnpages; i++, vpp++)
		Assert((*vpp)->vpd_blkno < blkno);
		buf = ReadBuffer(onerel, (*vpp)->vpd_blkno);
		page = BufferGetPage(buf);
		if ((*vpp)->vpd_nusd == 0)	/* this page was not used */

			/*
			 * noff == 0 in empty pages only - such pages should be
			 * re-used
			 */
			Assert((*vpp)->vpd_noff > 0);
			vc_vacpage(page, *vpp);
		/* this page was used: hint-mark our own insertions as committed */
			moff = PageGetMaxOffsetNumber(page);
			for (newoff = FirstOffsetNumber;
				 newoff = OffsetNumberNext(newoff))
				itemid = PageGetItemId(page, newoff);
				if (!ItemIdIsUsed(itemid))
				htup = (HeapTuple) PageGetItem(page, itemid);
				if (TransactionIdEquals((TransactionId) htup->t_xmin, myXID))
					htup->t_infomask |= HEAP_XMIN_COMMITTED;
			Assert((*vpp)->vpd_nusd == ntups);

	Assert(nmoved == nchkmvd);

	getrusage(RUSAGE_SELF, &ru1);

	elog(MESSAGE_LEVEL, "Rel %s: Pages: %u --> %u; Tuple(s) moved: %u. \
Elapsed %u/%u sec.",
		 (RelationGetRelationName(onerel))->data,
		 nblocks, blkno, nmoved,
		 ru1.ru_stime.tv_sec - ru0.ru_stime.tv_sec,
		 ru1.ru_utime.tv_sec - ru0.ru_utime.tv_sec);

	if (Nvpl.vpl_npages > 0)
		/* vacuum indices again if needed */
		if (Irel != (Relation *) NULL)

			/* re-sort Nvpl.vpl_pgdesc (reverse it back into blkno order,
			 * since the heap walk above went backwards) */
			for (vpleft = Nvpl.vpl_pgdesc,
				 vpright = Nvpl.vpl_pgdesc + Nvpl.vpl_npages - 1;
				 vpleft < vpright; vpleft++, vpright--)

			for (i = 0; i < nindices; i++)
				vc_vaconeind(&Nvpl, Irel[i], vacrelstats->ntups);

		/*
		 * clean moved tuples from last page in Nvpl list if some tuples
		 * were left there
		 */
		if (vpc->vpd_noff > 0 && offnum <= maxoff)
			Assert(vpc->vpd_blkno == blkno - 1);
			buf = ReadBuffer(onerel, vpc->vpd_blkno);
			page = BufferGetPage(buf);
			for (offnum = FirstOffsetNumber;
				 offnum = OffsetNumberNext(offnum))
				itemid = PageGetItemId(page, offnum);
				if (!ItemIdIsUsed(itemid))
				htup = (HeapTuple) PageGetItem(page, itemid);
				Assert(TransactionIdEquals((TransactionId) htup->t_xmax, myXID));
				itemid->lp_flags &= ~LP_USED;
			Assert(vpc->vpd_noff == ntups);
			PageRepairFragmentation(page);

	/* now - free new list of reapped pages */
	vpp = Nvpl.vpl_pgdesc;
	for (i = 0; i < Nvpl.vpl_npages; i++, vpp++)
	pfree(Nvpl.vpl_pgdesc);

	/* truncate relation */
	if (blkno < nblocks)
		i = BlowawayRelationBuffers(onerel, blkno);
		elog(FATAL, "VACUUM (vc_rpfheap): BlowawayRelationBuffers returned %d", i);
		blkno = smgrtruncate(DEFAULT_SMGR, onerel, blkno);
		vacrelstats->npages = blkno;	/* set new number of blocks */

	if (Irel != (Relation *) NULL)	/* pfree index' allocations */
		vc_clsindices(nindices, Irel);
/*
 * vc_vacheap() -- free dead tuples
 *
 * This routine marks dead tuples as unused and truncates relation
 * if there are "empty" end-blocks.
 */
vc_vacheap(VRelStats *vacrelstats, Relation onerel, VPageList Vvpl)
	nblocks = Vvpl->vpl_npages;
	nblocks -= Vvpl->vpl_nemend;	/* nothing to do with them */

	/* clean each reaped page that actually has dead line pointers */
	for (i = 0, vpp = Vvpl->vpl_pgdesc; i < nblocks; i++, vpp++)
		if ((*vpp)->vpd_noff > 0)
			buf = ReadBuffer(onerel, (*vpp)->vpd_blkno);
			page = BufferGetPage(buf);
			vc_vacpage(page, *vpp);

	/* truncate relation if there are some empty end-pages */
	if (Vvpl->vpl_nemend > 0)
		Assert(vacrelstats->npages >= Vvpl->vpl_nemend);
		nblocks = vacrelstats->npages - Vvpl->vpl_nemend;
		elog(MESSAGE_LEVEL, "Rel %s: Pages: %u --> %u.",
			 (RelationGetRelationName(onerel))->data,
			 vacrelstats->npages, nblocks);

		/*
		 * we have to flush "empty" end-pages (if changed, but who knows
		 * it) before truncation
		 */
		FlushBufferPool(!TransactionFlushEnabled());

		/* drop cached buffers past the new end before physically truncating */
		i = BlowawayRelationBuffers(onerel, nblocks);
		elog(FATAL, "VACUUM (vc_vacheap): BlowawayRelationBuffers returned %d", i);

		nblocks = smgrtruncate(DEFAULT_SMGR, onerel, nblocks);
		Assert(nblocks >= 0);
		vacrelstats->npages = nblocks;	/* set new number of blocks */
/*
 * vc_vacpage() -- free dead tuples on a page
 *		and repair its fragmentation.
 *
 * vpd->vpd_voff[] lists the 1-based offsets of the dead line pointers;
 * each gets its LP_USED flag cleared, then the page is compacted.
 */
vc_vacpage(Page page, VPageDescr vpd)
	/* must not be called on a page we already moved tuples onto */
	Assert(vpd->vpd_nusd == 0);
	for (i = 0; i < vpd->vpd_noff; i++)
		itemid = &(((PageHeader) page)->pd_linp[vpd->vpd_voff[i] - 1]);
		itemid->lp_flags &= ~LP_USED;
	PageRepairFragmentation(page);
/*
 * vc_scanoneind() -- scan one index relation to update statistic.
 *
 * Counts index tuples and pages, stores them in pg_class via
 * vc_updstats(), and warns when the index tuple count disagrees
 * with the heap tuple count (nhtups).
 */
vc_scanoneind(Relation indrel, int nhtups)
	RetrieveIndexResult res;
	IndexScanDesc iscan;

	getrusage(RUSAGE_SELF, &ru0);

	/* walk through the entire index */
	iscan = index_beginscan(indrel, false, 0, (ScanKey) NULL);

	while ((res = index_getnext(iscan, ForwardScanDirection))
		   != (RetrieveIndexResult) NULL)

	index_endscan(iscan);

	/* now update statistics in pg_class */
	nipages = RelationGetNumberOfBlocks(indrel);
	vc_updstats(indrel->rd_id, nipages, nitups, false, NULL);

	getrusage(RUSAGE_SELF, &ru1);

	elog(MESSAGE_LEVEL, "Ind %s: Pages %u; Tuples %u. Elapsed %u/%u sec.",
		 indrel->rd_rel->relname.data, nipages, nitups,
		 ru1.ru_stime.tv_sec - ru0.ru_stime.tv_sec,
		 ru1.ru_utime.tv_sec - ru0.ru_utime.tv_sec);

	if (nitups != nhtups)
		elog(NOTICE, "Ind %s: NUMBER OF INDEX' TUPLES (%u) IS NOT THE SAME AS HEAP' (%u)",
			 indrel->rd_rel->relname.data, nitups, nhtups);

}	/* vc_scanoneind */
1478 * vc_vaconeind() -- vacuum one index relation.
1480 * Vpl is the VPageList of the heap we're currently vacuuming.
1481 * It's locked. Indrel is an index relation on the vacuumed heap.
1482 * We don't set locks on the index relation here, since the indexed
1483 * access methods support locking at different granularities.
1484 * We let them handle it.
1486 * Finally, we arrange to update the index relation's statistics in
/*
 * For every index entry whose heap TID was reaped (per vc_tidreapped),
 * the entry is deleted from the index.  Counters nitups/nvac are
 * maintained in lines not visible in this extract.
 */
1490 vc_vaconeind(VPageList vpl, Relation indrel, int nhtups)
1492 RetrieveIndexResult res;
1493 IndexScanDesc iscan;
1494 ItemPointer heapptr;
1502 getrusage(RUSAGE_SELF, &ru0);
1504 /* walk through the entire index */
1505 iscan = index_beginscan(indrel, false, 0, (ScanKey) NULL);
1509 while ((res = index_getnext(iscan, ForwardScanDirection))
1510 != (RetrieveIndexResult) NULL)
1512 heapptr = &res->heap_iptr;
/* does this entry point at a tuple we reaped on the heap side? */
1514 if ((vp = vc_tidreapped(heapptr, vpl)) != (VPageDescr) NULL)
1517 elog(DEBUG, "<%x,%x> -> <%x,%x>",
1518 ItemPointerGetBlockNumber(&(res->index_iptr)),
1519 ItemPointerGetOffsetNumber(&(res->index_iptr)),
1520 ItemPointerGetBlockNumber(&(res->heap_iptr)),
1521 ItemPointerGetOffsetNumber(&(res->heap_iptr)));
/* vpd_noff == 0 marks a page recorded as entirely empty; an index
 * pointer into it should not exist -- report and remove it */
1523 if (vp->vpd_noff == 0)
1524 { /* this is EmptyPage !!! */
1525 elog(NOTICE, "Ind %s: pointer to EmptyPage (blk %u off %u) - fixing",
1526 indrel->rd_rel->relname.data,
1527 vp->vpd_blkno, ItemPointerGetOffsetNumber(heapptr));
1530 index_delete(indrel, &res->index_iptr);
1541 index_endscan(iscan);
1543 /* now update statistics in pg_class */
1544 nipages = RelationGetNumberOfBlocks(indrel);
1545 vc_updstats(indrel->rd_id, nipages, nitups, false, NULL);
1547 getrusage(RUSAGE_SELF, &ru1);
1549 elog(MESSAGE_LEVEL, "Ind %s: Pages %u; Tuples %u: Deleted %u. Elapsed %u/%u sec.",
1550 indrel->rd_rel->relname.data, nipages, nitups, nvac,
1551 ru1.ru_stime.tv_sec - ru0.ru_stime.tv_sec,
1552 ru1.ru_utime.tv_sec - ru0.ru_utime.tv_sec);
/* surviving-entry count should now match the heap's tuple count */
1554 if (nitups != nhtups)
1555 elog(NOTICE, "Ind %s: NUMBER OF INDEX' TUPLES (%u) IS NOT THE SAME AS HEAP' (%u)",
1556 indrel->rd_rel->relname.data, nitups, nhtups);
1558 } /* vc_vaconeind */
1561 * vc_tidreapped() -- is a particular tid reapped?
1563 * vpl->VPageDescr_array is sorted in right order.
/*
 * Two-level binary search: first find the page descriptor for the TID's
 * block, then (unless the page is wholly empty) find the TID's offset in
 * that page's sorted vpd_voff[] array.  Returns the page descriptor when
 * the tuple was reaped, NULL otherwise.
 */
1566 vc_tidreapped(ItemPointer itemptr, VPageList vpl)
1568 OffsetNumber ioffno;
1574 vpd.vpd_blkno = ItemPointerGetBlockNumber(itemptr);
1575 ioffno = ItemPointerGetOffsetNumber(itemptr);
/* binary-search the page-descriptor array by block number */
1578 vpp = (VPageDescr *) vc_find_eq((char *) (vpl->vpl_pgdesc),
1579 vpl->vpl_npages, sizeof(VPageDescr), (char *) &vp,
1582 if (vpp == (VPageDescr *) NULL)
1583 return ((VPageDescr) NULL);
1586 /* ok - we are on true page */
/* vpd_noff == 0: whole page is empty, so every TID on it is reaped */
1588 if (vp->vpd_noff == 0)
1589 { /* this is EmptyPage !!! */
/* otherwise search the page's dead-offset list for this exact offset */
1593 voff = (OffsetNumber *) vc_find_eq((char *) (vp->vpd_voff),
1594 vp->vpd_noff, sizeof(OffsetNumber), (char *) &ioffno,
1597 if (voff == (OffsetNumber *) NULL)
1598 return ((VPageDescr) NULL);
1602 } /* vc_tidreapped */
1605 * vc_attrstats() -- compute column statistics used by the optimzer
1607 * We compute the column min, max, null and non-null counts.
1608 * Plus we attempt to find the count of the value that occurs most
1609 * frequently in each column
1610 * These figures are used to compute the selectivity of the column
1612 * We use a three-bucked cache to get the most frequent item
1613 * The 'guess' buckets count hits. A cache miss causes guess1
1614 * to get the most hit 'guess' item in the most recent cycle, and
1615 * the new item goes into guess2. Whenever the total count of hits
1616 * of a 'guess' entry is larger than 'best', 'guess' becomes 'best'.
1618 * This method works perfectly for columns with unique values, and columns
1619 * with only two unique values, plus nulls.
1621 * It becomes less perfect as the number of unique values increases and
1622 * their distribution in the table becomes more random.
/*
 * Called once per live heap tuple; updates the per-attribute running
 * statistics in vacrelstats->vacattrstats in place.
 */
1626 vc_attrstats(Relation onerel, VRelStats *vacrelstats, HeapTuple htup)
1629 attr_cnt = vacrelstats->va_natts;
1630 VacAttrStats *vacattrstats = vacrelstats->vacattrstats;
1631 TupleDesc tupDesc = onerel->rd_att;
1635 for (i = 0; i < attr_cnt; i++)
1637 VacAttrStats *stats = &vacattrstats[i];
1638 bool value_hit = true;
1640 value = heap_getattr(htup,
1641 stats->attr->attnum, tupDesc, &isnull);
/* without a usable equality function no stats can be gathered */
1643 if (!VacAttrStatsEqValid(stats))
1650 stats->nonnull_cnt++;
/* first non-null value seen for this column: seed all buckets with it */
1651 if (stats->initialized == false)
1653 vc_bucketcpy(stats->attr, value, &stats->best, &stats->best_len);
1654 /* best_cnt gets incremented later */
1655 vc_bucketcpy(stats->attr, value, &stats->guess1, &stats->guess1_len);
1656 stats->guess1_cnt = stats->guess1_hits = 1;
1657 vc_bucketcpy(stats->attr, value, &stats->guess2, &stats->guess2_len);
1658 stats->guess2_hits = 1;
1659 if (VacAttrStatsLtGtValid(stats))
1661 vc_bucketcpy(stats->attr, value, &stats->max, &stats->max_len);
1662 vc_bucketcpy(stats->attr, value, &stats->min, &stats->min_len);
1664 stats->initialized = true;
/* maintain running min/max when ordering operators are available */
1666 if (VacAttrStatsLtGtValid(stats))
1668 if ((*fmgr_faddr(&stats->f_cmplt)) (value, stats->min))
1670 vc_bucketcpy(stats->attr, value, &stats->min, &stats->min_len);
1673 if ((*fmgr_faddr(&stats->f_cmpgt)) (value, stats->max))
1675 vc_bucketcpy(stats->attr, value, &stats->max, &stats->max_len);
/* min/max equality counts feed the selectivity estimate in vc_updstats */
1678 if ((*fmgr_faddr(&stats->f_cmpeq)) (value, stats->min))
1680 else if ((*fmgr_faddr(&stats->f_cmpeq)) (value, stats->max))
/* three-bucket most-common-value cache: best, guess1, guess2 */
1683 if ((*fmgr_faddr(&stats->f_cmpeq)) (value, stats->best))
1685 else if ((*fmgr_faddr(&stats->f_cmpeq)) (value, stats->guess1))
1687 stats->guess1_cnt++;
1688 stats->guess1_hits++;
1690 else if ((*fmgr_faddr(&stats->f_cmpeq)) (value, stats->guess2))
1691 stats->guess2_hits++;
/* cache miss path: promote the hotter guess, evict the colder one */
1695 if (stats->guess2_hits > stats->guess1_hits)
1697 swapDatum(stats->guess1, stats->guess2);
1698 swapInt(stats->guess1_len, stats->guess2_len);
1699 stats->guess1_cnt = stats->guess2_hits;
1700 swapLong(stats->guess1_hits, stats->guess2_hits);
/* a guess that out-hits 'best' becomes the new 'best' */
1702 if (stats->guess1_cnt > stats->best_cnt)
1704 swapDatum(stats->best, stats->guess1);
1705 swapInt(stats->best_len, stats->guess1_len);
1706 swapLong(stats->best_cnt, stats->guess1_cnt);
1707 stats->guess1_hits = 1;
1708 stats->guess2_hits = 1;
/* the missed value enters the cache as guess2 */
1712 vc_bucketcpy(stats->attr, value, &stats->guess2, &stats->guess2_len);
1713 stats->guess1_hits = 1;
1714 stats->guess2_hits = 1;
1722 * vc_bucketcpy() -- update pg_class statistics for one relation
/*
 * NOTE(review): the header line above is a copy-paste of vc_updstats()'s
 * description.  What this helper actually does: copy a Datum into a
 * statistics bucket, growing the bucket's palloc'd storage when the
 * value is pass-by-reference and larger than the current allocation.
 * *bucket_len tracks the allocated size (0 = nothing allocated yet).
 */
1726 vc_bucketcpy(AttributeTupleForm attr, Datum value, Datum *bucket, int16 *bucket_len)
/* pass-by-value types are copied directly (path not visible in extract) */
1728 if (attr->attbyval && attr->attlen != -1)
/* attlen == -1 means varlena: actual size comes from the value header */
1732 int len = (attr->attlen != -1 ? attr->attlen : VARSIZE(value));
1734 if (len > *bucket_len)
1736 if (*bucket_len != 0)
1737 pfree(DatumGetPointer(*bucket));
1738 *bucket = PointerGetDatum(palloc(len));
1741 memmove(DatumGetPointer(*bucket), DatumGetPointer(value), len);
1746 * vc_updstats() -- update pg_class statistics for one relation
1748 * This routine works for both index and heap relation entries in
1749 * pg_class. We violate no-overwrite semantics here by storing new
1750 * values for ntups, npages, and hasindex directly in the pg_class
1751 * tuple that's already on the page. The reason for this is that if
1752 * we updated these tuples in the usual way, then every tuple in pg_class
1753 * would be replaced every day. This would make planning and executing
1754 * historical queries very expensive.
/*
 * When vacrelstats is non-NULL, also overwrites attdisbursion in
 * pg_attribute and inserts fresh min/max rows into pg_statistic.
 */
1757 vc_updstats(Oid relid, int npages, int ntups, bool hasindex, VRelStats *vacrelstats)
1762 HeapScanDesc rsdesc,
1770 Form_pg_class pgcform;
1773 AttributeTupleForm attp;
1776 * update number of tuples and number of pages in pg_class
/* locate this relation's pg_class row by OID */
1778 ScanKeyEntryInitialize(&rskey, 0x0, ObjectIdAttributeNumber,
1779 ObjectIdEqualRegProcedure,
1780 ObjectIdGetDatum(relid));
1782 rd = heap_openr(RelationRelationName);
1783 rsdesc = heap_beginscan(rd, false, false, 1, &rskey);
1785 if (!HeapTupleIsValid(rtup = heap_getnext(rsdesc, 0, &rbuf)))
1786 elog(ERROR, "pg_class entry for relid %d vanished during vacuuming",
1789 /* overwrite the existing statistics in the tuple */
/* page-level write lock before scribbling on the buffer in place */
1790 vc_setpagelock(rd, BufferGetBlockNumber(rbuf));
1791 pgcform = (Form_pg_class) GETSTRUCT(rtup);
1792 pgcform->reltuples = ntups;
1793 pgcform->relpages = npages;
1794 pgcform->relhasindex = hasindex;
/* per-attribute statistics: only when the caller gathered them */
1796 if (vacrelstats != NULL && vacrelstats->va_natts > 0)
1798 VacAttrStats *vacattrstats = vacrelstats->vacattrstats;
1799 int natts = vacrelstats->va_natts;
1801 ad = heap_openr(AttributeRelationName);
1802 sd = heap_openr(StatisticRelationName);
1803 ScanKeyEntryInitialize(&askey, 0, Anum_pg_attribute_attrelid,
1806 asdesc = heap_beginscan(ad, false, false, 1, &askey);
/* walk this relation's pg_attribute rows, matching each to its stats */
1808 while (HeapTupleIsValid(atup = heap_getnext(asdesc, 0, &abuf)))
1811 float32data selratio; /* average ratio of rows selected
1812 * for a random constant */
1813 VacAttrStats *stats;
1814 Datum values[Natts_pg_statistic];
1815 char nulls[Natts_pg_statistic];
1817 attp = (AttributeTupleForm) GETSTRUCT(atup);
1818 if (attp->attnum <= 0) /* skip system attributes for now, */
1819 /* they are unique anyway */
/* find the stats slot collected for this attribute number */
1822 for (i = 0; i < natts; i++)
1824 if (attp->attnum == vacattrstats[i].attr->attnum)
1829 stats = &(vacattrstats[i]);
1831 /* overwrite the existing statistics in the tuple */
1832 if (VacAttrStatsEqValid(stats))
1835 vc_setpagelock(ad, BufferGetBlockNumber(abuf));
/* degenerate distributions (empty column, or all-distinct) handled first */
1837 if (stats->nonnull_cnt + stats->null_cnt == 0 ||
1838 (stats->null_cnt <= 1 && stats->best_cnt == 1))
1840 else if (VacAttrStatsLtGtValid(stats) && stats->min_cnt + stats->max_cnt == stats->nonnull_cnt)
/* only two distinct values (min and max) plus nulls:
 * selectivity = sum of squared frequencies */
1842 double min_cnt_d = stats->min_cnt,
1843 max_cnt_d = stats->max_cnt,
1844 null_cnt_d = stats->null_cnt,
1845 nonnullcnt_d = stats->nonnull_cnt; /* prevent overflow */
1847 selratio = (min_cnt_d * min_cnt_d + max_cnt_d * max_cnt_d + null_cnt_d * null_cnt_d) /
1848 (nonnullcnt_d + null_cnt_d) / (nonnullcnt_d + null_cnt_d);
/* general case: estimate from the most-common value's frequency */
1852 double most = (double) (stats->best_cnt > stats->null_cnt ? stats->best_cnt : stats->null_cnt);
1853 double total = ((double) stats->nonnull_cnt) + ((double) stats->null_cnt);
1856 * we assume count of other values are 20% of best
1859 selratio = (most * most + 0.20 * most * (total - most)) / total / total;
/* in-place overwrite of pg_attribute, same trick as pg_class above */
1863 attp->attdisbursion = selratio;
1864 WriteNoReleaseBuffer(abuf);
1866 /* DO PG_STATISTIC INSERTS */
1869 * doing system relations, especially pg_statistic is a
/* only insert min/max rows when ordering stats exist and at least one
 * value was seen; the system-relation exclusion is commented out */
1872 if (VacAttrStatsLtGtValid(stats) && stats->initialized /* &&
1873 * !IsSystemRelationName(
1875 pgcform->relname.data) */ )
1877 FmgrInfo out_function;
1880 for (i = 0; i < Natts_pg_statistic; ++i)
1884 * initialize values[]
1888 values[i++] = (Datum) relid; /* 1 */
1889 values[i++] = (Datum) attp->attnum; /* 2 */
1890 values[i++] = (Datum) InvalidOid; /* 3 */
/* convert min/max to text via the type's output function */
1891 fmgr_info(stats->outfunc, &out_function);
1892 out_string = (*fmgr_faddr(&out_function)) (stats->min, stats->attr->atttypid);
1893 values[i++] = (Datum) fmgr(TextInRegProcedure, out_string);
1895 out_string = (char *) (*fmgr_faddr(&out_function)) (stats->max, stats->attr->atttypid);
1896 values[i++] = (Datum) fmgr(TextInRegProcedure, out_string);
1901 stup = heap_formtuple(sdesc, values, nulls);
1904 * insert the tuple in the relation and get the tuple's oid.
1907 heap_insert(sd, stup);
/* free the text datums built for columns 4 and 5 (0-based 3 and 4) */
1908 pfree(DatumGetPointer(values[3]));
1909 pfree(DatumGetPointer(values[4]));
1914 heap_endscan(asdesc);
1919 /* XXX -- after write, should invalidate relcache in other backends */
1920 WriteNoReleaseBuffer(rbuf); /* heap_endscan release scan' buffers ? */
1923 * invalidating system relations confuses the function cache of
1924 * pg_operator and pg_opclass
1926 if (!IsSystemRelationName(pgcform->relname.data))
1927 RelationInvalidateHeapTuple(rd, rtup);
1929 /* that's all, folks */
1930 heap_endscan(rsdesc);
1935 * vc_delhilowstats() -- delete pg_statistics rows
/*
 * relid == InvalidOid deletes rows for all relations; otherwise only
 * rows for that relation.  When attnums is supplied, rows for other
 * attributes of the relation are preserved.
 */
1939 vc_delhilowstats(Oid relid, int attcnt, int *attnums)
1941 Relation pgstatistic;
1942 HeapScanDesc pgsscan;
1946 pgstatistic = heap_openr(StatisticRelationName);
1948 if (relid != InvalidOid)
/* restrict the scan to this relation's rows */
1950 ScanKeyEntryInitialize(&pgskey, 0x0, Anum_pg_statistic_starelid,
1951 ObjectIdEqualRegProcedure,
1952 ObjectIdGetDatum(relid));
1953 pgsscan = heap_beginscan(pgstatistic, false, false, 1, &pgskey);
/* no relid given: full-table scan, delete everything matched below */
1956 pgsscan = heap_beginscan(pgstatistic, false, false, 0, NULL);
1958 while (HeapTupleIsValid(pgstup = heap_getnext(pgsscan, 0, NULL)))
1962 Form_pg_statistic pgs = (Form_pg_statistic) GETSTRUCT(pgstup);
/* NOTE(review): comparison uses attnums[i] + 1 -- caller apparently
 * passes 0-based attribute indexes while staattnum is 1-based; confirm */
1965 for (i = 0; i < attcnt; i++)
1967 if (pgs->staattnum == attnums[i] + 1)
1971 continue; /* don't delete it */
1973 heap_delete(pgstatistic, &pgstup->t_ctid);
1976 heap_endscan(pgsscan);
1977 heap_close(pgstatistic);
/*
 * vc_setpagelock() -- take a write lock on one page of rel.
 *
 * The lock manager keys page locks by ItemPointer, so we fabricate a
 * TID addressing offset 1 of the target block.
 */
1981 vc_setpagelock(Relation rel, BlockNumber blkno)
1983 ItemPointerData itm;
1985 ItemPointerSet(&itm, blkno, 1);
1987 RelationSetLockForWritePage(rel, &itm);
1991 * vc_reappage() -- save a page on the array of reapped pages.
1993 * As a side effect of the way that the vacuuming loop for a given
1994 * relation works, higher pages come after lower pages in the array
1995 * (and highest tid on a page is last).
/*
 * Makes a private copy of vpc (header plus its variable-length offset
 * array) and appends it to vpl via vc_vpinsert().
 */
1998 vc_reappage(VPageList vpl, VPageDescr vpc)
2002 /* allocate a VPageDescrData entry */
/* allocation sized for the header plus vpd_noff trailing offsets */
2003 newvpd = (VPageDescr) palloc(sizeof(VPageDescrData) + vpc->vpd_noff * sizeof(OffsetNumber));
2006 if (vpc->vpd_noff > 0)
2007 memmove(newvpd->vpd_voff, vpc->vpd_voff, vpc->vpd_noff * sizeof(OffsetNumber));
2008 newvpd->vpd_blkno = vpc->vpd_blkno;
2009 newvpd->vpd_free = vpc->vpd_free;
2010 newvpd->vpd_nusd = vpc->vpd_nusd;
2011 newvpd->vpd_noff = vpc->vpd_noff;
2013 /* insert this page into vpl list */
2014 vc_vpinsert(vpl, newvpd);
/*
 * vc_vpinsert() -- append a page descriptor to a VPageList.
 *
 * The descriptor array grows in chunks of 100 pointers; growth is
 * triggered exactly when the current count is a multiple of 100.
 */
2019 vc_vpinsert(VPageList vpl, VPageDescr vpnew)
2022 /* allocate a VPageDescr entry if needed */
2023 if (vpl->vpl_npages == 0)
2024 vpl->vpl_pgdesc = (VPageDescr *) palloc(100 * sizeof(VPageDescr));
2025 else if (vpl->vpl_npages % 100 == 0)
2026 vpl->vpl_pgdesc = (VPageDescr *) repalloc(vpl->vpl_pgdesc, (vpl->vpl_npages + 100) * sizeof(VPageDescr));
2027 vpl->vpl_pgdesc[vpl->vpl_npages] = vpnew;
2028 (vpl->vpl_npages)++;
/*
 * vc_free() -- release a VRelList.
 *
 * The list was allocated in the vacuum portal's variable memory
 * context, so we must switch into that context to pfree it (freeing
 * lines are not visible in this extract).
 */
2033 vc_free(VRelList vrl)
2037 PortalVariableMemory pmem;
2039 pmem = PortalGetVariableMemory(vc_portal);
2040 old = MemoryContextSwitchTo((MemoryContext) pmem);
2042 while (vrl != (VRelList) NULL)
2045 /* free rel list entry */
2047 vrl = vrl->vrl_next;
/* restore the caller's memory context */
2051 MemoryContextSwitchTo(old);
/*
 * vc_find_eq() -- binary search for an element equal to *elm in a
 * sorted array of nelem items of 'size' bytes, using comparator
 * 'compar'.  Returns a pointer to the matching element, or NULL.
 *
 * The first_move/last_move flags avoid re-comparing an unchanged
 * boundary on successive iterations.  NOTE(review): several loop
 * lines are missing from this extract -- verify boundary handling
 * against the full source.
 */
2055 vc_find_eq(char *bot, int nelem, int size, char *elm, int (*compar) (char *, char *))
2058 int last = nelem - 1;
2059 int celm = nelem / 2;
2063 last_move = first_move = true;
/* re-check the low boundary only when it has moved */
2066 if (first_move == true)
2068 res = compar(bot, elm);
/* re-check the high boundary only when it has moved */
2075 if (last_move == true)
2077 res = compar(elm, bot + last * size);
2081 return (bot + last * size);
/* probe the midpoint */
2084 res = compar(elm, bot + celm * size);
2086 return (bot + celm * size);
/* narrow to the upper half: discard [bot, bot+celm] */
2100 last = last - celm - 1;
2101 bot = bot + (celm + 1) * size;
2102 celm = (last + 1) / 2;
/*
 * vc_cmp_blk() -- three-way comparator on VPageDescr block numbers,
 * for use with vc_find_eq()/qsort over a VPageDescr* array.  The
 * return statements are not visible in this extract; presumably the
 * usual <0 / 0 / >0 convention -- confirm against the full source.
 */
2109 vc_cmp_blk(char *left, char *right)
2114 lblk = (*((VPageDescr *) left))->vpd_blkno;
2115 rblk = (*((VPageDescr *) right))->vpd_blkno;
/*
 * vc_cmp_offno() -- three-way comparator on OffsetNumbers, for use
 * with vc_find_eq() over a page's sorted vpd_voff[] array.
 */
2126 vc_cmp_offno(char *left, char *right)
2129 if (*(OffsetNumber *) left < *(OffsetNumber *) right)
2131 if (*(OffsetNumber *) left == *(OffsetNumber *) right)
2135 } /* vc_cmp_offno */
/*
 * vc_getindices() -- collect and open all indexes on relation relid.
 *
 * Scans pg_index for rows whose indrelid matches, gathers the index
 * OIDs (array grown in chunks of 10), then index_open()s each one.
 * On return *nindices holds the count of successfully opened indexes
 * and, when Irel is non-NULL, *Irel points to a palloc'd array of
 * their Relation handles (NULL when none opened).
 */
2139 vc_getindices(Oid relid, int *nindices, Relation **Irel)
2145 HeapScanDesc pgiscan;
2155 ioid = (Oid *) palloc(10 * sizeof(Oid));
2157 /* prepare a heap scan on the pg_index relation */
2158 pgindex = heap_openr(IndexRelationName);
2159 pgidesc = RelationGetTupleDescriptor(pgindex);
2161 ScanKeyEntryInitialize(&pgikey, 0x0, Anum_pg_index_indrelid,
2162 ObjectIdEqualRegProcedure,
2163 ObjectIdGetDatum(relid));
2165 pgiscan = heap_beginscan(pgindex, false, false, 1, &pgikey);
2167 while (HeapTupleIsValid(pgitup = heap_getnext(pgiscan, 0, NULL)))
2169 d = heap_getattr(pgitup, Anum_pg_index_indexrelid,
/* grow the OID array by 10 slots whenever it fills */
2173 ioid = (Oid *) repalloc(ioid, (i + 10) * sizeof(Oid));
/* i was apparently pre-incremented in a missing line; store 0-based */
2174 ioid[i - 1] = DatumGetObjectId(d);
2177 heap_endscan(pgiscan);
2178 heap_close(pgindex);
2181 { /* No one index found */
2186 if (Irel != (Relation **) NULL)
2187 *Irel = (Relation *) palloc(i * sizeof(Relation));
/* open the collected indexes, walking the OID array backwards */
2191 irel = index_open(ioid[--i]);
2192 if (irel != (Relation) NULL)
2194 if (Irel != (Relation **) NULL)
/* unopenable index is skipped with a notice rather than aborting */
2201 elog(NOTICE, "CAN't OPEN INDEX %u - SKIP IT", ioid[i]);
/* nothing opened: hand back NULL instead of an empty array */
2206 if (Irel != (Relation **) NULL && *nindices == 0)
2209 *Irel = (Relation *) NULL;
2212 } /* vc_getindices */
/*
 * vc_clsindices() -- close every index opened by vc_getindices().
 * A NULL Irel (no indexes) is a no-op.
 */
2216 vc_clsindices(int nindices, Relation *Irel)
2219 if (Irel == (Relation *) NULL)
/* loop decrement over nindices is in lines missing from this extract */
2224 index_close(Irel[nindices]);
2228 } /* vc_clsindices */
/*
 * vc_mkindesc() -- build an IndDesc (pg_index tuple, key-attribute
 * count, and functional-index info) for each of the nindices open
 * indexes in Irel[], returning the palloc'd array through *Idesc.
 */
2232 vc_mkindesc(Relation onerel, int nindices, Relation *Irel, IndDesc **Idesc)
2235 HeapTuple pgIndexTup;
2236 AttrNumber *attnumP;
2240 *Idesc = (IndDesc *) palloc(nindices * sizeof(IndDesc));
2242 for (i = 0, idcur = *Idesc; i < nindices; i++, idcur++)
/* fetch the index's pg_index row from the syscache by index OID */
2245 SearchSysCacheTuple(INDEXRELID,
2246 ObjectIdGetDatum(Irel[i]->rd_id),
2249 idcur->tform = (IndexTupleForm) GETSTRUCT(pgIndexTup);
/* count key attributes: indkey[] is terminated by InvalidAttrNumber,
 * capped at INDEX_MAX_KEYS (empty loop body is intentional) */
2250 for (attnumP = &(idcur->tform->indkey[0]), natts = 0;
2251 *attnumP != InvalidAttrNumber && natts != INDEX_MAX_KEYS;
2252 attnumP++, natts++);
/* functional index: fill in the FuncIndexInfo for index_insert() */
2253 if (idcur->tform->indproc != InvalidOid)
2255 idcur->finfoP = &(idcur->finfo);
2256 FIgetnArgs(idcur->finfoP) = natts;
2258 FIgetProcOid(idcur->finfoP) = idcur->tform->indproc;
2259 *(FIgetname(idcur->finfoP)) = '\0';
2262 idcur->finfoP = (FuncIndexInfo *) NULL;
2264 idcur->natts = natts;
/*
 * vc_enough_space() -- can a tuple of 'len' bytes fit on the page
 * described by vpd?
 *
 * Accounts for line-pointer overhead: if a freed itemid can be
 * reused (vpd_nusd < vpd_noff) only the aligned tuple size matters;
 * otherwise a fresh ItemIdData must also come out of the free space.
 */
2271 vc_enough_space(VPageDescr vpd, Size len)
/* tuples are stored double-aligned; compare against the aligned size */
2274 len = DOUBLEALIGN(len);
2276 if (len > vpd->vpd_free)
2279 if (vpd->vpd_nusd < vpd->vpd_noff) /* there are free itemid(s) */
2280 return (true); /* and len <= free_space */
2282 /* ok. noff_usd >= noff_free and so we'll have to allocate new itemid */
2283 if (len <= vpd->vpd_free - sizeof(ItemIdData))
2288 } /* vc_enough_space */