1 /*-------------------------------------------------------------------------
4 * the postgres vacuum cleaner
6 * Copyright (c) 1994, Regents of the University of California
10 * $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.62 1998/02/25 23:40:32 vadim Exp $
12 *-------------------------------------------------------------------------
14 #include <sys/types.h>
24 #include <utils/portal.h>
25 #include <access/genam.h>
26 #include <access/heapam.h>
27 #include <access/xact.h>
28 #include <storage/bufmgr.h>
29 #include <access/transam.h>
30 #include <catalog/pg_index.h>
31 #include <catalog/index.h>
32 #include <catalog/catname.h>
33 #include <catalog/catalog.h>
34 #include <catalog/pg_class.h>
35 #include <catalog/pg_proc.h>
36 #include <catalog/pg_statistic.h>
37 #include <catalog/pg_type.h>
38 #include <catalog/pg_operator.h>
39 #include <parser/parse_oper.h>
40 #include <storage/smgr.h>
41 #include <storage/lmgr.h>
42 #include <utils/inval.h>
43 #include <utils/mcxt.h>
44 #include <utils/inval.h>
45 #include <utils/syscache.h>
46 #include <utils/builtins.h>
47 #include <commands/vacuum.h>
48 #include <storage/bufpage.h>
49 #include "storage/shmem.h"
50 #ifndef HAVE_GETRUSAGE
51 #include <rusagestub.h>
54 #include <sys/resource.h>
57 /* #include <port-protos.h> */ /* Why? */
59 extern int BlowawayRelationBuffers(Relation rdesc, BlockNumber block);
61 bool VacuumRunning = false;
63 static Portal vc_portal;
65 static int MESSAGE_LEVEL; /* message level */
67 #define swapLong(a,b) {long tmp; tmp=a; a=b; b=tmp;}
68 #define swapInt(a,b) {int tmp; tmp=a; a=b; b=tmp;}
69 #define swapDatum(a,b) {Datum tmp; tmp=a; a=b; b=tmp;}
70 #define VacAttrStatsEqValid(stats) ( stats->f_cmpeq.fn_addr != NULL )
71 #define VacAttrStatsLtGtValid(stats) ( stats->f_cmplt.fn_addr != NULL && \
72 stats->f_cmpgt.fn_addr != NULL && \
73 RegProcedureIsValid(stats->outfunc) )
76 /* non-export function prototypes */
77 static void vc_init(void);
78 static void vc_shutdown(void);
79 static void vc_vacuum(NameData *VacRelP, bool analyze, List *va_cols);
80 static VRelList vc_getrels(NameData *VacRelP);
81 static void vc_vacone(Oid relid, bool analyze, List *va_cols);
82 static void vc_scanheap(VRelStats *vacrelstats, Relation onerel, VPageList Vvpl, VPageList Fvpl);
83 static void vc_rpfheap(VRelStats *vacrelstats, Relation onerel, VPageList Vvpl, VPageList Fvpl, int nindices, Relation *Irel);
84 static void vc_vacheap(VRelStats *vacrelstats, Relation onerel, VPageList vpl);
85 static void vc_vacpage(Page page, VPageDescr vpd);
86 static void vc_vaconeind(VPageList vpl, Relation indrel, int nhtups);
87 static void vc_scanoneind(Relation indrel, int nhtups);
88 static void vc_attrstats(Relation onerel, VRelStats *vacrelstats, HeapTuple htup);
89 static void vc_bucketcpy(AttributeTupleForm attr, Datum value, Datum *bucket, int16 *bucket_len);
90 static void vc_updstats(Oid relid, int npages, int ntups, bool hasindex, VRelStats *vacrelstats);
91 static void vc_delhilowstats(Oid relid, int attcnt, int *attnums);
92 static void vc_setpagelock(Relation rel, BlockNumber blkno);
93 static VPageDescr vc_tidreapped(ItemPointer itemptr, VPageList vpl);
94 static void vc_reappage(VPageList vpl, VPageDescr vpc);
95 static void vc_vpinsert(VPageList vpl, VPageDescr vpnew);
96 static void vc_free(VRelList vrl);
97 static void vc_getindices(Oid relid, int *nindices, Relation **Irel);
98 static void vc_clsindices(int nindices, Relation *Irel);
99 static void vc_mkindesc(Relation onerel, int nindices, Relation *Irel, IndDesc **Idesc);
100 static char *vc_find_eq(char *bot, int nelem, int size, char *elm, int (*compar) (char *, char *));
101 static int vc_cmp_blk(char *left, char *right);
102 static int vc_cmp_offno(char *left, char *right);
103 static bool vc_enough_space(VPageDescr vpd, Size len);
/*
 * vacuum() -- entry point for the VACUUM command.
 *
 * NOTE(review): this listing is elided (the embedded original line numbers
 * jump), so declarations, braces, and some statements are missing here.
 * Comments describe only what the visible lines show.
 */
106 vacuum(char *vacrel, bool verbose, bool analyze, List *va_spec)
110 PortalVariableMemory pmem;
116 * Create a portal for safe memory across transactions. We need to
117 * palloc the name space for it because our hash function expects the
118 * name to be on a longword boundary. CreatePortal copies the name to
119 * safe storage for us.
121 pname = (char *) palloc(strlen(VACPNAME) + 1);
122 strcpy(pname, VACPNAME);
123 vc_portal = CreatePortal(pname);
/* MESSAGE_LEVEL controls elog verbosity for progress reports; presumably
 * NOTICE when verbose, DEBUG otherwise -- the selecting `if` is elided. */
127 MESSAGE_LEVEL = NOTICE;
129 MESSAGE_LEVEL = DEBUG;
131 /* vacrel gets de-allocated on transaction commit */
133 strcpy(VacRel.data, vacrel);
/* switch into portal (cross-transaction) memory so the column-name list
 * survives the per-relation transaction commits below */
135 pmem = PortalGetVariableMemory(vc_portal);
136 old = MemoryContextSwitchTo((MemoryContext) pmem);
/* column lists are only meaningful with ANALYZE */
138 if (va_spec != NIL && !analyze)
139 elog(ERROR,"Can't vacuum columns, only tables. You can 'vacuum analyze' columns.");
/* copy each requested column name into portal memory */
143 char *col = (char *) lfirst(le);
146 dest = (char *) palloc(strlen(col) + 1);
148 va_cols = lappend(va_cols, dest);
150 MemoryContextSwitchTo(old);
152 /* initialize vacuum cleaner */
155 /* vacuum the database */
/* non-NULL relation name => vacuum that one relation; NULL => whole DB */
157 vc_vacuum(&VacRel, analyze, va_cols);
159 vc_vacuum(NULL, analyze, NIL);
161 PortalDestroy(&vc_portal);
168 * vc_init(), vc_shutdown() -- start up and shut down the vacuum cleaner.
170 * We run exactly one vacuum cleaner at a time. We use the file system
171 * to guarantee an exclusive lock on vacuuming, since a single vacuum
172 * cleaner instantiation crosses transaction boundaries, and we'd lose
173 * postgres-style locks at the end of every transaction.
175 * The strangeness with committing and starting transactions in the
176 * init and shutdown routines is due to the fact that the vacuum cleaner
177 * is invoked via a sql command, and so is already executing inside
178 * a transaction. We need to leave ourselves in a predictable state
179 * on entry and exit to the vacuum cleaner. We commit the transaction
180 * started in PostgresMain() inside vc_init(), and start one in
181 * vc_shutdown() to match the commit waiting for us back in
/*
 * vc_init() -- visible tail of the vacuum-cleaner startup routine
 * (the function header line is elided from this listing).
 *
 * Uses O_CREAT|O_EXCL on "pg_vlock" as a filesystem mutex: the open
 * fails if the file already exists, guaranteeing at most one vacuum
 * cleaner runs at a time (see the block comment above this function).
 */
189 if ((fd = open("pg_vlock", O_CREAT | O_EXCL, 0600)) < 0)
190 elog(ERROR, "can't create lock file -- another vacuum cleaner running?");
195 * By here, exclusive open on the lock file succeeded. If we abort
196 * for any reason during vacuuming, we need to remove the lock file.
197 * This global variable is checked in the transaction manager on xact
198 * abort, and the routine vc_abort() is called if necessary.
201 VacuumRunning = true;
203 /* matches the StartTransaction in PostgresMain() */
204 CommitTransactionCommand();
/*
 * vc_shutdown() -- visible body of the vacuum-cleaner shutdown routine
 * (the function header line is elided from this listing).
 * Removes the pg_vlock mutex file and restarts a transaction to match
 * the commit waiting back in PostgresMain().
 */
210 /* on entry, not in a transaction */
211 if (unlink("pg_vlock") < 0)
212 elog(ERROR, "vacuum: can't destroy lock file!");
214 /* okay, we're done */
215 VacuumRunning = false;
217 /* matches the CommitTransaction in PostgresMain() */
218 StartTransactionCommand();
/*
 * vc_abort() -- visible fragment of the abort handler invoked by the
 * transaction manager when VacuumRunning is set (the unlink of the
 * lock file is elided from this listing).
 */
225 /* on abort, remove the vacuum cleaner lock file */
228 VacuumRunning = false;
232 * vc_vacuum() -- vacuum the database.
234 * This routine builds a list of relations to vacuum, and then calls
235 * code that vacuums them one at a time. We are careful to vacuum each
236 * relation in a separate transaction in order to avoid holding too many
240 vc_vacuum(NameData *VacRelP, bool analyze, List *va_cols)
245 /* get list of relations */
246 vrl = vc_getrels(VacRelP);
/* whole-database ANALYZE: presumably wipes all pg_statistic rows up
 * front (InvalidOid / no attnums) before per-relation stats are rebuilt */
248 if (analyze && VacRelP == NULL && vrl != NULL)
249 vc_delhilowstats(InvalidOid, 0, NULL);
251 /* vacuum each heap relation */
252 for (cur = vrl; cur != (VRelList) NULL; cur = cur->vrl_next)
253 vc_vacone(cur->vrl_relid, analyze, va_cols);
/*
 * vc_getrels() -- build the VRelList of relations to vacuum by scanning
 * pg_class. With a relation name, matches on relname; otherwise matches
 * relkind == 'r' (plain relations). The list nodes are allocated in
 * portal memory so they outlive the per-relation transactions.
 *
 * NOTE(review): listing is elided; the if/else choosing between the two
 * ScanKeyEntryInitialize calls, and several braces, are not visible.
 */
259 vc_getrels(NameData *VacRelP)
263 HeapScanDesc pgcscan;
266 PortalVariableMemory portalmem;
277 StartTransactionCommand();
281 ScanKeyEntryInitialize(&pgckey, 0x0, Anum_pg_class_relname,
282 NameEqualRegProcedure,
283 PointerGetDatum(VacRelP->data));
287 ScanKeyEntryInitialize(&pgckey, 0x0, Anum_pg_class_relkind,
288 CharacterEqualRegProcedure, CharGetDatum('r'));
291 portalmem = PortalGetVariableMemory(vc_portal);
292 vrl = cur = (VRelList) NULL;
294 pgclass = heap_openr(RelationRelationName);
295 pgcdesc = RelationGetTupleDescriptor(pgclass);
297 pgcscan = heap_beginscan(pgclass, false, false, 1, &pgckey);
299 while (HeapTupleIsValid(pgctup = heap_getnext(pgcscan, 0, &buf)))
304 d = heap_getattr(pgctup, Anum_pg_class_relname, pgcdesc, &n);
308 * don't vacuum large objects for now - something breaks when we
/* large-object relations are named "xinv<oid>" / "xinx<oid>"; this
 * pattern check skips them */
311 if ((strlen(rname) >= 5) && rname[0] == 'x' &&
312 rname[1] == 'i' && rname[2] == 'n' &&
313 (rname[3] == 'v' || rname[3] == 'x') &&
314 rname[4] >= '0' && rname[4] <= '9')
316 elog(NOTICE, "Rel %s: can't vacuum LargeObjects now",
322 d = heap_getattr(pgctup, Anum_pg_class_relkind, pgcdesc, &n);
324 rkind = DatumGetChar(d);
326 /* skip system relations */
330 elog(NOTICE, "Vacuum: can not process index and certain system tables")
334 /* get a relation list entry for this guy */
/* allocate list nodes in portal memory so they survive commits */
335 old = MemoryContextSwitchTo((MemoryContext) portalmem);
336 if (vrl == (VRelList) NULL)
338 vrl = cur = (VRelList) palloc(sizeof(VRelListData));
342 cur->vrl_next = (VRelList) palloc(sizeof(VRelListData));
345 MemoryContextSwitchTo(old);
347 cur->vrl_relid = pgctup->t_oid;
348 cur->vrl_next = (VRelList) NULL;
350 /* wei hates it if you forget to do this */
354 elog(NOTICE, "Vacuum: table not found");
357 heap_endscan(pgcscan);
360 CommitTransactionCommand();
/*
 * NOTE(review): listing is elided throughout this function; several
 * braces, declarations and conditions are not visible.
 */
366 * vc_vacone() -- vacuum one heap relation
368 * This routine vacuums a single heap, cleans out its indices, and
369 * updates its npages and ntups statistics.
371 * Doing one heap at a time incurs extra overhead, since we need to
372 * check that the heap exists again just before we vacuum it. The
373 * reason that we do this is so that vacuuming can be spread across
374 * many small transactions. Otherwise, two-phase locking would require
375 * us to lock the entire database during one pass of the vacuum cleaner.
378 vc_vacone(Oid relid, bool analyze, List *va_cols)
385 HeapScanDesc pgcscan;
388 VPageListData Vvpl; /* List of pages to vacuum and/or clean
390 VPageListData Fvpl; /* List of pages with space enough for
396 VRelStats *vacrelstats;
398 StartTransactionCommand();
400 ScanKeyEntryInitialize(&pgckey, 0x0, ObjectIdAttributeNumber,
401 ObjectIdEqualRegProcedure,
402 ObjectIdGetDatum(relid));
404 pgclass = heap_openr(RelationRelationName);
405 pgcdesc = RelationGetTupleDescriptor(pgclass);
406 pgcscan = heap_beginscan(pgclass, false, false, 1, &pgckey);
409 * Race condition -- if the pg_class tuple has gone away since the
410 * last time we saw it, we don't need to vacuum it.
413 if (!HeapTupleIsValid(pgctup = heap_getnext(pgcscan, 0, &pgcbuf)))
415 heap_endscan(pgcscan);
417 CommitTransactionCommand();
421 /* now open the class and vacuum it */
422 onerel = heap_open(relid);
424 vacrelstats = (VRelStats *) palloc(sizeof(VRelStats));
425 vacrelstats->relid = relid;
426 vacrelstats->npages = vacrelstats->ntups = 0;
427 vacrelstats->hasindex = false;
/* ANALYZE path: build per-attribute VacAttrStats (comparison functions
 * and output function per column) -- skipped for system relations */
428 if (analyze && !IsSystemRelationName((RelationGetRelationName(onerel))->data))
432 AttributeTupleForm *attr;
434 attr_cnt = onerel->rd_att->natts;
435 attr = onerel->rd_att->attrs;
442 if (length(va_cols) > attr_cnt)
443 elog(ERROR, "vacuum: too many attributes specified for relation %s",
444 (RelationGetRelationName(onerel))->data);
/* resolve each requested column name to an attribute number */
445 attnums = (int *) palloc(attr_cnt * sizeof(int));
448 char *col = (char *) lfirst(le);
450 for (i = 0; i < attr_cnt; i++)
452 if (namestrcmp(&(attr[i]->attname), col) == 0)
455 if (i < attr_cnt) /* found */
459 elog(ERROR, "vacuum: there is no attribute %s in %s",
460 col, (RelationGetRelationName(onerel))->data);
466 vacrelstats->vacattrstats =
467 (VacAttrStats *) palloc(attr_cnt * sizeof(VacAttrStats));
469 for (i = 0; i < attr_cnt; i++)
471 Operator func_operator;
472 OperatorTupleForm pgopform;
475 stats = &vacrelstats->vacattrstats[i];
476 stats->attr = palloc(ATTRIBUTE_TUPLE_SIZE);
477 memmove(stats->attr, attr[((attnums) ? attnums[i] : i)], ATTRIBUTE_TUPLE_SIZE);
478 stats->best = stats->guess1 = stats->guess2 = 0;
479 stats->max = stats->min = 0;
480 stats->best_len = stats->guess1_len = stats->guess2_len = 0;
481 stats->max_len = stats->min_len = 0;
482 stats->initialized = false;
483 stats->best_cnt = stats->guess1_cnt = stats->guess1_hits = stats->guess2_hits = 0;
484 stats->max_cnt = stats->min_cnt = stats->null_cnt = stats->nonnull_cnt = 0;
/* look up "=", "<", ">" operators for the column type; a missing
 * operator leaves the corresponding fn_addr NULL (see the
 * VacAttrStatsEqValid / VacAttrStatsLtGtValid macros above) */
486 func_operator = oper("=", stats->attr->atttypid, stats->attr->atttypid, true);
487 if (func_operator != NULL)
489 pgopform = (OperatorTupleForm) GETSTRUCT(func_operator);
490 fmgr_info(pgopform->oprcode, &(stats->f_cmpeq));
493 stats->f_cmpeq.fn_addr = NULL;
495 func_operator = oper("<", stats->attr->atttypid, stats->attr->atttypid, true);
496 if (func_operator != NULL)
498 pgopform = (OperatorTupleForm) GETSTRUCT(func_operator);
499 fmgr_info(pgopform->oprcode, &(stats->f_cmplt));
502 stats->f_cmplt.fn_addr = NULL;
504 func_operator = oper(">", stats->attr->atttypid, stats->attr->atttypid, true);
505 if (func_operator != NULL)
507 pgopform = (OperatorTupleForm) GETSTRUCT(func_operator);
508 fmgr_info(pgopform->oprcode, &(stats->f_cmpgt));
511 stats->f_cmpgt.fn_addr = NULL;
513 pgttup = SearchSysCacheTuple(TYPOID,
514 ObjectIdGetDatum(stats->attr->atttypid),
516 if (HeapTupleIsValid(pgttup))
517 stats->outfunc = ((TypeTupleForm) GETSTRUCT(pgttup))->typoutput;
519 stats->outfunc = InvalidOid;
521 vacrelstats->va_natts = attr_cnt;
/* drop existing hi/low stats for the columns we are about to recompute */
522 vc_delhilowstats(relid, ((attnums) ? attr_cnt : 0), attnums);
528 vacrelstats->va_natts = 0;
529 vacrelstats->vacattrstats = (VacAttrStats *) NULL;
532 /* we require the relation to be locked until the indices are cleaned */
533 RelationSetLockForWrite(onerel);
536 Vvpl.vpl_npages = Fvpl.vpl_npages = 0;
537 vc_scanheap(vacrelstats, onerel, &Vvpl, &Fvpl);
539 /* Now open indices */
540 Irel = (Relation *) NULL;
541 vc_getindices(vacrelstats->relid, &nindices, &Irel);
544 vacrelstats->hasindex = true;
546 vacrelstats->hasindex = false;
548 /* Clean/scan index relation(s) */
549 if (Irel != (Relation *) NULL)
551 if (Vvpl.vpl_npages > 0)
553 for (i = 0; i < nindices; i++)
554 vc_vaconeind(&Vvpl, Irel[i], vacrelstats->ntups);
557 /* just scan indices to update statistic */
559 for (i = 0; i < nindices; i++)
560 vc_scanoneind(Irel[i], vacrelstats->ntups);
564 if (Fvpl.vpl_npages > 0) /* Try to shrink heap */
565 vc_rpfheap(vacrelstats, onerel, &Vvpl, &Fvpl, nindices, Irel);
568 if (Irel != (Relation *) NULL)
569 vc_clsindices(nindices, Irel);
570 if (Vvpl.vpl_npages > 0)/* Clean pages from Vvpl list */
571 vc_vacheap(vacrelstats, onerel, &Vvpl);
574 /* ok - free Vvpl list of reapped pages */
575 if (Vvpl.vpl_npages > 0)
577 vpp = Vvpl.vpl_pgdesc;
578 for (i = 0; i < Vvpl.vpl_npages; i++, vpp++)
580 pfree(Vvpl.vpl_pgdesc);
581 if (Fvpl.vpl_npages > 0)
582 pfree(Fvpl.vpl_pgdesc);
585 /* all done with this class */
587 heap_endscan(pgcscan);
590 /* update statistics in pg_class */
591 vc_updstats(vacrelstats->relid, vacrelstats->npages, vacrelstats->ntups,
592 vacrelstats->hasindex, vacrelstats);
594 /* next command frees attribute stats */
596 CommitTransactionCommand();
/*
 * NOTE(review): listing is elided; many braces, counters' increments
 * (nempg, nnepg, nvac, ...) and some conditions are not visible.
 */
600 * vc_scanheap() -- scan an open heap relation
602 * This routine sets commit times, constructs Vvpl list of
603 * empty/uninitialized pages and pages with dead tuples and
604 * ~LP_USED line pointers, constructs Fvpl list of pages
605 * appropriate for purposes of shrinking and maintains statistics
606 * on the number of live tuples in a heap.
609 vc_scanheap(VRelStats *vacrelstats, Relation onerel,
610 VPageList Vvpl, VPageList Fvpl)
639 Size min_tlen = MAXTUPLEN;
641 int32 i /* , attr_cnt */ ;
644 bool do_shrinking = true;
/* snapshot resource usage so elapsed CPU time can be reported at the end */
646 getrusage(RUSAGE_SELF, &ru0);
648 nvac = ntups = nunused = ncrash = nempg = nnepg = nchpg = nemend = 0;
651 relname = (RelationGetRelationName(onerel))->data;
653 nblocks = RelationGetNumberOfBlocks(onerel);
/* one reusable page descriptor sized for the worst-case offset array */
655 vpc = (VPageDescr) palloc(sizeof(VPageDescrData) + MaxOffsetNumber * sizeof(OffsetNumber));
658 for (blkno = 0; blkno < nblocks; blkno++)
660 buf = ReadBuffer(onerel, blkno);
661 page = BufferGetPage(buf);
662 vpc->vpd_blkno = blkno;
/* uninitialized (never-written) page: initialize it and reap it */
667 elog(NOTICE, "Rel %s: Uninitialized page %u - fixing",
669 PageInit(page, BufferGetPageSize(buf), 0);
670 vpc->vpd_free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower;
671 frsize += (vpc->vpd_free - sizeof(ItemIdData));
674 vc_reappage(Vvpl, vpc);
679 if (PageIsEmpty(page))
681 vpc->vpd_free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower;
682 frsize += (vpc->vpd_free - sizeof(ItemIdData));
685 vc_reappage(Vvpl, vpc);
692 maxoff = PageGetMaxOffsetNumber(page);
693 for (offnum = FirstOffsetNumber;
695 offnum = OffsetNumberNext(offnum))
697 itemid = PageGetItemId(page, offnum);
700 * Collect un-used items too - it's possible to have indices
701 * pointing here after crash.
703 if (!ItemIdIsUsed(itemid))
705 vpc->vpd_voff[vpc->vpd_noff++] = offnum;
710 htup = (HeapTuple) PageGetItem(page, itemid);
/* resolve xmin status if the hint bit is not yet set */
713 if (!(htup->t_infomask & HEAP_XMIN_COMMITTED))
715 if (htup->t_infomask & HEAP_XMIN_INVALID)
719 if (TransactionIdDidAbort(htup->t_xmin))
721 else if (TransactionIdDidCommit(htup->t_xmin))
723 htup->t_infomask |= HEAP_XMIN_COMMITTED;
726 else if (!TransactionIdIsInProgress(htup->t_xmin))
729 * Not Aborted, Not Committed, Not in Progress -
730 * so it's from crashed process. - vadim 11/26/96
/* a concurrent inserter means we cannot safely move tuples */
737 elog(NOTICE, "Rel %s: TID %u/%u: InsertTransactionInProgress %u - can't shrink relation",
738 relname, blkno, offnum, htup->t_xmin);
739 do_shrinking = false;
745 * here we are concerned about tuples with xmin committed
746 * and xmax unknown or committed
748 if (htup->t_infomask & HEAP_XMIN_COMMITTED &&
749 !(htup->t_infomask & HEAP_XMAX_INVALID))
751 if (htup->t_infomask & HEAP_XMAX_COMMITTED)
753 else if (TransactionIdDidAbort(htup->t_xmax))
755 htup->t_infomask |= HEAP_XMAX_INVALID;
758 else if (TransactionIdDidCommit(htup->t_xmax))
760 else if (!TransactionIdIsInProgress(htup->t_xmax))
763 * Not Aborted, Not Committed, Not in Progress - so it's
764 * from crashed process. - vadim 06/02/97
766 htup->t_infomask |= HEAP_XMAX_INVALID;;
/* a concurrent deleter likewise disables shrinking */
771 elog(NOTICE, "Rel %s: TID %u/%u: DeleteTransactionInProgress %u - can't shrink relation",
772 relname, blkno, offnum, htup->t_xmax);
773 do_shrinking = false;
778 * It's possibly! But from where it comes ? And should we fix
779 * it ? - vadim 11/28/96
/* sanity checks: t_ctid, item length, and OID should be consistent */
781 itemptr = &(htup->t_ctid);
782 if (!ItemPointerIsValid(itemptr) ||
783 BlockIdGetBlockNumber(&(itemptr->ip_blkid)) != blkno)
785 elog(NOTICE, "Rel %s: TID %u/%u: TID IN TUPLEHEADER %u/%u IS NOT THE SAME. TUPGONE %d.",
786 relname, blkno, offnum,
787 BlockIdGetBlockNumber(&(itemptr->ip_blkid)),
788 itemptr->ip_posid, tupgone);
794 if (htup->t_len != itemid->lp_len)
796 elog(NOTICE, "Rel %s: TID %u/%u: TUPLE_LEN IN PAGEHEADER %u IS NOT THE SAME AS IN TUPLEHEADER %u. TUPGONE %d.",
797 relname, blkno, offnum,
798 itemid->lp_len, htup->t_len, tupgone);
800 if (!OidIsValid(htup->t_oid))
802 elog(NOTICE, "Rel %s: TID %u/%u: OID IS INVALID. TUPGONE %d.",
803 relname, blkno, offnum, tupgone);
/* dead tuple: simulate its removal on a scratch copy of the page so
 * free space can be measured after PageRepairFragmentation */
810 if (tempPage == (Page) NULL)
814 pageSize = PageGetPageSize(page);
815 tempPage = (Page) palloc(pageSize);
816 memmove(tempPage, page, pageSize);
819 lpp = &(((PageHeader) tempPage)->pd_linp[offnum - 1]);
822 lpp->lp_flags &= ~LP_USED;
824 vpc->vpd_voff[vpc->vpd_noff++] = offnum;
/* live tuple: track min/max tuple length and feed ANALYZE stats */
832 if (htup->t_len < min_tlen)
833 min_tlen = htup->t_len;
834 if (htup->t_len > max_tlen)
835 max_tlen = htup->t_len;
836 vc_attrstats(onerel, vacrelstats, htup);
848 if (tempPage != (Page) NULL)
849 { /* Some tuples are gone */
850 PageRepairFragmentation(tempPage);
851 vpc->vpd_free = ((PageHeader) tempPage)->pd_upper - ((PageHeader) tempPage)->pd_lower;
852 frsize += vpc->vpd_free;
853 vc_reappage(Vvpl, vpc);
855 tempPage = (Page) NULL;
857 else if (vpc->vpd_noff > 0)
858 { /* there are only ~LP_USED line pointers */
859 vpc->vpd_free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower;
860 frsize += vpc->vpd_free;
861 vc_reappage(Vvpl, vpc);
873 /* save stats in the rel list for use later */
874 vacrelstats->ntups = ntups;
875 vacrelstats->npages = nblocks;
876 /* vacrelstats->natts = attr_cnt;*/
/* presumably reached when no live tuples were seen (ntups == 0), so the
 * MAXTUPLEN sentinel must not leak into the stats -- condition elided */
878 min_tlen = max_tlen = 0;
879 vacrelstats->min_tlen = min_tlen;
880 vacrelstats->max_tlen = max_tlen;
882 Vvpl->vpl_nemend = nemend;
883 Fvpl->vpl_nemend = nemend;
886 * Try to make Fvpl keeping in mind that we can't use free space of
887 * "empty" end-pages and last page if it reapped.
889 if (do_shrinking && Vvpl->vpl_npages - nemend > 0)
891 int nusf; /* blocks useful for re-using */
893 nusf = Vvpl->vpl_npages - nemend;
894 if ((Vvpl->vpl_pgdesc[nusf - 1])->vpd_blkno == nblocks - nemend - 1)
897 for (i = 0; i < nusf; i++)
899 vp = Vvpl->vpl_pgdesc[i];
/* only pages that can hold at least the smallest tuple are useful */
900 if (vc_enough_space(vp, min_tlen))
902 vc_vpinsert(Fvpl, vp);
903 frsusf += vp->vpd_free;
908 getrusage(RUSAGE_SELF, &ru1);
910 elog(MESSAGE_LEVEL, "Rel %s: Pages %u: Changed %u, Reapped %u, Empty %u, New %u; \
911 Tup %u: Vac %u, Crash %u, UnUsed %u, MinLen %u, MaxLen %u; Re-using: Free/Avail. Space %u/%u; EndEmpty/Avail. Pages %u/%u. Elapsed %u/%u sec.",
913 nblocks, nchpg, Vvpl->vpl_npages, nempg, nnepg,
914 ntups, nvac, ncrash, nunused, min_tlen, max_tlen,
915 frsize, frsusf, nemend, Fvpl->vpl_npages,
916 ru1.ru_stime.tv_sec - ru0.ru_stime.tv_sec,
917 ru1.ru_utime.tv_sec - ru0.ru_utime.tv_sec);
/*
 * NOTE(review): listing is elided; loop braces, several counters
 * (nmoved, nchkmvd, ntups) and some statements are not visible.
 */
923 * vc_rpfheap() -- try to repair relation's fragmentation
925 * This routine marks dead tuples as unused and tries to re-use dead space
926 * by moving tuples (and inserting indices if needed). It constructs
927 * Nvpl list of free-ed pages (moved tuples) and clean indices
928 * for them after committing (in hack-manner - without losing locks
929 * and freeing memory!) current transaction. It truncates relation
930 * if some end-blocks are gone away.
933 vc_rpfheap(VRelStats *vacrelstats, Relation onerel,
934 VPageList Vvpl, VPageList Fvpl, int nindices, Relation *Irel)
944 OffsetNumber offnum = 0,
952 TupleDesc tupdesc = NULL;
953 Datum *idatum = NULL;
955 InsertIndexResult iresult;
957 VPageDescr ToVpd = NULL,
979 getrusage(RUSAGE_SELF, &ru0);
/* moved tuples are (re)inserted under our own XID/CID, then the xact is
 * "committed" by hand further down -- see the quick-hack comment there */
981 myXID = GetCurrentTransactionId();
982 myCID = GetCurrentCommandId();
984 if (Irel != (Relation *) NULL) /* preparation for index' inserts */
986 vc_mkindesc(onerel, nindices, Irel, &Idesc);
987 tupdesc = RelationGetTupleDescriptor(onerel);
988 idatum = (Datum *) palloc(INDEX_MAX_KEYS * sizeof(*idatum));
989 inulls = (char *) palloc(INDEX_MAX_KEYS * sizeof(*inulls));
/* track the last (highest-block) entry of each list; both lists are in
 * block order, so we walk them backwards as blkno decreases */
993 Fnpages = Fvpl->vpl_npages;
994 Fvplast = Fvpl->vpl_pgdesc[Fnpages - 1];
995 Fblklast = Fvplast->vpd_blkno;
996 Assert(Vvpl->vpl_npages > Vvpl->vpl_nemend);
997 Vnpages = Vvpl->vpl_npages - Vvpl->vpl_nemend;
998 Vvplast = Vvpl->vpl_pgdesc[Vnpages - 1];
999 Vblklast = Vvplast->vpd_blkno;
1000 Assert(Vblklast >= Fblklast);
1001 ToBuf = InvalidBuffer;
1004 vpc = (VPageDescr) palloc(sizeof(VPageDescrData) + MaxOffsetNumber * sizeof(OffsetNumber));
1005 vpc->vpd_nusd = vpc->vpd_noff = 0;
/* scan backwards from the end of the heap, moving end-tuples into the
 * free space recorded in Fvpl */
1007 nblocks = vacrelstats->npages;
1008 for (blkno = nblocks - Vvpl->vpl_nemend - 1;; blkno--)
1010 /* if it's reapped page and it was used by me - quit */
1011 if (blkno == Fblklast && Fvplast->vpd_nusd > 0)
1014 buf = ReadBuffer(onerel, blkno);
1015 page = BufferGetPage(buf);
1019 isempty = PageIsEmpty(page);
1022 if (blkno == Vblklast) /* it's reapped page */
1024 if (Vvplast->vpd_noff > 0) /* there are dead tuples */
1025 { /* on this page - clean */
1027 vc_vacpage(page, Vvplast);
1035 Assert(Vnpages > 0);
1036 /* get prev reapped page from Vvpl */
1037 Vvplast = Vvpl->vpl_pgdesc[Vnpages - 1];
1038 Vblklast = Vvplast->vpd_blkno;
1039 if (blkno == Fblklast) /* this page in Fvpl too */
1042 Assert(Fnpages > 0);
1043 Assert(Fvplast->vpd_nusd == 0);
1044 /* get prev reapped page from Fvpl */
1045 Fvplast = Fvpl->vpl_pgdesc[Fnpages - 1];
1046 Fblklast = Fvplast->vpd_blkno;
1048 Assert(Fblklast <= Vblklast);
1060 vpc->vpd_blkno = blkno;
1061 maxoff = PageGetMaxOffsetNumber(page);
1062 for (offnum = FirstOffsetNumber;
1064 offnum = OffsetNumberNext(offnum))
1066 itemid = PageGetItemId(page, offnum);
1068 if (!ItemIdIsUsed(itemid))
1071 htup = (HeapTuple) PageGetItem(page, itemid);
1074 /* try to find new page for this tuple */
1075 if (ToBuf == InvalidBuffer ||
1076 !vc_enough_space(ToVpd, tlen))
1078 if (ToBuf != InvalidBuffer)
1081 ToBuf = InvalidBuffer;
1084 * If no one tuple can't be added to this page -
1085 * remove page from Fvpl. - vadim 11/27/96
1087 * But we can't remove last page - this is our
1088 * "show-stopper" !!! - vadim 02/25/98
1090 if (ToVpd != Fvplast &&
1091 !vc_enough_space(ToVpd, vacrelstats->min_tlen))
1093 Assert(Fnpages > ToVpI + 1);
1094 memmove(Fvpl->vpl_pgdesc + ToVpI,
1095 Fvpl->vpl_pgdesc + ToVpI + 1,
1096 sizeof(VPageDescr *) * (Fnpages - ToVpI - 1));
1098 Assert (Fvplast == Fvpl->vpl_pgdesc[Fnpages - 1]);
/* first-fit search for a destination page with room for this tuple */
1101 for (i = 0; i < Fnpages; i++)
1103 if (vc_enough_space(Fvpl->vpl_pgdesc[i], tlen))
1107 break; /* can't move item anywhere */
1109 ToVpd = Fvpl->vpl_pgdesc[ToVpI];
1110 ToBuf = ReadBuffer(onerel, ToVpd->vpd_blkno);
1111 ToPage = BufferGetPage(ToBuf);
1112 /* if this page was not used before - clean it */
1113 if (!PageIsEmpty(ToPage) && ToVpd->vpd_nusd == 0)
1114 vc_vacpage(ToPage, ToVpd);
/* copy the tuple and stamp it as inserted by this transaction */
1118 newtup = (HeapTuple) palloc(tlen);
1119 memmove((char *) newtup, (char *) htup, tlen);
1121 /* store transaction information */
1122 TransactionIdStore(myXID, &(newtup->t_xmin));
1123 newtup->t_cmin = myCID;
1124 StoreInvalidTransactionId(&(newtup->t_xmax));
1125 /* set xmin to unknown and xmax to invalid */
1126 newtup->t_infomask &= ~(HEAP_XACT_MASK);
1127 newtup->t_infomask |= HEAP_XMAX_INVALID;
1129 /* add tuple to the page */
1130 newoff = PageAddItem(ToPage, (Item) newtup, tlen,
1131 InvalidOffsetNumber, LP_USED);
1132 if (newoff == InvalidOffsetNumber)
1135 failed to add item with len = %u to page %u (free space %u, nusd %u, noff %u)",
1136 tlen, ToVpd->vpd_blkno, ToVpd->vpd_free,
1137 ToVpd->vpd_nusd, ToVpd->vpd_noff);
1139 newitemid = PageGetItemId(ToPage, newoff);
1141 newtup = (HeapTuple) PageGetItem(ToPage, newitemid);
1142 ItemPointerSet(&(newtup->t_ctid), ToVpd->vpd_blkno, newoff);
1144 /* now logically delete end-tuple */
1145 TransactionIdStore(myXID, &(htup->t_xmax));
1146 htup->t_cmax = myCID;
1147 /* set xmax to unknown */
1148 htup->t_infomask &= ~(HEAP_XMAX_INVALID | HEAP_XMAX_COMMITTED);
1152 ToVpd->vpd_free = ((PageHeader) ToPage)->pd_upper - ((PageHeader) ToPage)->pd_lower;
1153 vpc->vpd_voff[vpc->vpd_noff++] = offnum;
1155 /* insert index' tuples if needed */
1156 if (Irel != (Relation *) NULL)
1158 for (i = 0, idcur = Idesc; i < nindices; i++, idcur++)
1162 (AttrNumber *) &(idcur->tform->indkey[0]),
1169 iresult = index_insert(
1180 } /* walk along page */
1182 if (vpc->vpd_noff > 0) /* some tuples were moved */
1184 vc_reappage(&Nvpl, vpc);
1192 if (offnum <= maxoff)
1193 break; /* some item(s) left */
1195 } /* walk along relation */
1197 blkno++; /* new number of blocks */
1199 if (ToBuf != InvalidBuffer)
1209 * We have to commit our tuple' movings before we'll truncate
1210 * relation, but we shouldn't lose our locks. And so - quick hack:
1211 * flush buffers and record status of current transaction as
1212 * committed, and continue. - vadim 11/13/96
1214 FlushBufferPool(!TransactionFlushEnabled());
1215 TransactionIdCommit(myXID);
1216 FlushBufferPool(!TransactionFlushEnabled());
1220 * Clean uncleaned reapped pages from Vvpl list and set xmin committed
1221 * for inserted tuples
1224 for (i = 0, vpp = Vvpl->vpl_pgdesc; i < Vnpages; i++, vpp++)
1226 Assert((*vpp)->vpd_blkno < blkno);
1227 buf = ReadBuffer(onerel, (*vpp)->vpd_blkno);
1228 page = BufferGetPage(buf);
1229 if ((*vpp)->vpd_nusd == 0) /* this page was not used */
1233 * noff == 0 in empty pages only - such pages should be
1236 Assert((*vpp)->vpd_noff > 0);
1237 vc_vacpage(page, *vpp);
1240 /* this page was used */
/* set the XMIN_COMMITTED hint on tuples we moved here */
1243 moff = PageGetMaxOffsetNumber(page);
1244 for (newoff = FirstOffsetNumber;
1246 newoff = OffsetNumberNext(newoff))
1248 itemid = PageGetItemId(page, newoff);
1249 if (!ItemIdIsUsed(itemid))
1251 htup = (HeapTuple) PageGetItem(page, itemid);
1252 if (TransactionIdEquals((TransactionId) htup->t_xmin, myXID))
1254 htup->t_infomask |= HEAP_XMIN_COMMITTED;
1258 Assert((*vpp)->vpd_nusd == ntups);
1263 Assert(nmoved == nchkmvd);
1265 getrusage(RUSAGE_SELF, &ru1);
1267 elog(MESSAGE_LEVEL, "Rel %s: Pages: %u --> %u; Tuple(s) moved: %u. \
1268 Elapsed %u/%u sec.",
1269 (RelationGetRelationName(onerel))->data,
1270 nblocks, blkno, nmoved,
1271 ru1.ru_stime.tv_sec - ru0.ru_stime.tv_sec,
1272 ru1.ru_utime.tv_sec - ru0.ru_utime.tv_sec);
1274 if (Nvpl.vpl_npages > 0)
1276 /* vacuum indices again if needed */
1277 if (Irel != (Relation *) NULL)
/* Nvpl was built walking the heap backwards; reverse it so
 * vc_vaconeind (which binary-searches by block) sees ascending order */
1283 /* re-sort Nvpl.vpl_pgdesc */
1284 for (vpleft = Nvpl.vpl_pgdesc,
1285 vpright = Nvpl.vpl_pgdesc + Nvpl.vpl_npages - 1;
1286 vpleft < vpright; vpleft++, vpright--)
1292 for (i = 0; i < nindices; i++)
1293 vc_vaconeind(&Nvpl, Irel[i], vacrelstats->ntups);
1297 * clean moved tuples from last page in Nvpl list if some tuples
1300 if (vpc->vpd_noff > 0 && offnum <= maxoff)
1302 Assert(vpc->vpd_blkno == blkno - 1);
1303 buf = ReadBuffer(onerel, vpc->vpd_blkno);
1304 page = BufferGetPage(buf);
1307 for (offnum = FirstOffsetNumber;
1309 offnum = OffsetNumberNext(offnum))
1311 itemid = PageGetItemId(page, offnum);
1312 if (!ItemIdIsUsed(itemid))
1314 htup = (HeapTuple) PageGetItem(page, itemid);
1315 Assert(TransactionIdEquals((TransactionId) htup->t_xmax, myXID));
1316 itemid->lp_flags &= ~LP_USED;
1319 Assert(vpc->vpd_noff == ntups);
1320 PageRepairFragmentation(page);
1324 /* now - free new list of reapped pages */
1325 vpp = Nvpl.vpl_pgdesc;
1326 for (i = 0; i < Nvpl.vpl_npages; i++, vpp++)
1328 pfree(Nvpl.vpl_pgdesc);
1331 /* truncate relation */
1332 if (blkno < nblocks)
/* drop cached buffers past the new end before physically truncating */
1334 i = BlowawayRelationBuffers(onerel, blkno);
1336 elog (FATAL, "VACUUM (vc_rpfheap): BlowawayRelationBuffers returned %d", i);
1337 blkno = smgrtruncate(DEFAULT_SMGR, onerel, blkno);
1339 vacrelstats->npages = blkno; /* set new number of blocks */
1342 if (Irel != (Relation *) NULL) /* pfree index' allocations */
1347 vc_clsindices(nindices, Irel);
1355 * vc_vacheap() -- free dead tuples
1357 * This routine marks dead tuples as unused and truncates relation
1358 * if there are "empty" end-blocks.
1361 vc_vacheap(VRelStats *vacrelstats, Relation onerel, VPageList Vvpl)
/* the vpl_nemend trailing empty pages are handled by truncation below,
 * not by per-page cleaning */
1369 nblocks = Vvpl->vpl_npages;
1370 nblocks -= Vvpl->vpl_nemend; /* nothing to do with them */
1372 for (i = 0, vpp = Vvpl->vpl_pgdesc; i < nblocks; i++, vpp++)
/* only pages with dead line pointers recorded need cleaning */
1374 if ((*vpp)->vpd_noff > 0)
1376 buf = ReadBuffer(onerel, (*vpp)->vpd_blkno);
1377 page = BufferGetPage(buf);
1378 vc_vacpage(page, *vpp);
1383 /* truncate relation if there are some empty end-pages */
1384 if (Vvpl->vpl_nemend > 0)
1386 Assert(vacrelstats->npages >= Vvpl->vpl_nemend);
1387 nblocks = vacrelstats->npages - Vvpl->vpl_nemend;
1388 elog(MESSAGE_LEVEL, "Rel %s: Pages: %u --> %u.",
1389 (RelationGetRelationName(onerel))->data,
1390 vacrelstats->npages, nblocks);
1393 * we have to flush "empty" end-pages (if changed, but who knows
1394 * it) before truncation
1396 FlushBufferPool(!TransactionFlushEnabled());
1398 i = BlowawayRelationBuffers(onerel, nblocks);
1400 elog (FATAL, "VACUUM (vc_vacheap): BlowawayRelationBuffers returned %d", i);
1402 nblocks = smgrtruncate(DEFAULT_SMGR, onerel, nblocks);
1403 Assert(nblocks >= 0);
1404 vacrelstats->npages = nblocks; /* set new number of blocks */
1410 * vc_vacpage() -- free dead tuples on a page
1411 * and repair its fragmentation.
1414 vc_vacpage(Page page, VPageDescr vpd)
/* only valid for pages we have not moved tuples onto (nusd == 0) */
1419 Assert(vpd->vpd_nusd == 0);
/* clear LP_USED on every recorded dead line pointer, then compact */
1420 for (i = 0; i < vpd->vpd_noff; i++)
1422 itemid = &(((PageHeader) page)->pd_linp[vpd->vpd_voff[i] - 1]);
1423 itemid->lp_flags &= ~LP_USED;
1425 PageRepairFragmentation(page);
1430 * vc_scanoneind() -- scan one index relation to update statistics.
1434 vc_scanoneind(Relation indrel, int nhtups)
1436 RetrieveIndexResult res;
1437 IndexScanDesc iscan;
1443 getrusage(RUSAGE_SELF, &ru0);
1445 /* walk through the entire index */
1446 iscan = index_beginscan(indrel, false, 0, (ScanKey) NULL);
/* count index tuples (the increment of nitups is elided here) */
1449 while ((res = index_getnext(iscan, ForwardScanDirection))
1450 != (RetrieveIndexResult) NULL)
1456 index_endscan(iscan);
1458 /* now update statistics in pg_class */
1459 nipages = RelationGetNumberOfBlocks(indrel);
1460 vc_updstats(indrel->rd_id, nipages, nitups, false, NULL);
1462 getrusage(RUSAGE_SELF, &ru1);
1464 elog(MESSAGE_LEVEL, "Ind %s: Pages %u; Tuples %u. Elapsed %u/%u sec.",
1465 indrel->rd_rel->relname.data, nipages, nitups,
1466 ru1.ru_stime.tv_sec - ru0.ru_stime.tv_sec,
1467 ru1.ru_utime.tv_sec - ru0.ru_utime.tv_sec);
/* warn on index/heap tuple-count mismatch -- may indicate corruption */
1469 if (nitups != nhtups)
1470 elog(NOTICE, "Ind %s: NUMBER OF INDEX' TUPLES (%u) IS NOT THE SAME AS HEAP' (%u)",
1471 indrel->rd_rel->relname.data, nitups, nhtups);
1473 } /* vc_scanoneind */
1476 * vc_vaconeind() -- vacuum one index relation.
1478 * Vpl is the VPageList of the heap we're currently vacuuming.
1479 * It's locked. Indrel is an index relation on the vacuumed heap.
1480 * We don't set locks on the index relation here, since the indexed
1481 * access methods support locking at different granularities.
1482 * We let them handle it.
1484 * Finally, we arrange to update the index relation's statistics in
1488 vc_vaconeind(VPageList vpl, Relation indrel, int nhtups)
1490 RetrieveIndexResult res;
1491 IndexScanDesc iscan;
1492 ItemPointer heapptr;
/* capture resource usage before the scan so we can report elapsed sys/user time */
1500 getrusage(RUSAGE_SELF, &ru0);
1502 /* walk through the entire index */
1503 iscan = index_beginscan(indrel, false, 0, (ScanKey) NULL);
1507 while ((res = index_getnext(iscan, ForwardScanDirection))
1508 != (RetrieveIndexResult) NULL)
1510 heapptr = &res->heap_iptr;
/* was the heap tuple this entry points at reaped by the heap pass? */
1512 if ((vp = vc_tidreapped(heapptr, vpl)) != (VPageDescr) NULL)
1515 elog(DEBUG, "<%x,%x> -> <%x,%x>",
1516 ItemPointerGetBlockNumber(&(res->index_iptr)),
1517 ItemPointerGetOffsetNumber(&(res->index_iptr)),
1518 ItemPointerGetBlockNumber(&(res->heap_iptr)),
1519 ItemPointerGetOffsetNumber(&(res->heap_iptr)));
/* vpd_noff == 0 marks a wholly-empty heap page; an index pointer into it is stale */
1521 if (vp->vpd_noff == 0)
1522 { /* this is EmptyPage !!! */
1523 elog(NOTICE, "Ind %s: pointer to EmptyPage (blk %u off %u) - fixing",
1524 indrel->rd_rel->relname.data,
1525 vp->vpd_blkno, ItemPointerGetOffsetNumber(heapptr));
/* remove the stale index entry for the reaped heap tuple */
1528 index_delete(indrel, &res->index_iptr);
1539 index_endscan(iscan);
1541 /* now update statistics in pg_class */
1542 nipages = RelationGetNumberOfBlocks(indrel);
1543 vc_updstats(indrel->rd_id, nipages, nitups, false, NULL);
1545 getrusage(RUSAGE_SELF, &ru1);
1547 elog(MESSAGE_LEVEL, "Ind %s: Pages %u; Tuples %u: Deleted %u. Elapsed %u/%u sec.",
1548 indrel->rd_rel->relname.data, nipages, nitups, nvac,
1549 ru1.ru_stime.tv_sec - ru0.ru_stime.tv_sec,
1550 ru1.ru_utime.tv_sec - ru0.ru_utime.tv_sec);
/* counts should now agree with the heap; mismatch hints at corruption */
1552 if (nitups != nhtups)
1553 elog(NOTICE, "Ind %s: NUMBER OF INDEX' TUPLES (%u) IS NOT THE SAME AS HEAP' (%u)",
1554 indrel->rd_rel->relname.data, nitups, nhtups);
1556 } /* vc_vaconeind */
1559 * vc_tidreapped() -- is a particular tid reapped?
1561 * vpl->VPageDescr_array is sorted in right order.
1564 vc_tidreapped(ItemPointer itemptr, VPageList vpl)
1566 OffsetNumber ioffno;
/* split the tid into its page number and line-pointer offset */
1572 vpd.vpd_blkno = ItemPointerGetBlockNumber(itemptr);
1573 ioffno = ItemPointerGetOffsetNumber(itemptr);
/* binary-search the sorted page-descriptor array for the tid's block */
1576 vpp = (VPageDescr *) vc_find_eq((char *) (vpl->vpl_pgdesc),
1577 vpl->vpl_npages, sizeof(VPageDescr), (char *) &vp,
/* page not in the reaped list => tid was not reaped */
1580 if (vpp == (VPageDescr *) NULL)
1581 return ((VPageDescr) NULL);
1584 /* ok - we are on true page */
/* an empty page reaps every tid on it (offset search below is skipped) */
1586 if (vp->vpd_noff == 0)
1587 { /* this is EmptyPage !!! */
/* binary-search this page's sorted offset array for the tid's offset */
1591 voff = (OffsetNumber *) vc_find_eq((char *) (vp->vpd_voff),
1592 vp->vpd_noff, sizeof(OffsetNumber), (char *) &ioffno,
1595 if (voff == (OffsetNumber *) NULL)
1596 return ((VPageDescr) NULL);
1600 } /* vc_tidreapped */
1603 * vc_attrstats() -- compute column statistics used by the optimizer
1605 * We compute the column min, max, null and non-null counts.
1606 * Plus we attempt to find the count of the value that occurs most
1607 * frequently in each column
1608 * These figures are used to compute the selectivity of the column
1610 * We use a three-bucket cache to get the most frequent item
1611 * The 'guess' buckets count hits. A cache miss causes guess1
1612 * to get the most hit 'guess' item in the most recent cycle, and
1613 * the new item goes into guess2. Whenever the total count of hits
1614 * of a 'guess' entry is larger than 'best', 'guess' becomes 'best'.
1616 * This method works perfectly for columns with unique values, and columns
1617 * with only two unique values, plus nulls.
1619 * It becomes less perfect as the number of unique values increases and
1620 * their distribution in the table becomes more random.
1624 vc_attrstats(Relation onerel, VRelStats *vacrelstats, HeapTuple htup)
1627 attr_cnt = vacrelstats->va_natts;
1628 VacAttrStats *vacattrstats = vacrelstats->vacattrstats;
1629 TupleDesc tupDesc = onerel->rd_att;
/* fold this tuple's value into the running statistics of every tracked column */
1633 for (i = 0; i < attr_cnt; i++)
1635 VacAttrStats *stats = &vacattrstats[i];
1636 bool value_hit = true;
1638 value = heap_getattr(htup,
1639 stats->attr->attnum, tupDesc, &isnull);
/* no usable equality function for this type => can't track value frequencies */
1641 if (!VacAttrStatsEqValid(stats))
1648 stats->nonnull_cnt++;
/* first non-null value seen: seed all three buckets (and min/max) with it */
1649 if (stats->initialized == false)
1651 vc_bucketcpy(stats->attr, value, &stats->best, &stats->best_len);
1652 /* best_cnt gets incremented later */
1653 vc_bucketcpy(stats->attr, value, &stats->guess1, &stats->guess1_len);
1654 stats->guess1_cnt = stats->guess1_hits = 1;
1655 vc_bucketcpy(stats->attr, value, &stats->guess2, &stats->guess2_len);
1656 stats->guess2_hits = 1;
1657 if (VacAttrStatsLtGtValid(stats))
1659 vc_bucketcpy(stats->attr, value, &stats->max, &stats->max_len);
1660 vc_bucketcpy(stats->attr, value, &stats->min, &stats->min_len);
1662 stats->initialized = true;
/* maintain running min/max when the type supports ordering comparisons */
1664 if (VacAttrStatsLtGtValid(stats))
1666 if ((*fmgr_faddr(&stats->f_cmplt)) (value, stats->min))
1668 vc_bucketcpy(stats->attr, value, &stats->min, &stats->min_len);
1671 if ((*fmgr_faddr(&stats->f_cmpgt)) (value, stats->max))
1673 vc_bucketcpy(stats->attr, value, &stats->max, &stats->max_len);
/* hit-count bookkeeping against min/max and the three frequency buckets */
1676 if ((*fmgr_faddr(&stats->f_cmpeq)) (value, stats->min))
1678 else if ((*fmgr_faddr(&stats->f_cmpeq)) (value, stats->max))
1681 if ((*fmgr_faddr(&stats->f_cmpeq)) (value, stats->best))
1683 else if ((*fmgr_faddr(&stats->f_cmpeq)) (value, stats->guess1))
1685 stats->guess1_cnt++;
1686 stats->guess1_hits++;
1688 else if ((*fmgr_faddr(&stats->f_cmpeq)) (value, stats->guess2))
1689 stats->guess2_hits++;
/* promote guess2 over guess1 when it has accumulated more hits */
1693 if (stats->guess2_hits > stats->guess1_hits)
1695 swapDatum(stats->guess1, stats->guess2);
1696 swapInt(stats->guess1_len, stats->guess2_len);
1697 stats->guess1_cnt = stats->guess2_hits;
1698 swapLong(stats->guess1_hits, stats->guess2_hits);
/* promote guess1 to best when its cycle count beats the current best */
1700 if (stats->guess1_cnt > stats->best_cnt)
1702 swapDatum(stats->best, stats->guess1);
1703 swapInt(stats->best_len, stats->guess1_len);
1704 swapLong(stats->best_cnt, stats->guess1_cnt);
1705 stats->guess1_hits = 1;
1706 stats->guess2_hits = 1;
/* cache miss: the new value replaces guess2 and hit counters restart */
1710 vc_bucketcpy(stats->attr, value, &stats->guess2, &stats->guess2_len);
1711 stats->guess1_hits = 1;
1712 stats->guess2_hits = 1;
1720 * vc_bucketcpy() -- copy an attribute value into a statistics bucket,
 * (re)allocating the bucket's storage for by-reference
 * values when the current buffer is too small.
1724 vc_bucketcpy(AttributeTupleForm attr, Datum value, Datum *bucket, int16 *bucket_len)
/* by-value, fixed-length attributes are stored directly in the Datum (branch body elided) */
1726 if (attr->attbyval && attr->attlen != -1)
/* by-reference: attlen == -1 means varlena, whose size lives in its header */
1730 int len = (attr->attlen != -1 ? attr->attlen : VARSIZE(value));
/* grow the bucket buffer only when the new value doesn't fit */
1732 if (len > *bucket_len)
1734 if (*bucket_len != 0)
1735 pfree(DatumGetPointer(*bucket));
1736 *bucket = PointerGetDatum(palloc(len));
1739 memmove(DatumGetPointer(*bucket), DatumGetPointer(value), len);
1744 * vc_updstats() -- update pg_class statistics for one relation
1746 * This routine works for both index and heap relation entries in
1747 * pg_class. We violate no-overwrite semantics here by storing new
1748 * values for ntups, npages, and hasindex directly in the pg_class
1749 * tuple that's already on the page. The reason for this is that if
1750 * we updated these tuples in the usual way, then every tuple in pg_class
1751 * would be replaced every day. This would make planning and executing
1752 * historical queries very expensive.
1755 vc_updstats(Oid relid, int npages, int ntups, bool hasindex, VRelStats *vacrelstats)
1760 HeapScanDesc rsdesc,
1768 Form_pg_class pgcform;
1771 AttributeTupleForm attp;
1774 * update number of tuples and number of pages in pg_class
/* scan pg_class for the single tuple whose oid matches relid */
1776 ScanKeyEntryInitialize(&rskey, 0x0, ObjectIdAttributeNumber,
1777 ObjectIdEqualRegProcedure,
1778 ObjectIdGetDatum(relid));
1780 rd = heap_openr(RelationRelationName);
1781 rsdesc = heap_beginscan(rd, false, false, 1, &rskey);
1783 if (!HeapTupleIsValid(rtup = heap_getnext(rsdesc, 0, &rbuf)))
1784 elog(ERROR, "pg_class entry for relid %d vanished during vacuuming",
1787 /* overwrite the existing statistics in the tuple */
/* lock the page first, then scribble on the tuple in place (no-overwrite violated by design) */
1788 vc_setpagelock(rd, BufferGetBlockNumber(rbuf));
1789 pgcform = (Form_pg_class) GETSTRUCT(rtup);
1790 pgcform->reltuples = ntups;
1791 pgcform->relpages = npages;
1792 pgcform->relhasindex = hasindex;
/* column statistics are updated only when the caller gathered them (heap vacuum with analyze) */
1794 if (vacrelstats != NULL && vacrelstats->va_natts > 0)
1796 VacAttrStats *vacattrstats = vacrelstats->vacattrstats;
1797 int natts = vacrelstats->va_natts;
1799 ad = heap_openr(AttributeRelationName);
1800 sd = heap_openr(StatisticRelationName);
1801 ScanKeyEntryInitialize(&askey, 0, Anum_pg_attribute_attrelid,
/* walk every pg_attribute row of this relation, matching it to gathered stats */
1804 asdesc = heap_beginscan(ad, false, false, 1, &askey);
1806 while (HeapTupleIsValid(atup = heap_getnext(asdesc, 0, &abuf)))
1809 float32data selratio; /* average ratio of rows selected
1810 * for a random constant */
1811 VacAttrStats *stats;
1812 Datum values[Natts_pg_statistic];
1813 char nulls[Natts_pg_statistic];
1815 attp = (AttributeTupleForm) GETSTRUCT(atup);
1816 if (attp->attnum <= 0) /* skip system attributes for now, */
1817 /* they are unique anyway */
/* find the VacAttrStats entry that matches this attribute number */
1820 for (i = 0; i < natts; i++)
1822 if (attp->attnum == vacattrstats[i].attr->attnum)
1827 stats = &(vacattrstats[i]);
1829 /* overwrite the existing statistics in the tuple */
1830 if (VacAttrStatsEqValid(stats))
1833 vc_setpagelock(ad, BufferGetBlockNumber(abuf));
/* degenerate cases: no data, or (nearly) all-distinct values (branch bodies elided) */
1835 if (stats->nonnull_cnt + stats->null_cnt == 0 ||
1836 (stats->null_cnt <= 1 && stats->best_cnt == 1))
1838 else if (VacAttrStatsLtGtValid(stats) && stats->min_cnt + stats->max_cnt == stats->nonnull_cnt)
/* only min/max/null values occur: selectivity from their exact counts */
1840 double min_cnt_d = stats->min_cnt,
1841 max_cnt_d = stats->max_cnt,
1842 null_cnt_d = stats->null_cnt,
1843 nonnullcnt_d = stats->nonnull_cnt; /* prevent overflow */
1845 selratio = (min_cnt_d * min_cnt_d + max_cnt_d * max_cnt_d + null_cnt_d * null_cnt_d) /
1846 (nonnullcnt_d + null_cnt_d) / (nonnullcnt_d + null_cnt_d);
/* general case: estimate from the most frequent value (best or null) */
1850 double most = (double) (stats->best_cnt > stats->null_cnt ? stats->best_cnt : stats->null_cnt);
1851 double total = ((double) stats->nonnull_cnt) + ((double) stats->null_cnt);
1854 * we assume count of other values are 20% of best
1857 selratio = (most * most + 0.20 * most * (total - most)) / total / total;
/* in-place overwrite of pg_attribute, same trick as pg_class above */
1861 attp->attdisbursion = selratio;
1862 WriteNoReleaseBuffer(abuf);
1864 /* DO PG_STATISTIC INSERTS */
1867 * doing system relations, especially pg_statistic is a
1870 if (VacAttrStatsLtGtValid(stats) && stats->initialized /* &&
1871 * !IsSystemRelationName(
1873 pgcform->relname.data) */ )
1875 FmgrInfo out_function;
/* build the pg_statistic row: relid, attnum, op, then min/max rendered as text */
1878 for (i = 0; i < Natts_pg_statistic; ++i)
1882 * initialize values[]
1886 values[i++] = (Datum) relid; /* 1 */
1887 values[i++] = (Datum) attp->attnum; /* 2 */
1888 values[i++] = (Datum) InvalidOid; /* 3 */
1889 fmgr_info(stats->outfunc, &out_function);
1890 out_string = (*fmgr_faddr(&out_function)) (stats->min, stats->attr->atttypid);
1891 values[i++] = (Datum) fmgr(TextInRegProcedure, out_string);
1893 out_string = (char *) (*fmgr_faddr(&out_function)) (stats->max, stats->attr->atttypid);
1894 values[i++] = (Datum) fmgr(TextInRegProcedure, out_string);
1899 stup = heap_formtuple(sdesc, values, nulls);
1902 * insert the tuple in the relation and get the tuple's oid.
1905 heap_insert(sd, stup);
/* free the palloc'd text Datums for the min (slot 3) and max (slot 4) columns */
1906 pfree(DatumGetPointer(values[3]));
1907 pfree(DatumGetPointer(values[4]));
1912 heap_endscan(asdesc);
1917 /* XXX -- after write, should invalidate relcache in other backends */
1918 WriteNoReleaseBuffer(rbuf); /* heap_endscan release scan' buffers ? */
1921 * invalidating system relations confuses the function cache of
1922 * pg_operator and pg_opclass
1924 if (!IsSystemRelationName(pgcform->relname.data))
1925 RelationInvalidateHeapTuple(rd, rtup);
1927 /* that's all, folks */
1928 heap_endscan(rsdesc);
1933 * vc_delhilowstats() -- delete pg_statistic rows for a relation (or for
 * all relations when relid is InvalidOid), optionally
 * restricted to the attnums listed in attnums[].
1937 vc_delhilowstats(Oid relid, int attcnt, int *attnums)
1939 Relation pgstatistic;
1940 HeapScanDesc pgsscan;
1944 pgstatistic = heap_openr(StatisticRelationName);
/* with a valid relid, scan only that relation's rows; otherwise scan everything */
1946 if (relid != InvalidOid)
1948 ScanKeyEntryInitialize(&pgskey, 0x0, Anum_pg_statistic_starelid,
1949 ObjectIdEqualRegProcedure,
1950 ObjectIdGetDatum(relid));
1951 pgsscan = heap_beginscan(pgstatistic, false, false, 1, &pgskey);
1954 pgsscan = heap_beginscan(pgstatistic, false, false, 0, NULL);
1956 while (HeapTupleIsValid(pgstup = heap_getnext(pgsscan, 0, NULL)))
1960 Form_pg_statistic pgs = (Form_pg_statistic) GETSTRUCT(pgstup);
/* when a column list is given, keep rows whose attnum is not in it */
1963 for (i = 0; i < attcnt; i++)
/* NOTE(review): staattnum is compared against attnums[i] + 1 -- presumably
 * attnums[] holds zero-based column indexes; confirm against callers */
1965 if (pgs->staattnum == attnums[i] + 1)
1969 continue; /* don't delete it */
1971 heap_delete(pgstatistic, &pgstup->t_ctid);
1974 heap_endscan(pgsscan);
1975 heap_close(pgstatistic);
/*
 * vc_setpagelock() -- take a write lock on one page of rel, using a
 * fabricated item pointer (offset 1) to name the page.
 */
1979 vc_setpagelock(Relation rel, BlockNumber blkno)
1981 ItemPointerData itm;
1983 ItemPointerSet(&itm, blkno, 1);
1985 RelationSetLockForWritePage(rel, &itm);
1989 * vc_reappage() -- save a page on the array of reapped pages.
1991 * As a side effect of the way that the vacuuming loop for a given
1992 * relation works, higher pages come after lower pages in the array
1993 * (and highest tid on a page is last).
1996 vc_reappage(VPageList vpl, VPageDescr vpc)
2000 /* allocate a VPageDescrData entry */
/* flexible-array-style allocation: header plus one OffsetNumber per reaped tuple */
2001 newvpd = (VPageDescr) palloc(sizeof(VPageDescrData) + vpc->vpd_noff * sizeof(OffsetNumber));
/* deep-copy the caller's (reused) descriptor so the list owns its data */
2004 if (vpc->vpd_noff > 0)
2005 memmove(newvpd->vpd_voff, vpc->vpd_voff, vpc->vpd_noff * sizeof(OffsetNumber));
2006 newvpd->vpd_blkno = vpc->vpd_blkno;
2007 newvpd->vpd_free = vpc->vpd_free;
2008 newvpd->vpd_nusd = vpc->vpd_nusd;
2009 newvpd->vpd_noff = vpc->vpd_noff;
2011 /* insert this page into vpl list */
2012 vc_vpinsert(vpl, newvpd);
/*
 * vc_vpinsert() -- append a page descriptor to vpl, growing the pointer
 * array in chunks of 100 entries.
 */
2017 vc_vpinsert(VPageList vpl, VPageDescr vpnew)
2020 /* allocate a VPageDescr entry if needed */
2021 if (vpl->vpl_npages == 0)
2022 vpl->vpl_pgdesc = (VPageDescr *) palloc(100 * sizeof(VPageDescr));
2023 else if (vpl->vpl_npages % 100 == 0)
2024 vpl->vpl_pgdesc = (VPageDescr *) repalloc(vpl->vpl_pgdesc, (vpl->vpl_npages + 100) * sizeof(VPageDescr));
2025 vpl->vpl_pgdesc[vpl->vpl_npages] = vpnew;
2026 (vpl->vpl_npages)++;
/*
 * vc_free() -- release a VRelList, walking it inside the vacuum portal's
 * variable memory context (where it was allocated).
 */
2031 vc_free(VRelList vrl)
2035 PortalVariableMemory pmem;
/* switch into the portal context so the pfrees (elided) hit the right allocator */
2037 pmem = PortalGetVariableMemory(vc_portal);
2038 old = MemoryContextSwitchTo((MemoryContext) pmem);
2040 while (vrl != (VRelList) NULL)
2043 /* free rel list entry */
2045 vrl = vrl->vrl_next;
/* restore the caller's memory context */
2049 MemoryContextSwitchTo(old);
/*
 * vc_find_eq() -- binary search for elm in the sorted array bot of nelem
 * entries of the given size, using compar() (strcmp-style).
 * Returns a pointer to the matching entry, or NULL.
 * The first/last "move" flags skip re-comparing an endpoint
 * that did not change since the previous iteration.
 */
2053 vc_find_eq(char *bot, int nelem, int size, char *elm, int (*compar) (char *, char *))
2056 int last = nelem - 1;
2057 int celm = nelem / 2;
2061 last_move = first_move = true;
/* re-test the lower bound only when it moved (branch body partly elided) */
2064 if (first_move == true)
2066 res = compar(bot, elm);
/* re-test the upper bound only when it moved */
2073 if (last_move == true)
2075 res = compar(elm, bot + last * size);
2079 return (bot + last * size);
/* probe the middle element */
2082 res = compar(elm, bot + celm * size);
2084 return (bot + celm * size);
/* elm is above the middle: discard the lower half and re-center */
2098 last = last - celm - 1;
2099 bot = bot + (celm + 1) * size;
2100 celm = (last + 1) / 2;
/*
 * vc_cmp_blk() -- vc_find_eq comparator: order VPageDescr pointers by
 * their page's block number.
 */
2107 vc_cmp_blk(char *left, char *right)
2112 lblk = (*((VPageDescr *) left))->vpd_blkno;
2113 rblk = (*((VPageDescr *) right))->vpd_blkno;
/*
 * vc_cmp_offno() -- vc_find_eq comparator: order OffsetNumbers
 * (strcmp-style: <0, 0, >0).
 */
2124 vc_cmp_offno(char *left, char *right)
2127 if (*(OffsetNumber *) left < *(OffsetNumber *) right)
2129 if (*(OffsetNumber *) left == *(OffsetNumber *) right)
2133 } /* vc_cmp_offno */
/*
 * vc_getindices() -- find and open all indices on the relation relid.
 * Sets *nindices to the number successfully opened and,
 * when Irel is non-NULL, returns the opened Relations
 * through *Irel (palloc'd; caller closes/frees).
 */
2137 vc_getindices(Oid relid, int *nindices, Relation **Irel)
2143 HeapScanDesc pgiscan;
/* oid array grows in chunks of 10 as matches are found */
2153 ioid = (Oid *) palloc(10 * sizeof(Oid));
2155 /* prepare a heap scan on the pg_index relation */
2156 pgindex = heap_openr(IndexRelationName);
2157 pgidesc = RelationGetTupleDescriptor(pgindex);
2159 ScanKeyEntryInitialize(&pgikey, 0x0, Anum_pg_index_indrelid,
2160 ObjectIdEqualRegProcedure,
2161 ObjectIdGetDatum(relid));
2163 pgiscan = heap_beginscan(pgindex, false, false, 1, &pgikey);
/* collect the oid of every index whose indrelid matches */
2165 while (HeapTupleIsValid(pgitup = heap_getnext(pgiscan, 0, NULL)))
2167 d = heap_getattr(pgitup, Anum_pg_index_indexrelid,
2171 ioid = (Oid *) repalloc(ioid, (i + 10) * sizeof(Oid));
2172 ioid[i - 1] = DatumGetObjectId(d);
2175 heap_endscan(pgiscan);
2176 heap_close(pgindex);
2179 { /* No one index found */
/* open each collected index; unopenable ones are skipped with a NOTICE */
2184 if (Irel != (Relation **) NULL)
2185 *Irel = (Relation *) palloc(i * sizeof(Relation));
2189 irel = index_open(ioid[--i]);
2190 if (irel != (Relation) NULL)
2192 if (Irel != (Relation **) NULL)
2199 elog(NOTICE, "CAN't OPEN INDEX %u - SKIP IT", ioid[i]);
/* if nothing could be opened, free the array and hand back NULL */
2204 if (Irel != (Relation **) NULL && *nindices == 0)
2207 *Irel = (Relation *) NULL;
2210 } /* vc_getindices */
/*
 * vc_clsindices() -- close the nindices index relations in Irel
 * (no-op when Irel is NULL).
 */
2214 vc_clsindices(int nindices, Relation *Irel)
2217 if (Irel == (Relation *) NULL)
/* loop control elided; each entry is closed in turn */
2222 index_close(Irel[nindices]);
2226 } /* vc_clsindices */
/*
 * vc_mkindesc() -- build an IndDesc (key count, pg_index tuple form, and
 * functional-index info) for each of the nindices indices
 * in Irel. *Idesc receives a palloc'd array.
 */
2230 vc_mkindesc(Relation onerel, int nindices, Relation *Irel, IndDesc **Idesc)
2233 HeapTuple pgIndexTup;
2234 AttrNumber *attnumP;
2238 *Idesc = (IndDesc *) palloc(nindices * sizeof(IndDesc));
2240 for (i = 0, idcur = *Idesc; i < nindices; i++, idcur++)
/* fetch this index's pg_index tuple from the syscache by its oid */
2243 SearchSysCacheTuple(INDEXRELID,
2244 ObjectIdGetDatum(Irel[i]->rd_id),
2247 idcur->tform = (IndexTupleForm) GETSTRUCT(pgIndexTup);
/* count key attributes: indkey[] is terminated by InvalidAttrNumber */
2248 for (attnumP = &(idcur->tform->indkey[0]), natts = 0;
2249 *attnumP != InvalidAttrNumber && natts != INDEX_MAX_KEYS;
2250 attnumP++, natts++);
/* functional index: fill in the FuncIndexInfo so index tuples can be formed */
2251 if (idcur->tform->indproc != InvalidOid)
2253 idcur->finfoP = &(idcur->finfo);
2254 FIgetnArgs(idcur->finfoP) = natts;
2256 FIgetProcOid(idcur->finfoP) = idcur->tform->indproc;
2257 *(FIgetname(idcur->finfoP)) = '\0';
/* plain index: no function info */
2260 idcur->finfoP = (FuncIndexInfo *) NULL;
2262 idcur->natts = natts;
/*
 * vc_enough_space() -- can a tuple of size len be stored on the page
 * described by vpd? Accounts for whether a new line
 * pointer must also be allocated.
 */
2269 vc_enough_space(VPageDescr vpd, Size len)
/* tuples are stored double-aligned, so compare against the aligned size */
2272 len = DOUBLEALIGN(len);
2274 if (len > vpd->vpd_free)
/* a freed (unused) line pointer can be recycled, so only the data must fit */
2277 if (vpd->vpd_nusd < vpd->vpd_noff) /* there are free itemid(s) */
2278 return (true); /* and len <= free_space */
2280 /* ok. noff_usd >= noff_free and so we'll have to allocate new itemid */
2281 if (len <= vpd->vpd_free - sizeof(ItemIdData))
2286 } /* vc_enough_space */