*
*
* IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.293 2004/10/07 14:15:50 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.294 2004/10/07 14:19:58 momjian Exp $
*
*-------------------------------------------------------------------------
*/
if (vacstmt->verbose)
elevel = INFO;
else
- /* bjm comment */
elevel = DEBUG2;
- if (1 == 0)
- func();
- else
- /* bjm comment */
- {
- elevel = DEBUG2;
- }
/*
- * We cannot run VACUUM inside a user transaction block; if we were inside
- * a transaction, then our commit- and start-transaction-command calls
- * would not have the intended effect! Furthermore, the forced commit that
- * occurs before truncating the relation's file would have the effect of
- * committing the rest of the user's transaction too, which would
- * certainly not be the desired behavior. (This only applies to VACUUM
- * FULL, though. We could in theory run lazy VACUUM inside a transaction
- * block, but we choose to disallow that case because we'd rather commit
- * as soon as possible after finishing the vacuum. This is mainly so
- * that we can let go the AccessExclusiveLock that we may be holding.)
+ * We cannot run VACUUM inside a user transaction block; if we were
+ * inside a transaction, then our commit- and
+ * start-transaction-command calls would not have the intended effect!
+ * Furthermore, the forced commit that occurs before truncating the
+ * relation's file would have the effect of committing the rest of the
+ * user's transaction too, which would certainly not be the desired
+ * behavior. (This only applies to VACUUM FULL, though. We could in
+ * theory run lazy VACUUM inside a transaction block, but we choose to
+ * disallow that case because we'd rather commit as soon as possible
+ * after finishing the vacuum. This is mainly so that we can let go
+ * the AccessExclusiveLock that we may be holding.)
*
* ANALYZE (without VACUUM) can run either way.
*/
else
in_outer_xact = IsInTransactionChain((void *) vacstmt);
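/*
 * For reference, a minimal sketch of the check described above.  The
 * if-branch is not part of this hunk, so the PreventTransactionChain()
 * call below is an assumption about how that branch reads, not the
 * committed code, and "stmttype" (the command name string) is a
 * hypothetical identifier used only in this sketch:
 *
 *		if (vacstmt->vacuum)
 *			PreventTransactionChain((void *) vacstmt, stmttype);
 *		else
 *			in_outer_xact = IsInTransactionChain((void *) vacstmt);
 *
 * PreventTransactionChain() errors out when we are inside a transaction
 * block, while IsInTransactionChain() merely reports the fact, which is
 * all ANALYZE needs.
 */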
- /* Send info about dead objects to the statistics collector */
+ /*
+ * Send info about dead objects to the statistics collector
+ */
if (vacstmt->vacuum)
pgstat_vacuum_tabstat();
/*
* Create special memory context for cross-transaction storage.
*
- * Since it is a child of PortalContext, it will go away eventually even if
- * we suffer an error; there's no need for special abort cleanup logic.
+ * Since it is a child of PortalContext, it will go away eventually even
+ * if we suffer an error; there's no need for special abort cleanup
+ * logic.
*/
vac_context = AllocSetContextCreate(PortalContext,
"Vacuum",
/*
* It's a database-wide VACUUM.
*
- * Compute the initially applicable OldestXmin and FreezeLimit XIDs, so
- * that we can record these values at the end of the VACUUM. Note that
- * individual tables may well be processed with newer values, but we
- * can guarantee that no (non-shared) relations are processed with
- * older ones.
+ * Compute the initially applicable OldestXmin and FreezeLimit XIDs,
+ * so that we can record these values at the end of the VACUUM.
+ * Note that individual tables may well be processed with newer
+ * values, but we can guarantee that no (non-shared) relations are
+ * processed with older ones.
*
- * It is okay to record non-shared values in pg_database, even though we
- * may vacuum shared relations with older cutoffs, because only the
- * minimum of the values present in pg_database matters. We can be
- * sure that shared relations have at some time been vacuumed with
- * cutoffs no worse than the global minimum; for, if there is a
- * backend in some other DB with xmin = OLDXMIN that's determining the
- * cutoff with which we vacuum shared relations, it is not possible
- * for that database to have a cutoff newer than OLDXMIN recorded in
- * pg_database.
+ * It is okay to record non-shared values in pg_database, even though
+ * we may vacuum shared relations with older cutoffs, because only
+ * the minimum of the values present in pg_database matters. We
+ * can be sure that shared relations have at some time been
+ * vacuumed with cutoffs no worse than the global minimum; for, if
+ * there is a backend in some other DB with xmin = OLDXMIN that's
+ * determining the cutoff with which we vacuum shared relations,
+ * it is not possible for that database to have a cutoff newer
+ * than OLDXMIN recorded in pg_database.
*/
vacuum_set_xid_limits(vacstmt, false,
&initialOldestXmin,
/*
* Decide whether we need to start/commit our own transactions.
*
- * For VACUUM (with or without ANALYZE): always do so, so that we can release
- * locks as soon as possible. (We could possibly use the outer
- * transaction for a one-table VACUUM, but handling TOAST tables would be
- * problematic.)
+ * For VACUUM (with or without ANALYZE): always do so, so that we can
+ * release locks as soon as possible. (We could possibly use the
+ * outer transaction for a one-table VACUUM, but handling TOAST tables
+ * would be problematic.)
*
* For ANALYZE (no VACUUM): if inside a transaction block, we cannot
- * start/commit our own transactions. Also, there's no need to do so if
- * only processing one relation. For multiple relations when not within a
- * transaction block, use own transactions so we can release locks sooner.
+ * start/commit our own transactions. Also, there's no need to do so
+ * if only processing one relation. For multiple relations when not
+ * within a transaction block, use own transactions so we can release
+ * locks sooner.
*/
if (vacstmt->vacuum)
use_own_xacts = true;
}
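/*
 * A sketch, not the committed code, of the full decision the comment
 * above describes; the ANALYZE-only branch is omitted from this hunk,
 * so its exact shape here is an assumption:
 *
 *		if (vacstmt->vacuum)
 *			use_own_xacts = true;
 *		else
 *		{
 *			Assert(vacstmt->analyze);
 *			if (in_outer_xact)
 *				use_own_xacts = false;
 *			else if (list_length(relations) > 1)
 *				use_own_xacts = true;
 *			else
 *				use_own_xacts = false;
 *		}
 */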
/*
- * If we are running ANALYZE without per-table transactions, we'll need a
- * memory context with table lifetime.
+ * If we are running ANALYZE without per-table transactions, we'll
+ * need a memory context with table lifetime.
*/
if (!use_own_xacts)
anl_context = AllocSetContextCreate(PortalContext,
ALLOCSET_DEFAULT_MAXSIZE);
/*
- * vacuum_rel expects to be entered with no transaction active; it will
- * start and commit its own transaction. But we are called by an SQL
- * command, and so we are executing inside a transaction already. We
- * commit the transaction started in PostgresMain() here, and start
- * another one before exiting to match the commit waiting for us back in
- * PostgresMain().
+ * vacuum_rel expects to be entered with no transaction active; it
+ * will start and commit its own transaction. But we are called by an
+ * SQL command, and so we are executing inside a transaction already.
+ * We commit the transaction started in PostgresMain() here, and start
+ * another one before exiting to match the commit waiting for us back
+ * in PostgresMain().
*/
if (use_own_xacts)
{
VacuumCostActive = (VacuumCostDelay > 0);
VacuumCostBalance = 0;
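/*
 * The transaction bracketing the comment above describes looks roughly
 * like this (a sketch, not the exact committed code): close the
 * transaction opened by PostgresMain() before the per-relation loop,
 * and open a fresh one afterwards so the commit waiting back in
 * PostgresMain() still has something to commit.
 *
 *		if (use_own_xacts)
 *			CommitTransactionCommand();
 *
 *		... per-relation loop; vacuum_rel() starts and commits its own
 *		transactions ...
 *
 *		if (use_own_xacts)
 *			StartTransactionCommand();
 */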
- /* Loop to process each selected relation. */
+ /*
+ * Loop to process each selected relation.
+ */
foreach(cur, relations)
{
Oid relid = lfirst_oid(cur);
MemoryContext old_context = NULL;
/*
- * If using separate xacts, start one for analyze. Otherwise,
- * we can use the outer transaction, but we still need to call
- * analyze_rel in a memory context that will be cleaned up on
- * return (else we leak memory while processing multiple
- * tables).
+ * If using separate xacts, start one for analyze.
+ * Otherwise, we can use the outer transaction, but we
+ * still need to call analyze_rel in a memory context that
+ * will be cleaned up on return (else we leak memory while
+ * processing multiple tables).
*/
if (use_own_xacts)
{
old_context = MemoryContextSwitchTo(anl_context);
/*
- * Tell the buffer replacement strategy that vacuum is causing
- * the IO
+ * Tell the buffer replacement strategy that vacuum is
+ * causing the IO
*/
StrategyHintVacuum(true);
/* Turn off vacuum cost accounting */
VacuumCostActive = false;
- /* Finish up processing. */
+ /*
+ * Finish up processing.
+ */
if (use_own_xacts)
{
/* here, we are not in a transaction */
if (vacstmt->vacuum)
{
/*
- * If it was a database-wide VACUUM, print FSM usage statistics (we
- * don't make you be superuser to see these).
+ * If it was a database-wide VACUUM, print FSM usage statistics
+ * (we don't make you be superuser to see these).
*/
if (vacstmt->relation == NULL)
PrintFreeSpaceMapStatistics(elevel);
/*
* If we completed a database-wide VACUUM without skipping any
- * relations, update the database's pg_database row with info about
- * the transaction IDs used, and try to truncate pg_clog.
+ * relations, update the database's pg_database row with info
+ * about the transaction IDs used, and try to truncate pg_clog.
*/
if (all_rels)
{
/*
* Clean up working storage --- note we must do this after
- * StartTransactionCommand, else we might be trying to delete the active
- * context!
+ * StartTransactionCommand, else we might be trying to delete the
+ * active context!
*/
MemoryContextDelete(vac_context);
vac_context = NULL;
limit = GetCurrentTransactionId() - (MaxTransactionId >> 2);
}
- /* Be careful not to generate a "permanent" XID */
+ /*
+ * Be careful not to generate a "permanent" XID
+ */
if (!TransactionIdIsNormal(limit))
limit = FirstNormalTransactionId;
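/*
 * Standalone sketch (not backend code) of the clamp above: subtracting a
 * quarter of the XID space from the current XID wraps around modulo 2^32
 * and can land in the "permanent" range 0..2, so the result is bumped up
 * to FirstNormalTransactionId.  The constants mirror transam.h; the
 * sample XIDs in main() are made up.
 */
#include <stdint.h>
#include <stdio.h>

#define FirstNormalTransactionId	((uint32_t) 3)
#define MaxTransactionId			((uint32_t) 0xFFFFFFFF)
#define TransactionIdIsNormal(xid)	((xid) >= FirstNormalTransactionId)

static uint32_t
clamp_truncate_limit(uint32_t current_xid)
{
	/* unsigned arithmetic wraps, just like TransactionId arithmetic */
	uint32_t	limit = current_xid - (MaxTransactionId >> 2);

	if (!TransactionIdIsNormal(limit))
		limit = FirstNormalTransactionId;
	return limit;
}

int
main(void)
{
	printf("%u\n", clamp_truncate_limit(3000000000u));	/* prints 1926258177 */
	printf("%u\n", clamp_truncate_limit(1073741825u));	/* raw result 2, prints 3 */
	return 0;
}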
- /* Ensure sane relationship of limits */
+ /*
+ * Ensure sane relationship of limits
+ */
if (TransactionIdFollows(limit, *oldestXmin))
{
ereport(WARNING,
Form_pg_class pgcform;
Buffer buffer;
- /* update number of tuples and number of pages in pg_class */
+ /*
+ * update number of tuples and number of pages in pg_class
+ */
rd = heap_openr(RelationRelationName, RowExclusiveLock);
ctup = SearchSysCache(RELOID,
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
/*
- * Invalidate the tuple in the catcaches; this also arranges to flush the
- * relation's relcache entry. (If we fail to commit for some reason, no
- * flush will occur, but no great harm is done since there are no
- * noncritical state updates here.)
+ * Invalidate the tuple in the catcaches; this also arranges to flush
+ * the relation's relcache entry. (If we fail to commit for some
+ * reason, no flush will occur, but no great harm is done since there
+ * are no noncritical state updates here.)
*/
CacheInvalidateHeapTuple(rd, &rtup);
heap_close(relation, AccessShareLock);
/*
- * Do not truncate CLOG if we seem to have suffered wraparound already;
- * the computed minimum XID might be bogus.
+ * Do not truncate CLOG if we seem to have suffered wraparound
+ * already; the computed minimum XID might be bogus.
*/
if (vacuumAlreadyWrapped)
{
CHECK_FOR_INTERRUPTS();
/*
- * Race condition -- if the pg_class tuple has gone away since the last
- * time we saw it, we don't need to vacuum it.
+ * Race condition -- if the pg_class tuple has gone away since the
+ * last time we saw it, we don't need to vacuum it.
*/
if (!SearchSysCacheExists(RELOID,
ObjectIdGetDatum(relid),
}
/*
- * Determine the type of lock we want --- hard exclusive lock for a FULL
- * vacuum, but just ShareUpdateExclusiveLock for concurrent vacuum. Either
- * way, we can be sure that no other backend is vacuuming the same table.
+ * Determine the type of lock we want --- hard exclusive lock for a
+ * FULL vacuum, but just ShareUpdateExclusiveLock for concurrent
+ * vacuum. Either way, we can be sure that no other backend is
+ * vacuuming the same table.
*/
lmode = vacstmt->full ? AccessExclusiveLock : ShareUpdateExclusiveLock;
/*
- * Open the class, get an appropriate lock on it, and check permissions.
+ * Open the class, get an appropriate lock on it, and check
+ * permissions.
*
- * We allow the user to vacuum a table if he is superuser, the table owner,
- * or the database owner (but in the latter case, only if it's not a
- * shared relation). pg_class_ownercheck includes the superuser case.
+ * We allow the user to vacuum a table if he is superuser, the table
+ * owner, or the database owner (but in the latter case, only if it's
+ * not a shared relation). pg_class_ownercheck includes the superuser
+ * case.
*
- * Note we choose to treat permissions failure as a WARNING and keep trying
- * to vacuum the rest of the DB --- is this appropriate?
+ * Note we choose to treat permissions failure as a WARNING and keep
+ * trying to vacuum the rest of the DB --- is this appropriate?
*/
onerel = relation_open(relid, lmode);
}
/*
- * Check that it's a plain table; we used to do this in get_rel_oids() but
- * seems safer to check after we've locked the relation.
+ * Check that it's a plain table; we used to do this in get_rel_oids()
+ * but seems safer to check after we've locked the relation.
*/
if (onerel->rd_rel->relkind != expected_relkind)
{
relation_close(onerel, lmode);
StrategyHintVacuum(false);
CommitTransactionCommand();
- return true; /* assume no long-lived data in temp tables */
+ return true; /* assume no long-lived data in temp
+ * tables */
}
/*
* Get a session-level lock too. This will protect our access to the
* relation across multiple transactions, so that we can vacuum the
- * relation's TOAST table (if any) secure in the knowledge that no one is
- * deleting the parent relation.
+ * relation's TOAST table (if any) secure in the knowledge that no one
+ * is deleting the parent relation.
*
* NOTE: this cannot block, even if someone else is waiting for access,
* because the lock manager knows that both lock requests are from the
onerelid = onerel->rd_lockInfo.lockRelId;
LockRelationForSession(&onerelid, lmode);
- /* Remember the relation's TOAST relation for later */
+ /*
+ * Remember the relation's TOAST relation for later
+ */
toast_relid = onerel->rd_rel->reltoastrelid;
- /* Do the actual work --- either FULL or "lazy" vacuum */
+ /*
+ * Do the actual work --- either FULL or "lazy" vacuum
+ */
if (vacstmt->full)
full_vacuum_rel(onerel, vacstmt);
else
/* all done with this class, but hold lock until commit */
relation_close(onerel, NoLock);
- /* Complete the transaction and free all temporary memory used. */
+ /*
+ * Complete the transaction and free all temporary memory used.
+ */
StrategyHintVacuum(false);
CommitTransactionCommand();
/*
* If the relation has a secondary toast rel, vacuum that too while we
* still hold the session lock on the master table. Note however that
- * "analyze" will not get done on the toast table. This is good, because
- * the toaster always uses hardcoded index access and statistics are
- * totally unimportant for toast relations.
+ * "analyze" will not get done on the toast table. This is good,
+ * because the toaster always uses hardcoded index access and
+ * statistics are totally unimportant for toast relations.
*/
if (toast_relid != InvalidOid)
{
result = false; /* failed to vacuum the TOAST table? */
}
- /* Now release the session-level lock on the master table. */
+ /*
+ * Now release the session-level lock on the master table.
+ */
UnlockRelationForSession(&onerelid, lmode);
return result;
{
VacPageListData vacuum_pages; /* List of pages to vacuum and/or
* clean indexes */
- VacPageListData fraged_pages; /* List of pages with space enough for
- * re-using */
+ VacPageListData fraged_pages; /* List of pages with space enough
+ * for re-using */
Relation *Irel;
int nindexes,
i;
vacuum_set_xid_limits(vacstmt, onerel->rd_rel->relisshared,
&OldestXmin, &FreezeLimit);
- /* Set up statistics-gathering machinery. */
+ /*
+ * Set up statistics-gathering machinery.
+ */
vacrelstats = (VRelStats *) palloc(sizeof(VRelStats));
vacrelstats->rel_pages = 0;
vacrelstats->rel_tuples = 0;
VacPage vacpagecopy;
ereport(WARNING,
- (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
- relname, blkno)));
+ (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
+ relname, blkno)));
PageInit(page, BufferGetPageSize(buf), 0);
vacpage->free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower;
free_space += vacpage->free;
case HEAPTUPLE_LIVE:
/*
- * Tuple is good. Consider whether to replace its xmin
- * value with FrozenTransactionId.
+ * Tuple is good. Consider whether to replace its
+ * xmin value with FrozenTransactionId.
*/
if (TransactionIdIsNormal(HeapTupleHeaderGetXmin(tuple.t_data)) &&
TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data),
pgchanged = true;
}
- /* Other checks... */
+ /*
+ * Other checks...
+ */
if (onerel->rd_rel->relhasoids &&
!OidIsValid(HeapTupleGetOid(&tuple)))
elog(WARNING, "relation \"%s\" TID %u/%u: OID is invalid",
case HEAPTUPLE_RECENTLY_DEAD:
/*
- * If tuple is recently deleted then we must not remove it
- * from relation.
+ * If the tuple was recently deleted then we must not
+ * remove it from the relation.
*/
nkeep += 1;
/*
- * If we do shrinking and this tuple is updated one then
- * remember it to construct updated tuple dependencies.
+ * If we are shrinking and this tuple is an updated one,
+ * then remember it so we can construct updated-tuple
+ * dependencies.
*/
if (do_shrinking &&
!(ItemPointerEquals(&(tuple.t_self),
{
free_vtlinks = 1000;
vtlinks = (VTupleLink) repalloc(vtlinks,
- (free_vtlinks + num_vtlinks) *
- sizeof(VTupleLinkData));
+ (free_vtlinks + num_vtlinks) *
+ sizeof(VTupleLinkData));
}
vtlinks[num_vtlinks].new_tid = tuple.t_data->t_ctid;
vtlinks[num_vtlinks].this_tid = tuple.t_self;
case HEAPTUPLE_INSERT_IN_PROGRESS:
/*
- * This should not happen, since we hold exclusive lock on
- * the relation; shouldn't we raise an error? (Actually,
- * it can happen in system catalogs, since we tend to
- * release write lock before commit there.)
+ * This should not happen, since we hold exclusive
+ * lock on the relation; shouldn't we raise an error?
+ * (Actually, it can happen in system catalogs, since
+ * we tend to release write lock before commit there.)
*/
ereport(NOTICE,
(errmsg("relation \"%s\" TID %u/%u: InsertTransactionInProgress %u --- can't shrink relation",
case HEAPTUPLE_DELETE_IN_PROGRESS:
/*
- * This should not happen, since we hold exclusive lock on
- * the relation; shouldn't we raise an error? (Actually,
- * it can happen in system catalogs, since we tend to
- * release write lock before commit there.)
+ * This should not happen, since we hold exclusive
+ * lock on the relation; shouldn't we raise an error?
+ * (Actually, it can happen in system catalogs, since
+ * we tend to release write lock before commit there.)
*/
ereport(NOTICE,
(errmsg("relation \"%s\" TID %u/%u: DeleteTransactionInProgress %u --- can't shrink relation",
ItemId lpp;
/*
- * Here we are building a temporary copy of the page with dead
- * tuples removed. Below we will apply PageRepairFragmentation
- * to the copy, so that we can determine how much space will
- * be available after removal of dead tuples. But note we are
- * NOT changing the real page yet...
+ * Here we are building a temporary copy of the page with
+ * dead tuples removed. Below we will apply
+ * PageRepairFragmentation to the copy, so that we can
+ * determine how much space will be available after
+ * removal of dead tuples. But note we are NOT changing
+ * the real page yet...
*/
if (tempPage == NULL)
{
/*
* Add the page to fraged_pages if it has a useful amount of free
* space. "Useful" means enough for a minimal-sized tuple. But we
- * don't know that accurately near the start of the relation, so add
- * pages unconditionally if they have >= BLCKSZ/10 free space.
+ * don't know that accurately near the start of the relation, so
+ * add pages unconditionally if they have >= BLCKSZ/10 free space.
*/
do_frag = (vacpage->free >= min_tlen || vacpage->free >= BLCKSZ / 10);
/*
* Include the page in empty_end_pages if it will be empty after
- * vacuuming; this is to keep us from using it as a move destination.
+ * vacuuming; this is to keep us from using it as a move
+ * destination.
*/
if (notup)
{
RelationGetRelationName(onerel),
tups_vacuumed, num_tuples, nblocks),
errdetail("%.0f dead row versions cannot be removed yet.\n"
- "Nonremovable row versions range from %lu to %lu bytes long.\n"
+ "Nonremovable row versions range from %lu to %lu bytes long.\n"
"There were %.0f unused item pointers.\n"
- "Total free space (including removable row versions) is %.0f bytes.\n"
+ "Total free space (including removable row versions) is %.0f bytes.\n"
"%u pages are or will become empty, including %u at the end of the table.\n"
- "%u pages containing %.0f free bytes are potential move destinations.\n"
+ "%u pages containing %.0f free bytes are potential move destinations.\n"
"%s",
nkeep,
(unsigned long) min_tlen, (unsigned long) max_tlen,
vacpage->offsets_used = vacpage->offsets_free = 0;
/*
- * Scan pages backwards from the last nonempty page, trying to move tuples
- * down to lower pages. Quit when we reach a page that we have moved any
- * tuples onto, or the first page if we haven't moved anything, or when we
- * find a page we cannot completely empty (this last condition is handled
- * by "break" statements within the loop).
+ * Scan pages backwards from the last nonempty page, trying to move
+ * tuples down to lower pages. Quit when we reach a page that we have
+ * moved any tuples onto, or the first page if we haven't moved
+ * anything, or when we find a page we cannot completely empty (this
+ * last condition is handled by "break" statements within the loop).
*
- * NB: this code depends on the vacuum_pages and fraged_pages lists being in
- * order by blkno.
+ * NB: this code depends on the vacuum_pages and fraged_pages lists being
+ * in order by blkno.
*/
nblocks = vacrelstats->rel_pages;
for (blkno = nblocks - vacuum_pages->empty_end_pages - 1;
vacuum_delay_point();
/*
- * Forget fraged_pages pages at or after this one; they're no longer
- * useful as move targets, since we only want to move down. Note that
- * since we stop the outer loop at last_move_dest_block, pages removed
- * here cannot have had anything moved onto them already.
+ * Forget fraged_pages pages at or after this one; they're no
+ * longer useful as move targets, since we only want to move down.
+ * Note that since we stop the outer loop at last_move_dest_block,
+ * pages removed here cannot have had anything moved onto them
+ * already.
*
- * Also note that we don't change the stored fraged_pages list, only our
- * local variable num_fraged_pages; so the forgotten pages are still
- * available to be loaded into the free space map later.
+ * Also note that we don't change the stored fraged_pages list, only
+ * our local variable num_fraged_pages; so the forgotten pages are
+ * still available to be loaded into the free space map later.
*/
while (num_fraged_pages > 0 &&
- fraged_pages->pagedesc[num_fraged_pages - 1]->blkno >= blkno)
+ fraged_pages->pagedesc[num_fraged_pages - 1]->blkno >= blkno)
{
Assert(fraged_pages->pagedesc[num_fraged_pages - 1]->offsets_used == 0);
--num_fraged_pages;
}
- /* Process this page of relation. */
+ /*
+ * Process this page of relation.
+ */
buf = ReadBuffer(onerel, blkno);
page = BufferGetPage(buf);
else
Assert(!isempty);
- chain_tuple_moved = false; /* no one chain-tuple was moved off
- * this page, yet */
+ chain_tuple_moved = false; /* no chain tuple has been moved
+ * off this page yet */
vacpage->blkno = blkno;
maxoff = PageGetMaxOffsetNumber(page);
for (offnum = FirstOffsetNumber;
ItemPointerSet(&(tuple.t_self), blkno, offnum);
/*
- * VACUUM FULL has an exclusive lock on the relation. So normally
- * no other transaction can have pending INSERTs or DELETEs in
- * this relation. A tuple is either (a) a tuple in a system
- * catalog, inserted or deleted by a not yet committed transaction
- * or (b) dead (XMIN_INVALID or XMAX_COMMITTED) or (c) inserted by
- * a committed xact (XMIN_COMMITTED) or (d) moved by the currently
- * running VACUUM. In case (a) we wouldn't be in repair_frag() at
- * all. In case (b) we cannot be here, because scan_heap() has
- * already marked the item as unused, see continue above. Case (c)
- * is what normally is to be expected. Case (d) is only possible,
- * if a whole tuple chain has been moved while processing this or
- * a higher numbered block.
+ * VACUUM FULL has an exclusive lock on the relation. So
+ * normally no other transaction can have pending INSERTs or
+ * DELETEs in this relation. A tuple is either (a) a tuple in
+ * a system catalog, inserted or deleted by a not yet
+ * committed transaction or (b) dead (XMIN_INVALID or
+ * XMAX_COMMITTED) or (c) inserted by a committed xact
+ * (XMIN_COMMITTED) or (d) moved by the currently running
+ * VACUUM. In case (a) we wouldn't be in repair_frag() at all.
+ * In case (b) we cannot be here, because scan_heap() has
+ * already marked the item as unused, see continue above. Case
+ * (c) is what normally is to be expected. Case (d) is only
+ * possible, if a whole tuple chain has been moved while
+ * processing this or a higher numbered block.
*/
if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
{
/*
- * There cannot be another concurrently running VACUUM. If the
- * tuple had been moved in by a previous VACUUM, the
+ * There cannot be another concurrently running VACUUM. If
+ * the tuple had been moved in by a previous VACUUM, the
* visibility check would have set XMIN_COMMITTED. If the
- * tuple had been moved in by the currently running VACUUM,
- * the loop would have been terminated. We had elog(ERROR,
- * ...) here, but as we are testing for a can't-happen
- * condition, Assert() seems more appropriate.
+ * tuple had been moved in by the currently running
+ * VACUUM, the loop would have been terminated. We had
+ * elog(ERROR, ...) here, but as we are testing for a
+ * can't-happen condition, Assert() seems more
+ * appropriate.
*/
Assert(!(tuple.t_data->t_infomask & HEAP_MOVED_IN));
/*
- * If this (chain) tuple is moved by me already then I have to
- * check is it in vacpage or not - i.e. is it moved while
- * cleaning this page or some previous one.
+ * If this (chain) tuple has already been moved by me, I
+ * have to check whether it is in vacpage or not - i.e.
+ * whether it was moved while cleaning this page or some
+ * previous one.
*/
Assert(tuple.t_data->t_infomask & HEAP_MOVED_OFF);
}
/*
- * If this tuple is in the chain of tuples created in updates by
- * "recent" transactions then we have to move all chain of tuples
- * to another places.
+ * If this tuple is in a chain of tuples created by
+ * updates from "recent" transactions, then we have to
+ * move the whole chain of tuples to other places.
*
- * NOTE: this test is not 100% accurate: it is possible for a tuple
- * to be an updated one with recent xmin, and yet not have a
- * corresponding tuple in the vtlinks list. Presumably there was
- * once a parent tuple with xmax matching the xmin, but it's
- * possible that that tuple has been removed --- for example, if
- * it had xmin = xmax then HeapTupleSatisfiesVacuum would deem it
- * removable as soon as the xmin xact completes.
+ * NOTE: this test is not 100% accurate: it is possible for a
+ * tuple to be an updated one with recent xmin, and yet not
+ * have a corresponding tuple in the vtlinks list. Presumably
+ * there was once a parent tuple with xmax matching the xmin,
+ * but it's possible that that tuple has been removed --- for
+ * example, if it had xmin = xmax then
+ * HeapTupleSatisfiesVacuum would deem it removable as soon as
+ * the xmin xact completes.
*
- * To be on the safe side, we abandon the repair_frag process if we
- * cannot find the parent tuple in vtlinks. This may be overly
- * conservative; AFAICS it would be safe to move the chain.
+ * To be on the safe side, we abandon the repair_frag process if
+ * we cannot find the parent tuple in vtlinks. This may be
+ * overly conservative; AFAICS it would be safe to move the
+ * chain.
*/
if (((tuple.t_data->t_infomask & HEAP_UPDATED) &&
- !TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data),
- OldestXmin)) ||
+ !TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data),
+ OldestXmin)) ||
(!(tuple.t_data->t_infomask & (HEAP_XMAX_INVALID |
HEAP_MARKED_FOR_UPDATE)) &&
!(ItemPointerEquals(&(tuple.t_self),
free_vtmove = 100;
/*
- * If this tuple is in the begin/middle of the chain then we
- * have to move to the end of chain.
+ * If this tuple is at the beginning or in the middle of
+ * the chain, then we have to move to the end of the
+ * chain.
*/
while (!(tp.t_data->t_infomask & (HEAP_XMAX_INVALID |
- HEAP_MARKED_FOR_UPDATE)) &&
+ HEAP_MARKED_FOR_UPDATE)) &&
!(ItemPointerEquals(&(tp.t_self),
&(tp.t_data->t_ctid))))
{
ItemPointerGetBlockNumber(&Ctid));
Cpage = BufferGetPage(Cbuf);
Citemid = PageGetItemId(Cpage,
- ItemPointerGetOffsetNumber(&Ctid));
+ ItemPointerGetOffsetNumber(&Ctid));
if (!ItemIdIsUsed(Citemid))
{
/*
- * This means that in the middle of chain there was
- * tuple updated by older (than OldestXmin) xaction
- * and this tuple is already deleted by me. Actually,
- * upper part of chain should be removed and seems
- * that this should be handled in scan_heap(), but
- * it's not implemented at the moment and so we just
- * stop shrinking here.
+ * This means that in the middle of the chain there was
+ * a tuple updated by an xaction older than OldestXmin,
+ * and that tuple has already been deleted by me.
+ * Actually, the upper part of the chain should be
+ * removed; it seems that should be handled in
+ * scan_heap(), but it's not implemented at the moment,
+ * so we just stop shrinking here.
*/
elog(DEBUG2, "child itemid in update-chain marked as unused --- can't continue repair_frag");
chain_move_failed = true;
break; /* out of walk-along-page loop */
}
- /* Check if all items in chain can be moved */
+ /*
+ * Check if all items in chain can be moved
+ */
for (;;)
{
Buffer Pbuf;
/* At beginning of chain? */
if (!(tp.t_data->t_infomask & HEAP_UPDATED) ||
- TransactionIdPrecedes(HeapTupleHeaderGetXmin(tp.t_data),
- OldestXmin))
+ TransactionIdPrecedes(HeapTupleHeaderGetXmin(tp.t_data),
+ OldestXmin))
break;
/* No, move to tuple with prior row version */
}
tp.t_self = vtlp->this_tid;
Pbuf = ReadBuffer(onerel,
- ItemPointerGetBlockNumber(&(tp.t_self)));
+ ItemPointerGetBlockNumber(&(tp.t_self)));
Ppage = BufferGetPage(Pbuf);
Pitemid = PageGetItemId(Ppage,
- ItemPointerGetOffsetNumber(&(tp.t_self)));
+ ItemPointerGetOffsetNumber(&(tp.t_self)));
/* this can't happen since we saw tuple earlier: */
if (!ItemIdIsUsed(Pitemid))
elog(ERROR, "parent itemid marked as unused");
/*
* Read above about cases when !ItemIdIsUsed(Citemid)
- * (child item is removed)... Due to the fact that at the
- * moment we don't remove unuseful part of update-chain,
- * it's possible to get too old parent row here. Like as
- * in the case which caused this problem, we stop
- * shrinking here. I could try to find real parent row but
- * want not to do it because of real solution will be
- * implemented anyway, later, and we are too close to 6.5
- * release. - vadim 06/11/99
+ * (child item is removed)...  Because at the moment we
+ * don't remove the useless part of the update-chain, it
+ * is possible to get a too-old parent row here.  As in
+ * the case which caused this problem, we stop shrinking
+ * here.  I could try to find the real parent row, but I
+ * don't want to, because the real solution will be
+ * implemented later anyway, and we are too close to the
+ * 6.5 release. - vadim 06/11/99
*/
if (!(TransactionIdEquals(HeapTupleHeaderGetXmax(Ptp.t_data),
- HeapTupleHeaderGetXmin(tp.t_data))))
+ HeapTupleHeaderGetXmin(tp.t_data))))
{
ReleaseBuffer(Pbuf);
elog(DEBUG2, "too old parent tuple found --- can't continue repair_frag");
if (chain_move_failed)
{
/*
- * Undo changes to offsets_used state. We don't bother
- * cleaning up the amount-free state, since we're not
- * going to do any further tuple motion.
+ * Undo changes to offsets_used state. We don't
+ * bother cleaning up the amount-free state, since
+ * we're not going to do any further tuple motion.
*/
for (i = 0; i < num_vtmove; i++)
{
break; /* out of walk-along-page loop */
}
- /* Okay, move the whole tuple chain */
+ /*
+ * Okay, move the whole tuple chain
+ */
ItemPointerSetInvalid(&Ctid);
for (ti = 0; ti < num_vtmove; ti++)
{
/* Get page to move from */
tuple.t_self = vtmove[ti].tid;
Cbuf = ReadBuffer(onerel,
- ItemPointerGetBlockNumber(&(tuple.t_self)));
+ ItemPointerGetBlockNumber(&(tuple.t_self)));
/* Get page to move to */
dst_buffer = ReadBuffer(onerel, destvacpage->blkno);
Cpage = BufferGetPage(Cbuf);
Citemid = PageGetItemId(Cpage,
- ItemPointerGetOffsetNumber(&(tuple.t_self)));
+ ItemPointerGetOffsetNumber(&(tuple.t_self)));
tuple.t_datamcxt = NULL;
tuple.t_data = (HeapTupleHeader) PageGetItem(Cpage, Citemid);
tuple_len = tuple.t_len = ItemIdGetLength(Citemid);
} /* walk along page */
/*
- * If we broke out of the walk-along-page loop early (ie, still have
- * offnum <= maxoff), then we failed to move some tuple off this page.
- * No point in shrinking any more, so clean up and exit the per-page
- * loop.
+ * If we broke out of the walk-along-page loop early (ie, still
+ * have offnum <= maxoff), then we failed to move some tuple off
+ * this page. No point in shrinking any more, so clean up and
+ * exit the per-page loop.
*/
if (offnum < maxoff && keep_tuples > 0)
{
OffsetNumber off;
- /* Fix vacpage state for any unvisited tuples remaining on page */
+ /*
+ * Fix vacpage state for any unvisited tuples remaining on
+ * page
+ */
for (off = OffsetNumberNext(offnum);
off <= maxoff;
off = OffsetNumberNext(off))
continue;
/*
- * * See comments in the walk-along-page loop above, why we *
- * have Asserts here instead of if (...) elog(ERROR).
+ * See comments in the walk-along-page loop above, why we
+ * have Asserts here instead of if (...) elog(ERROR).
*/
Assert(!(htup->t_infomask & HEAP_MOVED_IN));
Assert(htup->t_infomask & HEAP_MOVED_OFF);
* We have to commit our tuple movings before we truncate the
* relation. Ideally we should do Commit/StartTransactionCommand
* here, relying on the session-level table lock to protect our
- * exclusive access to the relation. However, that would require a
- * lot of extra code to close and re-open the relation, indexes, etc.
- * For now, a quick hack: record status of current transaction as
- * committed, and continue.
+ * exclusive access to the relation. However, that would require
+ * a lot of extra code to close and re-open the relation, indexes,
+ * etc. For now, a quick hack: record status of current
+ * transaction as committed, and continue.
*/
RecordTransactionCommit();
}
/*
* We are not going to move any more tuples across pages, but we still
- * need to apply vacuum_page to compact free space in the remaining pages
- * in vacuum_pages list. Note that some of these pages may also be in the
- * fraged_pages list, and may have had tuples moved onto them; if so, we
- * already did vacuum_page and needn't do it again.
+ * need to apply vacuum_page to compact free space in the remaining
+ * pages in vacuum_pages list. Note that some of these pages may also
+ * be in the fraged_pages list, and may have had tuples moved onto
+ * them; if so, we already did vacuum_page and needn't do it again.
*/
for (i = 0, curpage = vacuum_pages->pagedesc;
i < vacuumed_pages;
last_move_dest_block, num_moved);
/*
- * It'd be cleaner to make this report at the bottom of this routine, but
- * then the rusage would double-count the second pass of index vacuuming.
- * So do it here and ignore the relatively small amount of processing that
- * occurs below.
+ * It'd be cleaner to make this report at the bottom of this routine,
+ * but then the rusage would double-count the second pass of index
+ * vacuuming. So do it here and ignore the relatively small amount of
+ * processing that occurs below.
*/
ereport(elevel,
- (errmsg("\"%s\": moved %u row versions, truncated %u to %u pages",
- RelationGetRelationName(onerel),
- num_moved, nblocks, blkno),
- errdetail("%s",
- vac_show_rusage(&ru0))));
+ (errmsg("\"%s\": moved %u row versions, truncated %u to %u pages",
+ RelationGetRelationName(onerel),
+ num_moved, nblocks, blkno),
+ errdetail("%s",
+ vac_show_rusage(&ru0))));
- /* Reflect the motion of system tuples to catalog cache here. */
+ /*
+ * Reflect the motion of system tuples to catalog cache here.
+ */
CommandCounterIncrement();
if (Nvacpagelist.num_pages > 0)
/* re-sort Nvacpagelist.pagedesc */
for (vpleft = Nvacpagelist.pagedesc,
- vpright = Nvacpagelist.pagedesc + Nvacpagelist.num_pages - 1;
+ vpright = Nvacpagelist.pagedesc + Nvacpagelist.num_pages - 1;
vpleft < vpright; vpleft++, vpright--)
{
vpsave = *vpleft;
}
/*
- * keep_tuples is the number of tuples that have been moved off a
- * page during chain moves but not been scanned over subsequently.
- * The tuple ids of these tuples are not recorded as free offsets
- * for any VacPage, so they will not be cleared from the indexes.
+ * keep_tuples is the number of tuples that have been moved
+ * off a page during chain moves but not been scanned over
+ * subsequently. The tuple ids of these tuples are not
+ * recorded as free offsets for any VacPage, so they will not
+ * be cleared from the indexes.
*/
Assert(keep_tuples >= 0);
for (i = 0; i < nindexes; i++)
continue;
/*
- * * See comments in the walk-along-page loop above, why we *
- * have Asserts here instead of if (...) elog(ERROR).
+ * See comments in the walk-along-page loop above, why we
+ * have Asserts here instead of if (...) elog(ERROR).
*/
Assert(!(htup->t_infomask & HEAP_MOVED_IN));
Assert(htup->t_infomask & HEAP_MOVED_OFF);
else
{
/*
- * No XLOG record, but still need to flag that XID exists on
- * disk
+ * No XLOG record, but still need to flag that XID exists
+ * on disk
*/
MyXactMadeTempRelUpdate = true;
}
}
/*
- * Flush dirty pages out to disk. We do this unconditionally, even if we
- * don't need to truncate, because we want to ensure that all tuples have
- * correct on-row commit status on disk (see bufmgr.c's comments for
- * FlushRelationBuffers()).
+ * Flush dirty pages out to disk. We do this unconditionally, even if
+ * we don't need to truncate, because we want to ensure that all
+ * tuples have correct on-row commit status on disk (see bufmgr.c's
+ * comments for FlushRelationBuffers()).
*/
FlushRelationBuffers(onerel, blkno);
heap_copytuple_with_tuple(old_tup, &newtup);
- /* register invalidation of source tuple in catcaches. */
+ /*
+ * register invalidation of source tuple in catcaches.
+ */
CacheInvalidateHeapTuple(rel, old_tup);
/* NO EREPORT(ERROR) TILL CHANGES ARE LOGGED */
/*
* If this page was not used before - clean it.
*
- * NOTE: a nasty bug used to lurk here. It is possible for the source and
- * destination pages to be the same (since this tuple-chain member can be
- * on a page lower than the one we're currently processing in the outer
- * loop). If that's true, then after vacuum_page() the source tuple will
- * have been moved, and tuple.t_data will be pointing at garbage.
- * Therefore we must do everything that uses old_tup->t_data BEFORE this
- * step!!
+ * NOTE: a nasty bug used to lurk here. It is possible for the source
+ * and destination pages to be the same (since this tuple-chain member
+ * can be on a page lower than the one we're currently processing in
+ * the outer loop). If that's true, then after vacuum_page() the
+ * source tuple will have been moved, and tuple.t_data will be
+ * pointing at garbage. Therefore we must do everything that uses
+ * old_tup->t_data BEFORE this step!!
*
- * This path is different from the other callers of vacuum_page, because we
- * have already incremented the vacpage's offsets_used field to account
- * for the tuple(s) we expect to move onto the page. Therefore
- * vacuum_page's check for offsets_used == 0 is wrong. But since that's a
- * good debugging check for all other callers, we work around it here
- * rather than remove it.
+ * This path is different from the other callers of vacuum_page, because
+ * we have already incremented the vacpage's offsets_used field to
+ * account for the tuple(s) we expect to move onto the page. Therefore
+ * vacuum_page's check for offsets_used == 0 is wrong. But since
+ * that's a good debugging check for all other callers, we work around
+ * it here rather than remove it.
*/
if (!PageIsEmpty(dst_page) && cleanVpd)
{
}
/*
- * Update the state of the copied tuple, and store it on the destination
- * page.
+ * Update the state of the copied tuple, and store it on the
+ * destination page.
*/
newtup.t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
HEAP_XMIN_INVALID |
}
else
{
- /* No XLOG record, but still need to flag that XID exists on disk */
+ /*
+ * No XLOG record, but still need to flag that XID exists on disk
+ */
MyXactMadeTempRelUpdate = true;
}
END_CRIT_SECTION();
/*
- * Set new tuple's t_ctid pointing to itself for last tuple in chain, and
- * to next tuple in chain otherwise.
+ * Set new tuple's t_ctid pointing to itself for last tuple in chain,
+ * and to next tuple in chain otherwise.
*/
/* Is this ok after log_heap_move() and END_CRIT_SECTION()? */
if (!ItemPointerIsValid(ctid))
* register invalidation of source tuple in catcaches.
*
* (Note: we do not need to register the copied tuple, because we are not
- * changing the tuple contents and so there cannot be any need to flush
- * negative catcache entries.)
+ * changing the tuple contents and so there cannot be any need to
+ * flush negative catcache entries.)
*/
CacheInvalidateHeapTuple(rel, old_tup);
/* NO EREPORT(ERROR) TILL CHANGES ARE LOGGED */
START_CRIT_SECTION();
- /* Mark new tuple as MOVED_IN by me. */
+ /*
+ * Mark new tuple as MOVED_IN by me.
+ */
newtup.t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
HEAP_XMIN_INVALID |
HEAP_MOVED_OFF);
ItemPointerSet(&(newtup.t_data->t_ctid), dst_vacpage->blkno, newoff);
newtup.t_self = newtup.t_data->t_ctid;
- /* Mark old tuple as MOVED_OFF by me. */
+ /*
+ * Mark old tuple as MOVED_OFF by me.
+ */
old_tup->t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
HEAP_XMIN_INVALID |
HEAP_MOVED_IN);
}
else
{
- /* No XLOG record, but still need to flag that XID exists on disk */
+ /*
+ * No XLOG record, but still need to flag that XID exists on disk
+ */
MyXactMadeTempRelUpdate = true;
}
/*
* See comments in the walk-along-page loop above, why we have
- * Asserts here instead of if (...) elog(ERROR). The difference
- * here is that we may see MOVED_IN.
+ * Asserts here instead of if (...) elog(ERROR). The
+ * difference here is that we may see MOVED_IN.
*/
Assert(htup->t_infomask & HEAP_MOVED);
Assert(HeapTupleHeaderGetXvac(htup) == GetCurrentTransactionId());
}
/*
- * Flush dirty pages out to disk. We do this unconditionally, even if we
- * don't need to truncate, because we want to ensure that all tuples have
- * correct on-row commit status on disk (see bufmgr.c's comments for
- * FlushRelationBuffers()).
+ * Flush dirty pages out to disk. We do this unconditionally, even if
+ * we don't need to truncate, because we want to ensure that all
+ * tuples have correct on-row commit status on disk (see bufmgr.c's
+ * comments for FlushRelationBuffers()).
*/
Assert(vacrelstats->rel_pages >= vacuum_pages->empty_end_pages);
relblocks = vacrelstats->rel_pages - vacuum_pages->empty_end_pages;
RelationGetRelationName(onerel),
vacrelstats->rel_pages, relblocks)));
RelationTruncate(onerel, relblocks);
- vacrelstats->rel_pages = relblocks; /* set new number of blocks */
+ vacrelstats->rel_pages = relblocks; /* set new number of
+ * blocks */
}
}
/*
* Even though we're not planning to delete anything, we use the
- * ambulkdelete call, because (a) the scan happens within the index AM for
- * more speed, and (b) it may want to pass private statistics to the
- * amvacuumcleanup call.
+ * ambulkdelete call, because (a) the scan happens within the index AM
+ * for more speed, and (b) it may want to pass private statistics to
+ * the amvacuumcleanup call.
*/
stats = index_bulk_delete(indrel, dummy_tid_reaped, NULL);
false);
ereport(elevel,
- (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
- RelationGetRelationName(indrel),
- stats->num_index_tuples,
- stats->num_pages),
- errdetail("%u index pages have been deleted, %u are currently reusable.\n"
- "%s",
- stats->pages_deleted, stats->pages_free,
- vac_show_rusage(&ru0))));
+ (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
+ RelationGetRelationName(indrel),
+ stats->num_index_tuples,
+ stats->num_pages),
+ errdetail("%u index pages have been deleted, %u are currently reusable.\n"
+ "%s",
+ stats->pages_deleted, stats->pages_free,
+ vac_show_rusage(&ru0))));
/*
- * Check for tuple count mismatch. If the index is partial, then it's OK
- * for it to have fewer tuples than the heap; else we got trouble.
+ * Check for tuple count mismatch. If the index is partial, then it's
+ * OK for it to have fewer tuples than the heap; else we got trouble.
*/
if (stats->num_index_tuples != num_tuples)
{
false);
ereport(elevel,
- (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
- RelationGetRelationName(indrel),
- stats->num_index_tuples,
- stats->num_pages),
- errdetail("%.0f index row versions were removed.\n"
- "%u index pages have been deleted, %u are currently reusable.\n"
- "%s",
- stats->tuples_removed,
- stats->pages_deleted, stats->pages_free,
- vac_show_rusage(&ru0))));
+ (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
+ RelationGetRelationName(indrel),
+ stats->num_index_tuples,
+ stats->num_pages),
+ errdetail("%.0f index row versions were removed.\n"
+ "%u index pages have been deleted, %u are currently reusable.\n"
+ "%s",
+ stats->tuples_removed,
+ stats->pages_deleted, stats->pages_free,
+ vac_show_rusage(&ru0))));
/*
- * Check for tuple count mismatch. If the index is partial, then it's OK
- * for it to have fewer tuples than the heap; else we got trouble.
+ * Check for tuple count mismatch. If the index is partial, then it's
+ * OK for it to have fewer tuples than the heap; else we got trouble.
*/
if (stats->num_index_tuples != num_tuples + keep_tuples)
{
ereport(WARNING,
(errmsg("index \"%s\" contains %.0f row versions, but table contains %.0f row versions",
RelationGetRelationName(indrel),
- stats->num_index_tuples, num_tuples + keep_tuples),
+ stats->num_index_tuples, num_tuples + keep_tuples),
errhint("Rebuild the index with REINDEX.")));
}
/*
* We only report pages with free space at least equal to the average
- * request size --- this avoids cluttering FSM with uselessly-small bits
- * of space. Although FSM would discard pages with little free space
- * anyway, it's important to do this prefiltering because (a) it reduces
- * the time spent holding the FSM lock in RecordRelationFreeSpace, and (b)
- * FSM uses the number of pages reported as a statistic for guiding space
- * management. If we didn't threshold our reports the same way
- * vacuumlazy.c does, we'd be skewing that statistic.
+ * request size --- this avoids cluttering FSM with uselessly-small
+ * bits of space. Although FSM would discard pages with little free
+ * space anyway, it's important to do this prefiltering because (a) it
+ * reduces the time spent holding the FSM lock in
+ * RecordRelationFreeSpace, and (b) FSM uses the number of pages
+ * reported as a statistic for guiding space management. If we didn't
+ * threshold our reports the same way vacuumlazy.c does, we'd be
+ * skewing that statistic.
*/
threshold = GetAvgFSMRequestSize(&onerel->rd_node);
for (i = 0; i < nPages; i++)
{
/*
- * fraged_pages may contain entries for pages that we later decided to
- * truncate from the relation; don't enter them into the free space
- * map!
+ * fraged_pages may contain entries for pages that we later
+ * decided to truncate from the relation; don't enter them into
+ * the free space map!
*/
if (pagedesc[i]->blkno >= rel_pages)
break;
/* allocate a VacPageData entry */
newvacpage = (VacPage) palloc(sizeof(VacPageData) +
- vacpage->offsets_free * sizeof(OffsetNumber));
+ vacpage->offsets_free * sizeof(OffsetNumber));
/* fill it in */
if (vacpage->offsets_free > 0)
}
/*
- * Release the resources acquired by vac_open_indexes. Optionally release
+ * Release the resources acquired by vac_open_indexes. Optionally release
* the locks (say NoLock to keep 'em).
*/
void
bool
vac_is_partial_index(Relation indrel)
{
- /* If the index's AM doesn't support nulls, it's partial for our purposes */
+ /*
+ * If the index's AM doesn't support nulls, it's partial for our
+ * purposes
+ */
if (!indrel->rd_am->amindexnulls)
return true;
snprintf(result, sizeof(result),
"CPU %d.%02ds/%d.%02du sec elapsed %d.%02d sec.",
(int) (ru1.ru.ru_stime.tv_sec - ru0->ru.ru_stime.tv_sec),
- (int) (ru1.ru.ru_stime.tv_usec - ru0->ru.ru_stime.tv_usec) / 10000,
+ (int) (ru1.ru.ru_stime.tv_usec - ru0->ru.ru_stime.tv_usec) / 10000,
(int) (ru1.ru.ru_utime.tv_sec - ru0->ru.ru_utime.tv_sec),
- (int) (ru1.ru.ru_utime.tv_usec - ru0->ru.ru_utime.tv_usec) / 10000,
+ (int) (ru1.ru.ru_utime.tv_usec - ru0->ru.ru_utime.tv_usec) / 10000,
(int) (ru1.tv.tv_sec - ru0->tv.tv_sec),
(int) (ru1.tv.tv_usec - ru0->tv.tv_usec) / 10000);
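/*
 * Standalone sketch of the elapsed-time formatting above: seconds plus
 * centiseconds derived from the tv_sec/tv_usec deltas.  The code above
 * subtracts the fields directly; this variant normalizes a negative
 * microsecond delta first, which is one common way to keep the printed
 * value sensible when the borrow occurs (an illustration, not the
 * committed code).
 */
#include <stdio.h>
#include <sys/time.h>

static void
show_elapsed(const struct timeval *start, const struct timeval *stop)
{
	long		sec = stop->tv_sec - start->tv_sec;
	long		usec = stop->tv_usec - start->tv_usec;

	if (usec < 0)
	{
		sec -= 1;
		usec += 1000000;
	}
	printf("elapsed %ld.%02ld sec.\n", sec, usec / 10000);
}

int
main(void)
{
	/* hypothetical sample: 1.25s elapsed, crossing a microsecond borrow */
	struct timeval	t0 = {100, 900000};
	struct timeval	t1 = {102, 150000};

	show_elapsed(&t0, &t1);		/* prints "elapsed 1.25 sec." */
	return 0;
}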