*
*
* IDENTIFICATION
- * $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.168 2000/10/16 17:08:05 momjian Exp $
+ * $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.169 2000/10/22 19:49:43 tgl Exp $
*
*-------------------------------------------------------------------------
tuple.t_data->t_cmin))
{
tuple.t_data->t_infomask |= HEAP_XMIN_INVALID;
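/* (editor's note: the added pgchanged below flags the page dirty,
 * so the hint-bit change actually reaches disk) */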
+ pgchanged = true;
tupgone = true;
}
else
tuple.t_data->t_cmin))
{
tuple.t_data->t_infomask |= HEAP_XMIN_INVALID;
+ pgchanged = true;
tupgone = true;
}
else
{
if (tuple.t_data->t_infomask & HEAP_MARKED_FOR_UPDATE)
{
- pgchanged = true;
tuple.t_data->t_infomask |= HEAP_XMAX_INVALID;
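/* (editor's note on the two added lines below: leaving
 * HEAP_XMAX_COMMITTED set beside HEAP_XMAX_INVALID would leave
 * contradictory hint bits, and HEAP_MARKED_FOR_UPDATE is
 * meaningless once the updater's xmax is invalidated; the same
 * fix recurs at each similar site below) */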
+ tuple.t_data->t_infomask &=
+ ~(HEAP_XMAX_COMMITTED | HEAP_MARKED_FOR_UPDATE);
+ pgchanged = true;
}
else
tupgone = true;
if (tuple.t_data->t_infomask & HEAP_MARKED_FOR_UPDATE)
{
tuple.t_data->t_infomask |= HEAP_XMAX_INVALID;
+ tuple.t_data->t_infomask &=
+ ~(HEAP_XMAX_COMMITTED | HEAP_MARKED_FOR_UPDATE);
pgchanged = true;
}
else
* from crashed process. - vadim 06/02/97
*/
tuple.t_data->t_infomask |= HEAP_XMAX_INVALID;
+ tuple.t_data->t_infomask &=
+ ~(HEAP_XMAX_COMMITTED | HEAP_MARKED_FOR_UPDATE);
pgchanged = true;
}
else
{
ItemId lpp;
+ /*
+ * Here we are building a temporary copy of the page with
+ * dead tuples removed. Below we will apply
+ * PageRepairFragmentation to the copy, so that we can
+ * determine how much space will be available after
+ * removal of dead tuples. But note we are NOT changing
+ * the real page yet...
+ */
if (tempPage == (Page) NULL)
{
Size pageSize;

pageSize = PageGetPageSize(page);
tempPage = (Page) palloc(pageSize);
memmove(tempPage, page, pageSize);
}
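/* Illustrative sketch, not part of this patch: once the dead
 * tuples have been marked unused on the copy, the reclaimable
 * space can be measured without touching the real page, e.g.:
 *
 *		PageRepairFragmentation(tempPage);
 *		if (PageGetFreeSpace(tempPage) >= min_tuple_len)
 *			...page is a candidate to receive moved tuples...
 *
 * (min_tuple_len is a placeholder name; whether this exact call
 * sequence matches the elided code below is an assumption.) */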
+ /* mark it unused on the temp page */
lpp = &(((PageHeader) tempPage)->pd_linp[offnum - 1]);
-
- /* mark it unused */
lpp->lp_flags &= ~LP_USED;
vacpage->offsets[vacpage->offsets_free++] = offnum;
tups_vacuumed++;
-
}
else
{
tuple.t_datamcxt = NULL;
tuple.t_data = (HeapTupleHeader) PageGetItem(Cpage, Citemid);
tuple_len = tuple.t_len = ItemIdGetLength(Citemid);
- /* Get page to move in */
+
+ /*
+ * make a copy of the source tuple, and then mark the
+ * source tuple MOVED_OFF.
+ */
+ heap_copytuple_with_tuple(&tuple, &newtup);
+
+ RelationInvalidateHeapTuple(onerel, &tuple);
+
+ TransactionIdStore(myXID, (TransactionId *) &(tuple.t_data->t_cmin));
+ tuple.t_data->t_infomask &=
+ ~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_IN);
+ tuple.t_data->t_infomask |= HEAP_MOVED_OFF;
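/* (editor's note: t_cmin now holds VACUUM's own XID; if we crash
 * before committing, other backends can check that XID to decide
 * whether the HEAP_MOVED_OFF flag should be believed) */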
+
+ /* Get page to move to */
cur_buffer = ReadBuffer(onerel, destvacpage->blkno);
/*
* We should LockBuffer(cur_buffer) but don't, at the
- * moment. If you'll do LockBuffer then UNLOCK it
- * before index_insert: unique btree-s call heap_fetch
- * to get t_infomask of inserted heap tuple !!!
+ * moment. This should be safe enough, since we have
+ * exclusive lock on the whole relation.
+ * If you do add LockBuffer here, UNLOCK it before
+ * index_insert: unique btrees call heap_fetch to get
+ * the t_infomask of the inserted heap tuple!
*/
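/* Sketch of the locking pattern the comment above prescribes
 * (illustrative only; this patch deliberately adds no buffer lock):
 *
 *		LockBuffer(cur_buffer, BUFFER_LOCK_EXCLUSIVE);
 *		...place the tuple on the page...
 *		LockBuffer(cur_buffer, BUFFER_LOCK_UNLOCK);
 *		...only now do index_insert, since a unique-index check
 *		may heap_fetch the tuple we just placed... */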
ToPage = BufferGetPage(cur_buffer);
/*
* If this page was not used before - clean it.
*
+ * NOTE: a nasty bug used to lurk here. It is possible
+ * for the source and destination pages to be the same
+ * (since this tuple-chain member can be on a page lower
+ * than the one we're currently processing in the outer
+ * loop). If that's true, then after vacuum_page() the
+ * source tuple will have been moved, and tuple.t_data
+ * will be pointing at garbage. Therefore we must do
+ * everything that uses tuple.t_data BEFORE this step!!
+ *
* This path is different from the other callers of
* vacuum_page, because we have already incremented the
* vacpage's offsets_used field to account for the
* tuple(s) we expect to move onto the page. Therefore
* vacuum_page's check for offsets_used == 0 is wrong.
* But since that's a good debugging check for all
* other callers, we work around it here rather than
* remove it.
*/
if (!PageIsEmpty(ToPage) && vtmove[ti].cleanVpd)
{
int sv_offsets_used = destvacpage->offsets_used;

destvacpage->offsets_used = 0;
vacuum_page(ToPage, destvacpage);
destvacpage->offsets_used = sv_offsets_used;
}
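/* The hazard described in the NOTE above, in miniature (an
 * editorial simplification of the surrounding code):
 *
 *		tuple.t_data = PageGetItem(Cpage, Citemid);
 *		vacuum_page(ToPage, destvacpage);	...may be Cpage itself!
 *		...tuple.t_data may now point at garbage...
 *
 * which is why the copy/MOVED_OFF steps were moved above the
 * ReadBuffer call in this revision. */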
- heap_copytuple_with_tuple(&tuple, &newtup);
- RelationInvalidateHeapTuple(onerel, &tuple);
+
+ /*
+ * Update the state of the copied tuple, and store it
+ * on the destination page.
+ */
TransactionIdStore(myXID, (TransactionId *) &(newtup.t_data->t_cmin));
newtup.t_data->t_infomask &=
~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_OFF);
newtup.t_data->t_infomask |= HEAP_MOVED_IN;
last_move_dest_block = destvacpage->blkno;
/*
- * Set t_ctid pointing to itself for last tuple in
- * chain and to next tuple in chain otherwise.
+ * Set new tuple's t_ctid pointing to itself for last
+ * tuple in chain, and to next tuple in chain otherwise.
*/
if (!ItemPointerIsValid(&Ctid))
newtup.t_data->t_ctid = newtup.t_self;
else
newtup.t_data->t_ctid = Ctid;
Ctid = newtup.t_self;
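/* Chain linking, illustrated (assuming the chain is processed from
 * its last member back to its first, so Ctid always names the
 * just-moved successor):
 *
 *		move C (tail):	Ctid invalid  ->  C.t_ctid = C's new t_self
 *		move B:			Ctid = C      ->  B.t_ctid = C
 *		move A (head):	Ctid = B      ->  A.t_ctid = B
 */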
- TransactionIdStore(myXID, (TransactionId *) &(tuple.t_data->t_cmin));
- tuple.t_data->t_infomask &=
- ~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_IN);
- tuple.t_data->t_infomask |= HEAP_MOVED_OFF;
-
num_moved++;
}
}
WriteBuffer(cur_buffer);
- if (Cbuf == buf)
- ReleaseBuffer(Cbuf);
- else
- WriteBuffer(Cbuf);
+ WriteBuffer(Cbuf);
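/* (editor's note: both buffers are now dirty; cur_buffer holds the
 * moved copy and Cbuf's source tuple was stamped HEAP_MOVED_OFF
 * above, so each must be written unconditionally. The buffer
 * manager's pin counts make this safe even when Cbuf == buf, which
 * is why the old special case could be dropped) */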
}
cur_buffer = InvalidBuffer;
pfree(vtmove);