Patch VACUUM problem with moving chain of update tuples when source
and destination of a tuple lie on the same page.
(Previously fixed in REL7_0 branch, now apply to current.)

author     Tom Lane <tgl@sss.pgh.pa.us>    Sun, 22 Oct 2000 19:49:43 +0000 (19:49 +0000)
committer  Tom Lane <tgl@sss.pgh.pa.us>    Sun, 22 Oct 2000 19:49:43 +0000 (19:49 +0000)
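
Background note for the scan_heap hunks below: when VACUUM finds that the transaction that inserted or deleted a tuple aborted, the patch now both flags the page as changed (pgchanged = true, so the buffer actually gets written out) and clears the hint bits that would contradict HEAP_XMAX_INVALID. The standalone sketch below merely restates that bit manipulation; the flag values, DemoTupleHeader, and invalidate_xmax are invented stand-ins for illustration, not the real definitions from src/include/access/htup.h.

#include <stdio.h>
#include <stdint.h>

/* Illustrative flag values only; the real definitions live in
   src/include/access/htup.h and are not reproduced here. */
#define HEAP_XMAX_COMMITTED     0x0001
#define HEAP_XMAX_INVALID       0x0002
#define HEAP_MARKED_FOR_UPDATE  0x0004

typedef struct DemoTupleHeader
{
    uint16_t    t_infomask;     /* stand-in for HeapTupleHeaderData.t_infomask */
} DemoTupleHeader;

/*
 * Invalidate a tuple's xmax the way the patched scan_heap does: set
 * HEAP_XMAX_INVALID, clear the hint bits that would contradict it, and
 * report that the page changed so the caller writes the buffer out.
 */
static int
invalidate_xmax(DemoTupleHeader *tup)
{
    tup->t_infomask |= HEAP_XMAX_INVALID;
    tup->t_infomask &= ~(HEAP_XMAX_COMMITTED | HEAP_MARKED_FOR_UPDATE);
    return 1;                   /* i.e. pgchanged = true */
}

int
main(void)
{
    DemoTupleHeader tup = { HEAP_XMAX_COMMITTED | HEAP_MARKED_FOR_UPDATE };
    int         pgchanged = invalidate_xmax(&tup);

    printf("infomask = 0x%04x, pgchanged = %d\n",
           (unsigned) tup.t_infomask, pgchanged);
    return 0;
}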

src/backend/commands/vacuum.c

index c7496c6c46c531652a27dad872dc176db93fb70e..9a790aae2e3195310238926947cb0e83d7c6d0d7 100644
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *       $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.168 2000/10/16 17:08:05 momjian Exp $
+ *       $Header: /cvsroot/pgsql/src/backend/commands/vacuum.c,v 1.169 2000/10/22 19:49:43 tgl Exp $
  *
 
  *-------------------------------------------------------------------------
@@ -669,6 +669,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
                                                                                           tuple.t_data->t_cmin))
                                        {
                                                tuple.t_data->t_infomask |= HEAP_XMIN_INVALID;
+                                               pgchanged = true;
                                                tupgone = true;
                                        }
                                        else
@@ -683,6 +684,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
                                                                                                tuple.t_data->t_cmin))
                                        {
                                                tuple.t_data->t_infomask |= HEAP_XMIN_INVALID;
+                                               pgchanged = true;
                                                tupgone = true;
                                        }
                                        else
@@ -730,8 +732,10 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
                                {
                                        if (tuple.t_data->t_infomask & HEAP_MARKED_FOR_UPDATE)
                                        {
-                                               pgchanged = true;
                                                tuple.t_data->t_infomask |= HEAP_XMAX_INVALID;
+                                               tuple.t_data->t_infomask &=
+                                                       ~(HEAP_XMAX_COMMITTED | HEAP_MARKED_FOR_UPDATE);
+                                               pgchanged = true;
                                        }
                                        else
                                                tupgone = true;
@@ -746,6 +750,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
                                        if (tuple.t_data->t_infomask & HEAP_MARKED_FOR_UPDATE)
                                        {
                                                tuple.t_data->t_infomask |= HEAP_XMAX_INVALID;
+                                               tuple.t_data->t_infomask &=
+                                                       ~(HEAP_XMAX_COMMITTED | HEAP_MARKED_FOR_UPDATE);
                                                pgchanged = true;
                                        }
                                        else
@@ -759,6 +765,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
                                         * from crashed process. - vadim 06/02/97
                                         */
                                        tuple.t_data->t_infomask |= HEAP_XMAX_INVALID;
+                                       tuple.t_data->t_infomask &=
+                                               ~(HEAP_XMAX_COMMITTED | HEAP_MARKED_FOR_UPDATE);
                                        pgchanged = true;
                                }
                                else
@@ -818,6 +826,14 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
                        {
                                ItemId          lpp;
 
+                               /*
+                                * Here we are building a temporary copy of the page with
+                                * dead tuples removed.  Below we will apply
+                                * PageRepairFragmentation to the copy, so that we can
+                                * determine how much space will be available after
+                                * removal of dead tuples.  But note we are NOT changing
+                                * the real page yet...
+                                */
                                if (tempPage == (Page) NULL)
                                {
                                        Size            pageSize;
@@ -827,14 +843,12 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
                                        memmove(tempPage, page, pageSize);
                                }
 
+                               /* mark it unused on the temp page */
                                lpp = &(((PageHeader) tempPage)->pd_linp[offnum - 1]);
-
-                               /* mark it unused */
                                lpp->lp_flags &= ~LP_USED;
 
                                vacpage->offsets[vacpage->offsets_free++] = offnum;
                                tups_vacuumed++;
-
                        }
                        else
                        {
@@ -1397,20 +1411,45 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                        tuple.t_datamcxt = NULL;
                                        tuple.t_data = (HeapTupleHeader) PageGetItem(Cpage, Citemid);
                                        tuple_len = tuple.t_len = ItemIdGetLength(Citemid);
-                                       /* Get page to move in */
+
+                                       /*
+                                        * make a copy of the source tuple, and then mark the
+                                        * source tuple MOVED_OFF.
+                                        */
+                                       heap_copytuple_with_tuple(&tuple, &newtup);
+
+                                       RelationInvalidateHeapTuple(onerel, &tuple);
+
+                                       TransactionIdStore(myXID, (TransactionId *) &(tuple.t_data->t_cmin));
+                                       tuple.t_data->t_infomask &=
+                                               ~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_IN);
+                                       tuple.t_data->t_infomask |= HEAP_MOVED_OFF;
+
+                                       /* Get page to move to */
                                        cur_buffer = ReadBuffer(onerel, destvacpage->blkno);
 
                                        /*
                                         * We should LockBuffer(cur_buffer) but don't, at the
-                                        * moment. If you'll do LockBuffer then UNLOCK it
-                                        * before index_insert: unique btree-s call heap_fetch
-                                        * to get t_infomask of inserted heap tuple !!!
+                                        * moment.  This should be safe enough, since we have
+                                        * exclusive lock on the whole relation.
+                                        * If you'll do LockBuffer then UNLOCK it before
+                                        * index_insert: unique btree-s call heap_fetch to get
+                                        * t_infomask of inserted heap tuple !!!
                                         */
                                        ToPage = BufferGetPage(cur_buffer);
 
                                        /*
                                         * If this page was not used before - clean it.
                                         *
+                                        * NOTE: a nasty bug used to lurk here.  It is possible
+                                        * for the source and destination pages to be the same
+                                        * (since this tuple-chain member can be on a page lower
+                                        * than the one we're currently processing in the outer
+                                        * loop).  If that's true, then after vacuum_page() the
+                                        * source tuple will have been moved, and tuple.t_data
+                                        * will be pointing at garbage.  Therefore we must do
+                                        * everything that uses tuple.t_data BEFORE this step!!
+                                        *
                                         * This path is different from the other callers of
                                         * vacuum_page, because we have already incremented the
                                         * vacpage's offsets_used field to account for the
@@ -1428,8 +1467,11 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                                vacuum_page(ToPage, destvacpage);
                                                destvacpage->offsets_used = sv_offsets_used;
                                        }
-                                       heap_copytuple_with_tuple(&tuple, &newtup);
-                                       RelationInvalidateHeapTuple(onerel, &tuple);
+
+                                       /*
+                                        * Update the state of the copied tuple, and store it
+                                        * on the destination page.
+                                        */
                                        TransactionIdStore(myXID, (TransactionId *) &(newtup.t_data->t_cmin));
                                        newtup.t_data->t_infomask &=
                                                ~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_OFF);
@@ -1450,8 +1492,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                                last_move_dest_block = destvacpage->blkno;
 
                                        /*
-                                        * Set t_ctid pointing to itself for last tuple in
-                                        * chain and to next tuple in chain otherwise.
+                                        * Set new tuple's t_ctid pointing to itself for last
+                                        * tuple in chain, and to next tuple in chain otherwise.
                                         */
                                        if (!ItemPointerIsValid(&Ctid))
                                                newtup.t_data->t_ctid = newtup.t_self;
@@ -1459,11 +1501,6 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                                newtup.t_data->t_ctid = Ctid;
                                        Ctid = newtup.t_self;
 
-                                       TransactionIdStore(myXID, (TransactionId *) &(tuple.t_data->t_cmin));
-                                       tuple.t_data->t_infomask &=
-                                               ~(HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID | HEAP_MOVED_IN);
-                                       tuple.t_data->t_infomask |= HEAP_MOVED_OFF;
-
                                        num_moved++;
 
                                        /*
@@ -1504,10 +1541,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
                                                }
                                        }
                                        WriteBuffer(cur_buffer);
-                                       if (Cbuf == buf)
-                                               ReleaseBuffer(Cbuf);
-                                       else
-                                               WriteBuffer(Cbuf);
+                                       WriteBuffer(Cbuf);
                                }
                                cur_buffer = InvalidBuffer;
                                pfree(vtmove);
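
The NOTE added in repair_frag above is the heart of the fix: because a chain member's source page can be the very page being cleaned as the destination, vacuum_page() can compact away the bytes that tuple.t_data still points at. The patch therefore copies the source tuple (heap_copytuple_with_tuple) and stamps it HEAP_MOVED_OFF before the destination page is touched. The toy program below only illustrates that dangling-pointer hazard with a simulated page; it is not PostgreSQL code, and all of its names (compact_page, PAGE_SIZE, and so on) are invented for the sketch.

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 32

/*
 * Toy model: the page holds a dead record followed by a live one.
 * "Compacting" the page slides the live record to the front and scribbles
 * over the freed space, roughly what vacuum_page()/PageRepairFragmentation
 * do to reclaim space.  page[PAGE_SIZE-1] stays NUL so printing the stale
 * pointer below remains well-defined.
 */
static void
compact_page(char *page, size_t live_off, size_t live_len)
{
    memmove(page, page + live_off, live_len);
    memset(page + live_len, 'X', PAGE_SIZE - live_len - 1);
}

int
main(void)
{
    char        page[PAGE_SIZE];
    const char *dead = "dead-tuple";
    const char *live = "live-tuple";
    size_t      live_off = strlen(dead) + 1;
    char        saved[PAGE_SIZE];
    char       *tuple_in_page;

    memset(page, 0, sizeof(page));
    strcpy(page, dead);
    strcpy(page + live_off, live);

    /* Pointer into the page, playing the role of tuple.t_data. */
    tuple_in_page = page + live_off;

    /* Safe order (what the patch enforces): copy the tuple out first. */
    strcpy(saved, tuple_in_page);

    /* Now compact the page; when source and destination are the same page,
       this is effectively what vacuum_page() does under the source tuple. */
    compact_page(page, live_off, strlen(live) + 1);

    printf("pointer into page now reads: \"%s\"\n", tuple_in_page);  /* garbage */
    printf("saved copy still reads:      \"%s\"\n", saved);
    return 0;
}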