/*-------------------------------------------------------------------------
 *
 * rewriteheap.c
 *	  Support functions to rewrite tables.
 *
 * These functions provide a facility to completely rewrite a heap, while
 * preserving visibility information and update chains.
 *
 * INTERFACE
 *
 * The caller is responsible for creating the new heap, all catalog
 * changes, supplying the tuples to be written to the new heap, and
 * rebuilding indexes.  The caller must hold AccessExclusiveLock on the
 * target table, because we assume no one else is writing into it.
 *
 * To use the facility:
 *
 * begin_heap_rewrite
 * while (fetch next tuple)
 * {
 *	   if (tuple is dead)
 *		   rewrite_heap_dead_tuple
 *	   else
 *	   {
 *		   // do any transformations here if required
 *		   rewrite_heap_tuple
 *	   }
 * }
 * end_heap_rewrite
 *
 * The contents of the new relation shouldn't be relied on until after
 * end_heap_rewrite is called.
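 *
 * As a more concrete sketch (hypothetical: the scan setup, the visibility
 * classification, and the variable names are illustrative assumptions,
 * not part of this module's API), a CLUSTER-style caller might drive the
 * facility roughly like this:
 *
 *	   rwstate = begin_heap_rewrite(NewHeap, OldestXmin, FreezeXid,
 *									MultiXactCutoff, use_wal);
 *	   while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
 *	   {
 *		   if (HeapTupleSatisfiesVacuum(tuple->t_data, OldestXmin, buf)
 *			   == HEAPTUPLE_DEAD)
 *			   rewrite_heap_dead_tuple(rwstate, tuple);
 *		   else
 *		   {
 *			   copied = heap_copytuple(tuple);	  // transform here if needed
 *			   rewrite_heap_tuple(rwstate, tuple, copied);
 *			   heap_freetuple(copied);
 *		   }
 *	   }
 *	   end_heap_rewrite(rwstate);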
 *
 *
 * IMPLEMENTATION
 *
 * This would be a fairly trivial affair, except that we need to maintain
 * the ctid chains that link versions of an updated tuple together.
 * Since the newly stored tuples will have tids different from the original
 * ones, if we just copied t_ctid fields to the new table the links would
 * be wrong.  When we are required to copy a (presumably recently-dead or
 * delete-in-progress) tuple whose ctid doesn't point to itself, we have
 * to substitute the correct ctid instead.
 *
 * For each ctid reference from A -> B, we might encounter either A first
 * or B first.  (Note that a tuple in the middle of a chain is both A and B
 * of different pairs.)
 *
 * If we encounter A first, we'll store the tuple in the unresolved_tups
 * hash table.  When we later encounter B, we remove A from the hash table,
 * fix the ctid to point to the new location of B, and insert both A and B
 * to the new heap.
 *
 * If we encounter B first, we can insert B to the new heap right away.
 * We then add an entry to the old_new_tid_map hash table showing B's
 * original tid (in the old heap) and new tid (in the new heap).
 * When we later encounter A, we get the new location of B from the table,
 * and can write A immediately with the correct ctid.
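 *
 * For example (the TIDs here are made up): suppose version A at old TID
 * (1,5) was updated into version B at old TID (1,9), so A's t_ctid is
 * (1,9).  If the scan hands us A first, A waits in unresolved_tups, keyed
 * by B's expected xmin and old TID (1,9).  When B is later written to,
 * say, new TID (0,3), we look up that key, find A, set A's t_ctid to
 * (0,3), and write A as well.  In the opposite order, B's mapping
 * (1,9) -> (0,3) goes into old_new_tid_map, and A can be written
 * immediately with t_ctid = (0,3) as soon as we see it.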
 *
 * Entries in the hash tables can be removed as soon as the later tuple
 * is encountered.  That helps to keep the memory usage down.  At the end,
 * both tables are usually empty; we should have encountered both A and B
 * of each pair.  However, it's possible for A to be RECENTLY_DEAD and B
 * entirely DEAD according to HeapTupleSatisfiesVacuum, because the test
 * for deadness using OldestXmin is not exact.  In such a case we might
 * encounter B first, and skip it, and find A later.  Then A would be added
 * to unresolved_tups, and stay there until end of the rewrite.  Since
 * this case is very unusual, we don't worry about the memory usage.
 *
 * Using in-memory hash tables means that we use some memory for each live
 * update chain in the table, from the time we find one end of the
 * reference until we find the other end.  That shouldn't be a problem in
 * practice, but if you do something like an UPDATE without a where-clause
 * on a large table, and then run CLUSTER in the same transaction, you
 * could run out of memory.  It doesn't seem worthwhile to add support for
 * spill-to-disk, as there shouldn't be that many RECENTLY_DEAD tuples in a
 * table under normal circumstances.  Furthermore, in the typical scenario
 * of CLUSTERing on an unchanging key column, we'll see all the versions
 * of a given tuple together anyway, and so the peak memory usage is only
 * proportional to the number of RECENTLY_DEAD versions of a single row, not
 * in the whole table.  Note that if we do fail halfway through a CLUSTER,
 * the old table is still valid, so failure is not catastrophic.
 *
 * We can't use the normal heap_insert function to insert into the new
 * heap, because heap_insert overwrites the visibility information.
 * We use a special-purpose raw_heap_insert function instead, which
 * is optimized for bulk inserting a lot of tuples, knowing that we have
 * exclusive access to the heap.  raw_heap_insert builds new pages in
 * local storage.  When a page is full, or at the end of the process,
 * we insert it to WAL as a single record and then write it to disk
 * directly through smgr.  Note, however, that any data sent to the new
 * heap's TOAST table will go through the normal bufmgr.
 *
 *
 * Portions Copyright (c) 1996-2013, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994-5, Regents of the University of California
 *
 * IDENTIFICATION
 *	  src/backend/access/heap/rewriteheap.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/heapam.h"
#include "access/heapam_xlog.h"
#include "access/rewriteheap.h"
#include "access/transam.h"
#include "access/tuptoaster.h"
#include "storage/bufmgr.h"
#include "storage/smgr.h"
#include "utils/memutils.h"
#include "utils/rel.h"
#include "utils/tqual.h"

/*
 * State associated with a rewrite operation.  This is opaque to the user
 * of the rewrite facility.
 */
typedef struct RewriteStateData
{
	Relation	rs_new_rel;		/* destination heap */
	Page		rs_buffer;		/* page currently being built */
	BlockNumber rs_blockno;		/* block where page will go */
	bool		rs_buffer_valid;	/* T if any tuples in buffer */
	bool		rs_use_wal;		/* must we WAL-log inserts? */
	TransactionId rs_oldest_xmin;	/* oldest xmin used by caller to
									 * determine tuple visibility */
	TransactionId rs_freeze_xid;	/* Xid that will be used as freeze cutoff
									 * point */
	MultiXactId rs_cutoff_multi;	/* MultiXactId that will be used as cutoff
									 * point for multixacts */
	MemoryContext rs_cxt;		/* for hash tables and entries and tuples in
								 * them */
	HTAB	   *rs_unresolved_tups;		/* unmatched A tuples */
	HTAB	   *rs_old_new_tid_map;		/* unmatched B tuples */
} RewriteStateData;

/*
 * The lookup keys for the hash tables are tuple TID and xmin (we must check
 * both to avoid false matches from dead tuples).  Beware that there is
 * probably some padding space in this struct; it must be zeroed out for
 * correct hashtable operation.
 */
typedef struct
{
	TransactionId xmin;			/* tuple xmin */
	ItemPointerData tid;		/* tuple location in old heap */
} TidHashKey;
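
/*
 * For example (a sketch; "tup" is a hypothetical HeapTuple), a key must be
 * built by zeroing the whole struct before assigning the fields, so that
 * any compiler-inserted padding bytes are zeroed too:
 *
 *		TidHashKey	hashkey;
 *
 *		memset(&hashkey, 0, sizeof(hashkey));
 *		hashkey.xmin = HeapTupleHeaderGetXmin(tup->t_data);
 *		hashkey.tid = tup->t_self;
 */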

/*
 * Entry structures for the hash tables
 */
typedef struct
{
	TidHashKey	key;			/* expected xmin/old location of B tuple */
	ItemPointerData old_tid;	/* A's location in the old heap */
	HeapTuple	tuple;			/* A's tuple contents */
} UnresolvedTupData;

typedef UnresolvedTupData *UnresolvedTup;

typedef struct
{
	TidHashKey	key;			/* actual xmin/old location of B tuple */
	ItemPointerData new_tid;	/* where we put it in the new heap */
} OldToNewMappingData;

typedef OldToNewMappingData *OldToNewMapping;

/* prototypes for internal functions */
static void raw_heap_insert(RewriteState state, HeapTuple tup);

/*
 * Begin a rewrite of a table
 *
 * new_heap		new, locked heap relation to insert tuples to
 * oldest_xmin	xid used by the caller to determine which tuples are dead
 * freeze_xid	xid before which tuples will be frozen
 * cutoff_multi	multixact before which multis will be removed
 * use_wal		should the inserts to the new heap be WAL-logged?
 *
 * Returns an opaque RewriteState, allocated in current memory context,
 * to be used in subsequent calls to the other functions.
 */
RewriteState
begin_heap_rewrite(Relation new_heap, TransactionId oldest_xmin,
				   TransactionId freeze_xid, MultiXactId cutoff_multi,
				   bool use_wal)
{
	RewriteState state;
	MemoryContext rw_cxt;
	MemoryContext old_cxt;
	HASHCTL		hash_ctl;

	/*
	 * To ease cleanup, make a separate context that will contain the
	 * RewriteState struct itself plus all subsidiary data.
	 */
	rw_cxt = AllocSetContextCreate(CurrentMemoryContext,
								   "Table rewrite",
								   ALLOCSET_DEFAULT_MINSIZE,
								   ALLOCSET_DEFAULT_INITSIZE,
								   ALLOCSET_DEFAULT_MAXSIZE);
	old_cxt = MemoryContextSwitchTo(rw_cxt);

	/* Create and fill in the state struct */
	state = palloc0(sizeof(RewriteStateData));

	state->rs_new_rel = new_heap;
	state->rs_buffer = (Page) palloc(BLCKSZ);
	/* new_heap needn't be empty, just locked */
	state->rs_blockno = RelationGetNumberOfBlocks(new_heap);
	state->rs_buffer_valid = false;
	state->rs_use_wal = use_wal;
	state->rs_oldest_xmin = oldest_xmin;
	state->rs_freeze_xid = freeze_xid;
	state->rs_cutoff_multi = cutoff_multi;
	state->rs_cxt = rw_cxt;

	/* Initialize hash tables used to track update chains */
	memset(&hash_ctl, 0, sizeof(hash_ctl));
	hash_ctl.keysize = sizeof(TidHashKey);
	hash_ctl.entrysize = sizeof(UnresolvedTupData);
	hash_ctl.hcxt = state->rs_cxt;
	hash_ctl.hash = tag_hash;

	state->rs_unresolved_tups =
		hash_create("Rewrite / Unresolved ctids",
					128,		/* arbitrary initial size */
					&hash_ctl,
					HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);

	hash_ctl.entrysize = sizeof(OldToNewMappingData);

	state->rs_old_new_tid_map =
		hash_create("Rewrite / Old to new tid map",
					128,		/* arbitrary initial size */
					&hash_ctl,
					HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);

	MemoryContextSwitchTo(old_cxt);

	return state;
}

/*
 * End a rewrite.
 *
 * state and any other resources are freed.
 */
void
end_heap_rewrite(RewriteState state)
{
	HASH_SEQ_STATUS seq_status;
	UnresolvedTup unresolved;

	/*
	 * Write any remaining tuples in the UnresolvedTups table. If we have any
	 * left, they should in fact be dead, but let's err on the safe side.
	 */
	hash_seq_init(&seq_status, state->rs_unresolved_tups);

	while ((unresolved = hash_seq_search(&seq_status)) != NULL)
	{
		ItemPointerSetInvalid(&unresolved->tuple->t_data->t_ctid);
		raw_heap_insert(state, unresolved->tuple);
	}

	/* Write the last page, if any */
	if (state->rs_buffer_valid)
	{
		if (state->rs_use_wal)
			log_newpage(&state->rs_new_rel->rd_node,
						MAIN_FORKNUM,
						state->rs_blockno,
						state->rs_buffer,
						true);
		RelationOpenSmgr(state->rs_new_rel);

		PageSetChecksumInplace(state->rs_buffer, state->rs_blockno);

		smgrextend(state->rs_new_rel->rd_smgr, MAIN_FORKNUM, state->rs_blockno,
				   (char *) state->rs_buffer, true);
	}

	/*
	 * If the rel is WAL-logged, must fsync before commit.  We use heap_sync
	 * to ensure that the toast table gets fsync'd too.
	 *
	 * It's obvious that we must do this when not WAL-logging. It's less
	 * obvious that we have to do it even if we did WAL-log the pages. The
	 * reason is the same as in tablecmds.c's copy_relation_data(): we're
	 * writing data that's not in shared buffers, and so a CHECKPOINT
	 * occurring during the rewriteheap operation won't have fsync'd data we
	 * wrote before the checkpoint.
	 */
	if (RelationNeedsWAL(state->rs_new_rel))
		heap_sync(state->rs_new_rel);

	/* Deleting the context frees everything */
	MemoryContextDelete(state->rs_cxt);
}

/*
 * Add a tuple to the new heap.
 *
 * Visibility information is copied from the original tuple, except that
 * we "freeze" very-old tuples.  Note that since we scribble on new_tuple,
 * it had better be temp storage not a pointer to the original tuple.
 *
 * state		opaque state as returned by begin_heap_rewrite
 * old_tuple	original tuple in the old heap
 * new_tuple	new, rewritten tuple to be inserted to new heap
 */
void
rewrite_heap_tuple(RewriteState state,
				   HeapTuple old_tuple, HeapTuple new_tuple)
{
	MemoryContext old_cxt;
	ItemPointerData old_tid;
	TidHashKey	hashkey;
	bool		found;
	bool		free_new;

	old_cxt = MemoryContextSwitchTo(state->rs_cxt);

	/*
	 * Copy the original tuple's visibility information into new_tuple.
	 *
	 * XXX we might later need to copy some t_infomask2 bits, too? Right now,
	 * we intentionally clear the HOT status bits.
	 */
	memcpy(&new_tuple->t_data->t_choice.t_heap,
		   &old_tuple->t_data->t_choice.t_heap,
		   sizeof(HeapTupleFields));

	new_tuple->t_data->t_infomask &= ~HEAP_XACT_MASK;
	new_tuple->t_data->t_infomask2 &= ~HEAP2_XACT_MASK;
	new_tuple->t_data->t_infomask |=
		old_tuple->t_data->t_infomask & HEAP_XACT_MASK;

	/*
	 * While we have our hands on the tuple, we may as well freeze any
	 * eligible xmin or xmax, so that future VACUUM effort can be saved.
	 */
	heap_freeze_tuple(new_tuple->t_data, state->rs_freeze_xid,
					  state->rs_cutoff_multi);

	/*
	 * Invalid ctid means that ctid should point to the tuple itself. We'll
	 * override it later if the tuple is part of an update chain.
	 */
	ItemPointerSetInvalid(&new_tuple->t_data->t_ctid);

	/*
	 * If the tuple has been updated, check the old-to-new mapping hash table.
	 */
	if (!((old_tuple->t_data->t_infomask & HEAP_XMAX_INVALID) ||
		  HeapTupleHeaderIsOnlyLocked(old_tuple->t_data)) &&
		!(ItemPointerEquals(&(old_tuple->t_self),
							&(old_tuple->t_data->t_ctid))))
	{
		OldToNewMapping mapping;

		memset(&hashkey, 0, sizeof(hashkey));
		hashkey.xmin = HeapTupleHeaderGetUpdateXid(old_tuple->t_data);
		hashkey.tid = old_tuple->t_data->t_ctid;

		mapping = (OldToNewMapping)
			hash_search(state->rs_old_new_tid_map, &hashkey,
						HASH_FIND, NULL);

		if (mapping != NULL)
		{
			/*
			 * We've already copied the tuple that t_ctid points to, so we can
			 * set the ctid of this tuple to point to the new location, and
			 * insert it right away.
			 */
			new_tuple->t_data->t_ctid = mapping->new_tid;

			/* We don't need the mapping entry anymore */
			hash_search(state->rs_old_new_tid_map, &hashkey,
						HASH_REMOVE, &found);
			Assert(found);
		}
		else
		{
			/*
			 * We haven't seen the tuple t_ctid points to yet. Stash this
			 * tuple into unresolved_tups to be written later.
			 */
			UnresolvedTup unresolved;

			unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
									 HASH_ENTER, &found);
			Assert(!found);

			unresolved->old_tid = old_tuple->t_self;
			unresolved->tuple = heap_copytuple(new_tuple);

			/*
			 * We can't do anything more now, since we don't know where the
			 * tuple will be written.
			 */
			MemoryContextSwitchTo(old_cxt);
			return;
		}
	}

	/*
	 * Now we will write the tuple, and then check to see if it is the B tuple
	 * in any new or known pair.  When we resolve a known pair, we will be
	 * able to write that pair's A tuple, and then we have to check if it
	 * resolves some other pair.  Hence, we need a loop here.
	 */
	old_tid = old_tuple->t_self;
	free_new = false;

	for (;;)
	{
		ItemPointerData new_tid;

		/* Insert the tuple and find out where it's put in new_heap */
		raw_heap_insert(state, new_tuple);
		new_tid = new_tuple->t_self;

		/*
		 * If the tuple is the updated version of a row, and the prior version
		 * wouldn't be DEAD yet, then we need to either resolve the prior
		 * version (if it's waiting in rs_unresolved_tups), or make an entry
		 * in rs_old_new_tid_map (so we can resolve it when we do see it). The
		 * previous tuple's xmax would equal this one's xmin, so it's
		 * RECENTLY_DEAD if and only if the xmin is not before OldestXmin.
		 */
		if ((new_tuple->t_data->t_infomask & HEAP_UPDATED) &&
			!TransactionIdPrecedes(HeapTupleHeaderGetXmin(new_tuple->t_data),
								   state->rs_oldest_xmin))
		{
			/*
			 * Okay, this is B in an update pair.  See if we've seen A.
			 */
			UnresolvedTup unresolved;

			memset(&hashkey, 0, sizeof(hashkey));
			hashkey.xmin = HeapTupleHeaderGetXmin(new_tuple->t_data);
			hashkey.tid = old_tid;

			unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
									 HASH_FIND, NULL);

			if (unresolved != NULL)
			{
				/*
				 * We have seen and memorized the previous tuple already. Now
				 * that we know where we inserted the tuple its t_ctid points
				 * to, fix its t_ctid and insert it to the new heap.
				 */
				if (free_new)
					heap_freetuple(new_tuple);
				new_tuple = unresolved->tuple;
				free_new = true;
				old_tid = unresolved->old_tid;
				new_tuple->t_data->t_ctid = new_tid;

				/*
				 * We don't need the hash entry anymore, but don't free its
				 * tuple just yet.
				 */
				hash_search(state->rs_unresolved_tups, &hashkey,
							HASH_REMOVE, &found);
				Assert(found);

				/* loop back to insert the previous tuple in the chain */
				continue;
			}
			else
			{
				/*
				 * Remember the new tid of this tuple. We'll use it to set the
				 * ctid when we find the previous tuple in the chain.
				 */
				OldToNewMapping mapping;

				mapping = hash_search(state->rs_old_new_tid_map, &hashkey,
									  HASH_ENTER, &found);
				Assert(!found);

				mapping->new_tid = new_tid;
			}
		}

		/* Done with this (chain of) tuples, for now */
		if (free_new)
			heap_freetuple(new_tuple);
		break;
	}

	MemoryContextSwitchTo(old_cxt);
}

/*
 * Register a dead tuple with an ongoing rewrite. Dead tuples are not
 * copied to the new table, but we still make note of them so that we
 * can release some resources earlier.
 *
 * Returns true if a tuple was removed from the unresolved_tups table.
 * This indicates that that tuple, previously thought to be "recently dead",
 * is now known really dead and won't be written to the output.
 */
bool
rewrite_heap_dead_tuple(RewriteState state, HeapTuple old_tuple)
{
	/*
	 * If we have already seen an earlier tuple in the update chain that
	 * points to this tuple, let's forget about that earlier tuple. It's in
	 * fact dead as well, our simple xmax < OldestXmin test in
	 * HeapTupleSatisfiesVacuum just wasn't enough to detect it. It happens
	 * when xmin of a tuple is greater than xmax, which sounds
	 * counter-intuitive but is perfectly valid.
	 *
	 * We don't bother to try to detect the situation the other way round,
	 * when we encounter the dead tuple first and then the recently dead one
	 * that points to it. If that happens, we'll have some unmatched entries
	 * in the UnresolvedTups hash table at the end. That can happen anyway,
	 * because a vacuum might have removed the dead tuple in the chain before
	 * us.
	 */
	UnresolvedTup unresolved;
	TidHashKey	hashkey;
	bool		found;

	memset(&hashkey, 0, sizeof(hashkey));
	hashkey.xmin = HeapTupleHeaderGetXmin(old_tuple->t_data);
	hashkey.tid = old_tuple->t_self;

	unresolved = hash_search(state->rs_unresolved_tups, &hashkey,
							 HASH_FIND, NULL);

	if (unresolved != NULL)
	{
		/* Need to free the contained tuple as well as the hashtable entry */
		heap_freetuple(unresolved->tuple);
		hash_search(state->rs_unresolved_tups, &hashkey,
					HASH_REMOVE, &found);
		Assert(found);
		return true;
	}

	return false;
}
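
/*
 * A hypothetical use of the return value above (the variable names are
 * illustrative, not taken from any real caller): a CLUSTER-style caller
 * that keeps per-category tuple counts could move such a tuple from the
 * "kept" to the "vacuumed" tally once the rewrite learns it is really dead:
 *
 *		if (rewrite_heap_dead_tuple(rwstate, tuple))
 *		{
 *			tups_vacuumed += 1;
 *			num_tuples -= 1;
 *		}
 */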

/*
 * Insert a tuple to the new relation.  This has to track heap_insert
 * and its subsidiary functions!
 *
 * t_self of the tuple is set to the new TID of the tuple. If t_ctid of the
 * tuple is invalid on entry, it's replaced with the new TID as well (in
 * the inserted data only, not in the caller's copy).
 */
static void
raw_heap_insert(RewriteState state, HeapTuple tup)
{
	Page		page = state->rs_buffer;
	Size		pageFreeSpace,
				saveFreeSpace;
	Size		len;
	OffsetNumber newoff;
	HeapTuple	heaptup;

	/*
	 * If the new tuple is too big for storage or contains already toasted
	 * out-of-line attributes from some other relation, invoke the toaster.
	 *
	 * Note: below this point, heaptup is the data we actually intend to store
	 * into the relation; tup is the caller's original untoasted data.
	 */
	if (state->rs_new_rel->rd_rel->relkind == RELKIND_TOASTVALUE)
	{
		/* toast table entries should never be recursively toasted */
		Assert(!HeapTupleHasExternal(tup));
		heaptup = tup;
	}
	else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
		heaptup = toast_insert_or_update(state->rs_new_rel, tup, NULL,
										 HEAP_INSERT_SKIP_FSM |
										 (state->rs_use_wal ?
										  0 : HEAP_INSERT_SKIP_WAL));
	else
		heaptup = tup;

	len = MAXALIGN(heaptup->t_len);		/* be conservative */

	/*
	 * If we're gonna fail for oversize tuple, do it right away
	 */
	if (len > MaxHeapTupleSize)
		ereport(ERROR,
				(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
				 errmsg("row is too big: size %lu, maximum size %lu",
						(unsigned long) len,
						(unsigned long) MaxHeapTupleSize)));

	/* Compute desired extra freespace due to fillfactor option */
	saveFreeSpace = RelationGetTargetPageFreeSpace(state->rs_new_rel,
												   HEAP_DEFAULT_FILLFACTOR);

	/* Now we can check to see if there's enough free space already. */
	if (state->rs_buffer_valid)
	{
		pageFreeSpace = PageGetHeapFreeSpace(page);

		if (len + saveFreeSpace > pageFreeSpace)
		{
			/* Doesn't fit, so write out the existing page */

			/* XLOG stuff */
			if (state->rs_use_wal)
				log_newpage(&state->rs_new_rel->rd_node,
							MAIN_FORKNUM,
							state->rs_blockno,
							page,
							true);

			/*
			 * Now write the page. We say isTemp = true even if it's not a
			 * temp table, because there's no need for smgr to schedule an
			 * fsync for this write; we'll do it ourselves in
			 * end_heap_rewrite.
			 */
			RelationOpenSmgr(state->rs_new_rel);

			PageSetChecksumInplace(page, state->rs_blockno);

			smgrextend(state->rs_new_rel->rd_smgr, MAIN_FORKNUM,
					   state->rs_blockno, (char *) page, true);

			state->rs_blockno++;
			state->rs_buffer_valid = false;
		}
	}

	if (!state->rs_buffer_valid)
	{
		/* Initialize a new empty page */
		PageInit(page, BLCKSZ, 0);
		state->rs_buffer_valid = true;
	}

	/* And now we can insert the tuple into the page */
	newoff = PageAddItem(page, (Item) heaptup->t_data, heaptup->t_len,
						 InvalidOffsetNumber, false, true);
	if (newoff == InvalidOffsetNumber)
		elog(ERROR, "failed to add tuple");

	/* Update caller's t_self to the actual position where it was stored */
	ItemPointerSet(&(tup->t_self), state->rs_blockno, newoff);

	/*
	 * Insert the correct position into CTID of the stored tuple, too, if the
	 * caller didn't supply a valid CTID.
	 */
	if (!ItemPointerIsValid(&tup->t_data->t_ctid))
	{
		ItemId		newitemid;
		HeapTupleHeader onpage_tup;

		newitemid = PageGetItemId(page, newoff);
		onpage_tup = (HeapTupleHeader) PageGetItem(page, newitemid);

		onpage_tup->t_ctid = tup->t_self;
	}

	/* If heaptup is a private copy, release it. */
	if (heaptup != tup)
		heap_freetuple(heaptup);
}