1 /*-------------------------------------------------------------------------
4 * WAL replay logic for hash index.
7 * Portions Copyright (c) 1996-2017, PostgreSQL Global Development Group
8 * Portions Copyright (c) 1994, Regents of the University of California
11 * src/backend/access/hash/hash_xlog.c
13 *-------------------------------------------------------------------------
17 #include "access/hash.h"
18 #include "access/hash_xlog.h"
19 #include "access/xlogutils.h"
22	 * replay a hash index meta page
25	hash_xlog_init_meta_page(XLogReaderState *record)
27	XLogRecPtr	lsn = record->EndRecPtr;
31	xl_hash_init_meta_page *xlrec = (xl_hash_init_meta_page *) XLogRecGetData(record);
33	/* create the index' metapage */
	/* block ref 0 of the record is the metapage; init a zeroed buffer for it */
34	metabuf = XLogInitBufferForRedo(record, 0);
35	Assert(BufferIsValid(metabuf));
	/* rebuild the meta page contents from the parameters logged at WAL time */
36	_hash_init_metabuffer(metabuf, xlrec->num_tuples, xlrec->procid,
37	xlrec->ffactor, true);
38	page = (Page) BufferGetPage(metabuf);
	/* stamp the page with the record's end LSN and dirty it before release */
39	PageSetLSN(page, lsn);
40	MarkBufferDirty(metabuf);
42	UnlockReleaseBuffer(metabuf);
46	 * replay a hash index bitmap page
49	hash_xlog_init_bitmap_page(XLogReaderState *record)
51	XLogRecPtr	lsn = record->EndRecPtr;
58	xl_hash_init_bitmap_page *xlrec = (xl_hash_init_bitmap_page *) XLogRecGetData(record);
61	 * Initialize bitmap page
	/* block ref 0 is the new bitmap page; init it with the logged bitmap size */
63	bitmapbuf = XLogInitBufferForRedo(record, 0);
64	_hash_initbitmapbuffer(bitmapbuf, xlrec->bmsize, true);
65	PageSetLSN(BufferGetPage(bitmapbuf), lsn);
66	MarkBufferDirty(bitmapbuf);
67	UnlockReleaseBuffer(bitmapbuf);
69	/* add the new bitmap page to the metapage's list of bitmaps */
	/* block ref 1 is the metapage; only touch it if not restored from an FPI */
70	if (XLogReadBufferForRedo(record, 1, &metabuf) == BLK_NEEDS_REDO)
73	* Note: in normal operation, we'd update the metapage while still
74	* holding lock on the bitmap page.  But during replay it's not
75	* necessary to hold that lock, since nobody can see it yet; the
76	* creating transaction hasn't yet committed.
78	page = BufferGetPage(metabuf);
79	metap = HashPageGetMeta(page);
81	num_buckets = metap->hashm_maxbucket + 1;
	/*
	 * Record the new bitmap page's block number at the end of hashm_mapp.
	 * Block num_buckets + 1 is presumably the block just past the initial
	 * buckets (metapage is block 0) -- matches _hash_init's layout.
	 * NOTE(review): the hashm_nmaps++ statement is not visible in this
	 * listing; it follows this line in the upstream source.
	 */
82	metap->hashm_mapp[metap->hashm_nmaps] = num_buckets + 1;
85	PageSetLSN(page, lsn);
86	MarkBufferDirty(metabuf);
	/* metabuf may be invalid if the block ref was absent/skipped */
88	if (BufferIsValid(metabuf))
89	UnlockReleaseBuffer(metabuf);
93	 * replay a hash index insert without split
96	hash_xlog_insert(XLogReaderState *record)
99	XLogRecPtr	lsn = record->EndRecPtr;
100	xl_hash_insert *xlrec = (xl_hash_insert *) XLogRecGetData(record);
	/* block ref 0 is the bucket/overflow page receiving the new tuple */
104	if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
107	char	   *datapos = XLogRecGetBlockData(record, 0, &datalen);
109	page = BufferGetPage(buffer);
	/*
	 * Re-add the tuple at the exact offset recorded at do-time; failure
	 * here means the page is inconsistent with WAL, which is unrecoverable.
	 */
111	if (PageAddItem(page, (Item) datapos, datalen, xlrec->offnum,
112	false, false) == InvalidOffsetNumber)
113	elog(PANIC, "hash_xlog_insert: failed to add item");
115	PageSetLSN(page, lsn);
116	MarkBufferDirty(buffer);
118	if (BufferIsValid(buffer))
119	UnlockReleaseBuffer(buffer);
	/* block ref 1 is the metapage; bump its tuple count to match the insert */
121	if (XLogReadBufferForRedo(record, 1, &buffer) == BLK_NEEDS_REDO)
124	* Note: in normal operation, we'd update the metapage while still
125	* holding lock on the page we inserted into.  But during replay it's
126	* not necessary to hold that lock, since no other index updates can
127	* be happening concurrently.
129	page = BufferGetPage(buffer);
130	metap = HashPageGetMeta(page);
131	metap->hashm_ntuples += 1;
133	PageSetLSN(page, lsn);
134	MarkBufferDirty(buffer);
136	if (BufferIsValid(buffer))
137	UnlockReleaseBuffer(buffer);
141	 * replay addition of overflow page for hash index
144	hash_xlog_add_ovfl_page(XLogReaderState *record)
146	XLogRecPtr	lsn = record->EndRecPtr;
147	xl_hash_add_ovfl_page *xlrec = (xl_hash_add_ovfl_page *) XLogRecGetData(record);
152	BlockNumber rightblk;
153	BlockNumber newmapblk = InvalidBlockNumber;
155	HashPageOpaque ovflopaque;
158	Size		datalen PG_USED_FOR_ASSERTS_ONLY;
159	bool		new_bmpage = false;
	/*
	 * Block ref 0 is the new overflow page ("right"), block ref 1 the page
	 * it is being chained after ("left"); recover both block numbers so the
	 * prev/next links can be rebuilt below.
	 */
161	XLogRecGetBlockTag(record, 0, NULL, NULL, &rightblk);
162	XLogRecGetBlockTag(record, 1, NULL, NULL, &leftblk);
164	ovflbuf = XLogInitBufferForRedo(record, 0);
165	Assert(BufferIsValid(ovflbuf));
	/* the block data payload is the owning bucket number (a single uint32) */
167	data = XLogRecGetBlockData(record, 0, &datalen);
168	num_bucket = (uint32 *) data;
169	Assert(datalen == sizeof(uint32));
170	_hash_initbuf(ovflbuf, InvalidBlockNumber, *num_bucket, LH_OVERFLOW_PAGE,
172	/* update backlink */
173	ovflpage = BufferGetPage(ovflbuf);
174	ovflopaque = (HashPageOpaque) PageGetSpecialPointer(ovflpage);
175	ovflopaque->hasho_prevblkno = leftblk;
177	PageSetLSN(ovflpage, lsn);
178	MarkBufferDirty(ovflbuf);
	/* point the left page's forward link at the new overflow page */
180	if (XLogReadBufferForRedo(record, 1, &leftbuf) == BLK_NEEDS_REDO)
183	HashPageOpaque leftopaque;
185	leftpage = BufferGetPage(leftbuf);
186	leftopaque = (HashPageOpaque) PageGetSpecialPointer(leftpage);
187	leftopaque->hasho_nextblkno = rightblk;
189	PageSetLSN(leftpage, lsn);
190	MarkBufferDirty(leftbuf);
193	if (BufferIsValid(leftbuf))
194	UnlockReleaseBuffer(leftbuf);
195	UnlockReleaseBuffer(ovflbuf);
198	 * Note: in normal operation, we'd update the bitmap and meta page while
199	 * still holding lock on the overflow pages.  But during replay it's not
200	 * necessary to hold those locks, since no other index updates can be
201	 * happening concurrently.
	/* block ref 2, if present, is an existing bitmap page to mark a bit in */
203	if (XLogRecHasBlockRef(record, 2))
207	if (XLogReadBufferForRedo(record, 2, &mapbuffer) == BLK_NEEDS_REDO)
209	Page		mappage = (Page) BufferGetPage(mapbuffer);
210	uint32	   *freep = NULL;
212	uint32	   *bitmap_page_bit;
214	freep = HashPageGetBitmap(mappage);
	/* the payload is the bit number to set for the now-used overflow page */
216	data = XLogRecGetBlockData(record, 2, &datalen);
217	bitmap_page_bit = (uint32 *) data;
219	SETBIT(freep, *bitmap_page_bit);
221	PageSetLSN(mappage, lsn);
222	MarkBufferDirty(mapbuffer);
224	if (BufferIsValid(mapbuffer))
225	UnlockReleaseBuffer(mapbuffer);
	/* block ref 3, if present, is a brand-new bitmap page to initialize */
228	if (XLogRecHasBlockRef(record, 3))
232	newmapbuf = XLogInitBufferForRedo(record, 3);
234	_hash_initbitmapbuffer(newmapbuf, xlrec->bmsize, true);
	/* remember its block number so the metapage update below can record it */
237	newmapblk = BufferGetBlockNumber(newmapbuf);
239	MarkBufferDirty(newmapbuf);
240	PageSetLSN(BufferGetPage(newmapbuf), lsn);
242	UnlockReleaseBuffer(newmapbuf);
	/* block ref 4 is the metapage; its payload is the new firstfree value */
245	if (XLogReadBufferForRedo(record, 4, &metabuf) == BLK_NEEDS_REDO)
249	uint32	   *firstfree_ovflpage;
251	data = XLogRecGetBlockData(record, 4, &datalen);
252	firstfree_ovflpage = (uint32 *) data;
254	page = BufferGetPage(metabuf);
255	metap = HashPageGetMeta(page);
256	metap->hashm_firstfree = *firstfree_ovflpage;
	/*
	 * If no free bit was found in an existing bitmap page at do-time, the
	 * overflow page was newly allocated, so a spares slot is consumed; when
	 * a new bitmap page was also created, register it in hashm_mapp too.
	 */
258	if (!xlrec->bmpage_found)
260	metap->hashm_spares[metap->hashm_ovflpoint]++;
264	Assert(BlockNumberIsValid(newmapblk));
266	metap->hashm_mapp[metap->hashm_nmaps] = newmapblk;
267	metap->hashm_nmaps++;
268	metap->hashm_spares[metap->hashm_ovflpoint]++;
272	PageSetLSN(page, lsn);
273	MarkBufferDirty(metabuf);
275	if (BufferIsValid(metabuf))
276	UnlockReleaseBuffer(metabuf);
280	 * replay allocation of page for split operation
283	hash_xlog_split_allocate_page(XLogReaderState *record)
285	XLogRecPtr	lsn = record->EndRecPtr;
286	xl_hash_split_allocate_page *xlrec = (xl_hash_split_allocate_page *) XLogRecGetData(record);
290	Size		datalen PG_USED_FOR_ASSERTS_ONLY;
292	XLogRedoAction action;
295	 * To be consistent with normal operation, here we take cleanup locks on
296	 * both the old and new buckets even though there can't be any concurrent
300	/* replay the record for old bucket */
	/* get_cleanup_lock = true: mirrors the cleanup lock taken at do-time */
301	action = XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &oldbuf);
304	 * Note that we still update the page even if it was restored from a full
305	 * page image, because the special space is not included in the image.
307	if (action == BLK_NEEDS_REDO || action == BLK_RESTORED)
310	HashPageOpaque oldopaque;
312	oldpage = BufferGetPage(oldbuf);
313	oldopaque = (HashPageOpaque) PageGetSpecialPointer(oldpage);
	/*
	 * Mark the old bucket as being split; hasho_prevblkno of a primary
	 * bucket page is used here to store the new bucket it is split into
	 * (as logged in the WAL record).
	 */
315	oldopaque->hasho_flag = xlrec->old_bucket_flag;
316	oldopaque->hasho_prevblkno = xlrec->new_bucket;
318	PageSetLSN(oldpage, lsn);
319	MarkBufferDirty(oldbuf);
322	/* replay the record for new bucket */
323	newbuf = XLogInitBufferForRedo(record, 1);
324	_hash_initbuf(newbuf, xlrec->new_bucket, xlrec->new_bucket,
325	xlrec->new_bucket_flag, true);
	/* a freshly-inited buffer must be cleanup-lockable; anything else is fatal */
326	if (!IsBufferCleanupOK(newbuf))
327	elog(PANIC, "hash_xlog_split_allocate_page: failed to acquire cleanup lock");
328	MarkBufferDirty(newbuf);
329	PageSetLSN(BufferGetPage(newbuf), lsn);
332	 * We can release the lock on old bucket early as well but doing here to
333	 * consistent with normal operation.
335	if (BufferIsValid(oldbuf))
336	UnlockReleaseBuffer(oldbuf);
337	if (BufferIsValid(newbuf))
338	UnlockReleaseBuffer(newbuf);
341	 * Note: in normal operation, we'd update the meta page while still
342	 * holding lock on the old and new bucket pages.  But during replay it's
343	 * not necessary to hold those locks, since no other bucket splits can be
344	 * happening concurrently.
347	/* replay the record for metapage changes */
348	if (XLogReadBufferForRedo(record, 2, &metabuf) == BLK_NEEDS_REDO)
353	page = BufferGetPage(metabuf);
354	metap = HashPageGetMeta(page);
355	metap->hashm_maxbucket = xlrec->new_bucket;
	/*
	 * The metapage block data is a packed sequence of uint32s; which fields
	 * are present is indicated by xlrec->flags, consumed in order below.
	 */
357	data = XLogRecGetBlockData(record, 2, &datalen);
359	if (xlrec->flags & XLH_SPLIT_META_UPDATE_MASKS)
364	/* extract low and high masks. */
	/* lowmask read via memcpy (may be unaligned); highmask via pointer */
365	memcpy(&lowmask, data, sizeof(uint32));
366	highmask = (uint32 *) ((char *) data + sizeof(uint32));
368	/* update metapage */
369	metap->hashm_lowmask = lowmask;
370	metap->hashm_highmask = *highmask;
	/* advance past the two mask words so splitpoint data (if any) follows */
372	data += sizeof(uint32) * 2;
375	if (xlrec->flags & XLH_SPLIT_META_UPDATE_SPLITPOINT)
380	/* extract information of overflow pages. */
381	memcpy(&ovflpoint, data, sizeof(uint32));
382	ovflpages = (uint32 *) ((char *) data + sizeof(uint32));
384	/* update metapage */
385	metap->hashm_spares[ovflpoint] = *ovflpages;
386	metap->hashm_ovflpoint = ovflpoint;
389	MarkBufferDirty(metabuf);
390	PageSetLSN(BufferGetPage(metabuf), lsn);
393	if (BufferIsValid(metabuf))
394	UnlockReleaseBuffer(metabuf);
398	 * replay of split operation
401	hash_xlog_split_page(XLogReaderState *record)
	/*
	 * A split-page record is always logged with a full-page image of the
	 * new bucket page, so replay is simply restoring that image; anything
	 * else indicates a corrupt or mis-built record.
	 */
405	if (XLogReadBufferForRedo(record, 0, &buf) != BLK_RESTORED)
406	elog(ERROR, "Hash split record did not contain a full-page image");
408	UnlockReleaseBuffer(buf);
412	 * replay completion of split operation
415	hash_xlog_split_complete(XLogReaderState *record)
417	XLogRecPtr	lsn = record->EndRecPtr;
418	xl_hash_split_complete *xlrec = (xl_hash_split_complete *) XLogRecGetData(record);
421	XLogRedoAction action;
423	/* replay the record for old bucket */
424	action = XLogReadBufferForRedo(record, 0, &oldbuf);
427	 * Note that we still update the page even if it was restored from a full
428	 * page image, because the bucket flag is not included in the image.
430	if (action == BLK_NEEDS_REDO || action == BLK_RESTORED)
433	HashPageOpaque oldopaque;
435	oldpage = BufferGetPage(oldbuf);
436	oldopaque = (HashPageOpaque) PageGetSpecialPointer(oldpage);
	/* install the post-split flag state logged for the old bucket */
438	oldopaque->hasho_flag = xlrec->old_bucket_flag;
440	PageSetLSN(oldpage, lsn);
441	MarkBufferDirty(oldbuf);
443	if (BufferIsValid(oldbuf))
444	UnlockReleaseBuffer(oldbuf);
446	/* replay the record for new bucket */
447	action = XLogReadBufferForRedo(record, 1, &newbuf);
450	 * Note that we still update the page even if it was restored from a full
451	 * page image, because the bucket flag is not included in the image.
453	if (action == BLK_NEEDS_REDO || action == BLK_RESTORED)
456	HashPageOpaque nopaque;
458	newpage = BufferGetPage(newbuf);
459	nopaque = (HashPageOpaque) PageGetSpecialPointer(newpage);
	/* install the post-split flag state logged for the new bucket */
461	nopaque->hasho_flag = xlrec->new_bucket_flag;
463	PageSetLSN(newpage, lsn);
464	MarkBufferDirty(newbuf);
466	if (BufferIsValid(newbuf))
467	UnlockReleaseBuffer(newbuf);
471	 * replay move of page contents for squeeze operation of hash index
474	hash_xlog_move_page_contents(XLogReaderState *record)
476	XLogRecPtr	lsn = record->EndRecPtr;
477	xl_hash_move_page_contents *xldata = (xl_hash_move_page_contents *) XLogRecGetData(record);
478	Buffer		bucketbuf = InvalidBuffer;
479	Buffer		writebuf = InvalidBuffer;
480	Buffer		deletebuf = InvalidBuffer;
481	XLogRedoAction action;
484	 * Ensure we have a cleanup lock on primary bucket page before we start
485	 * with the actual replay operation.  This is to ensure that neither a
486	 * scan can start nor a scan can be already-in-progress during the replay
487	 * of this operation.  If we allow scans during this operation, then they
488	 * can miss some records or show the same record multiple times.
	/*
	 * If the write target IS the primary bucket page, one cleanup-locked
	 * read of block ref 1 suffices; otherwise cleanup-lock the primary
	 * bucket (block ref 0) separately, then read the write page normally.
	 */
490	if (xldata->is_prim_bucket_same_wrt)
491	action = XLogReadBufferForRedoExtended(record, 1, RBM_NORMAL, true, &writebuf);
495	* we don't care for return value as the purpose of reading bucketbuf
496	* is to ensure a cleanup lock on primary bucket page.
498	(void) XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &bucketbuf);
500	action = XLogReadBufferForRedo(record, 1, &writebuf);
503	/* replay the record for adding entries in overflow buffer */
504	if (action == BLK_NEEDS_REDO)
510	uint16		ninserted = 0;
	/*
	 * Block data layout: an array of ntups target OffsetNumbers, followed
	 * by the tuples themselves (MAXALIGNed), consumed until datalen.
	 */
512	data = begin = XLogRecGetBlockData(record, 1, &datalen);
514	writepage = (Page) BufferGetPage(writebuf);
516	if (xldata->ntups > 0)
518	OffsetNumber *towrite = (OffsetNumber *) data;
520	data += sizeof(OffsetNumber) * xldata->ntups;
522	while (data - begin < datalen)
524	IndexTuple	itup = (IndexTuple) data;
528	itemsz = IndexTupleDSize(*itup);
529	itemsz = MAXALIGN(itemsz);
	/* re-add each tuple at the offset recorded for it at do-time */
533	l = PageAddItem(writepage, (Item) itup, itemsz, towrite[ninserted], false, false);
534	if (l == InvalidOffsetNumber)
535	elog(ERROR, "hash_xlog_move_page_contents: failed to add item to hash index page, size %d bytes",
	/* NOTE(review): the data += itemsz / ninserted++ advance statements are
	 * not visible in this listing; they follow inside the loop upstream. */
543	* number of tuples inserted must be same as requested in REDO record.
545	Assert(ninserted == xldata->ntups);
547	PageSetLSN(writepage, lsn);
548	MarkBufferDirty(writebuf);
551	/* replay the record for deleting entries from overflow buffer */
552	if (XLogReadBufferForRedo(record, 2, &deletebuf) == BLK_NEEDS_REDO)
	/* block data for ref 2 is an array of OffsetNumbers to delete */
558	ptr = XLogRecGetBlockData(record, 2, &len);
560	page = (Page) BufferGetPage(deletebuf);
564	OffsetNumber *unused;
567	unused = (OffsetNumber *) ptr;
568	unend = (OffsetNumber *) ((char *) ptr + len);
570	if ((unend - unused) > 0)
571	PageIndexMultiDelete(page, unused, unend - unused);
574	PageSetLSN(page, lsn);
575	MarkBufferDirty(deletebuf);
579	 * Replay is complete, now we can release the buffers. We release locks at
580	 * end of replay operation to ensure that we hold lock on primary bucket
581	 * page till end of operation.  We can optimize by releasing the lock on
582	 * write buffer as soon as the operation for same is complete, if it is
583	 * not same as primary bucket page, but that doesn't seem to be worth
584	 * complicating the code.
586	if (BufferIsValid(deletebuf))
587	UnlockReleaseBuffer(deletebuf);
589	if (BufferIsValid(writebuf))
590	UnlockReleaseBuffer(writebuf);
592	if (BufferIsValid(bucketbuf))
593	UnlockReleaseBuffer(bucketbuf);
597	 * replay squeeze page operation of hash index
600	hash_xlog_squeeze_page(XLogReaderState *record)
602	XLogRecPtr	lsn = record->EndRecPtr;
603	xl_hash_squeeze_page *xldata = (xl_hash_squeeze_page *) XLogRecGetData(record);
604	Buffer		bucketbuf = InvalidBuffer;
607	Buffer		prevbuf = InvalidBuffer;
609	XLogRedoAction action;
612	 * Ensure we have a cleanup lock on primary bucket page before we start
613	 * with the actual replay operation.  This is to ensure that neither a
614	 * scan can start nor a scan can be already-in-progress during the replay
615	 * of this operation.  If we allow scans during this operation, then they
616	 * can miss some records or show the same record multiple times.
	/*
	 * Same locking dance as hash_xlog_move_page_contents: cleanup-lock the
	 * primary bucket page, either as the write target itself (block ref 1)
	 * or separately via block ref 0.
	 */
618	if (xldata->is_prim_bucket_same_wrt)
619	action = XLogReadBufferForRedoExtended(record, 1, RBM_NORMAL, true, &writebuf);
623	* we don't care for return value as the purpose of reading bucketbuf
624	* is to ensure a cleanup lock on primary bucket page.
626	(void) XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &bucketbuf);
628	action = XLogReadBufferForRedo(record, 1, &writebuf);
631	/* replay the record for adding entries in overflow buffer */
632	if (action == BLK_NEEDS_REDO)
638	uint16		ninserted = 0;
	/*
	 * Block data layout: an array of ntups target OffsetNumbers, followed
	 * by the tuples themselves (MAXALIGNed), consumed until datalen.
	 */
640	data = begin = XLogRecGetBlockData(record, 1, &datalen);
642	writepage = (Page) BufferGetPage(writebuf);
644	if (xldata->ntups > 0)
646	OffsetNumber *towrite = (OffsetNumber *) data;
648	data += sizeof(OffsetNumber) * xldata->ntups;
650	while (data - begin < datalen)
652	IndexTuple	itup = (IndexTuple) data;
656	itemsz = IndexTupleDSize(*itup);
657	itemsz = MAXALIGN(itemsz);
	/* re-add each moved tuple at the offset recorded for it at do-time */
661	l = PageAddItem(writepage, (Item) itup, itemsz, towrite[ninserted], false, false);
662	if (l == InvalidOffsetNumber)
663	elog(ERROR, "hash_xlog_squeeze_page: failed to add item to hash index page, size %d bytes",
	/* NOTE(review): the data += itemsz / ninserted++ advance statements are
	 * not visible in this listing; they follow inside the loop upstream. */
671	* number of tuples inserted must be same as requested in REDO record.
673	Assert(ninserted == xldata->ntups);
676	* if the page on which are adding tuples is a page previous to freed
677	* overflow page, then update its nextblno.
679	if (xldata->is_prev_bucket_same_wrt)
681	HashPageOpaque writeopaque = (HashPageOpaque) PageGetSpecialPointer(writepage);
683	writeopaque->hasho_nextblkno = xldata->nextblkno;
686	PageSetLSN(writepage, lsn);
687	MarkBufferDirty(writebuf);
690	/* replay the record for initializing overflow buffer */
	/* block ref 2 is the freed overflow page; wipe it back to empty */
691	if (XLogReadBufferForRedo(record, 2, &ovflbuf) == BLK_NEEDS_REDO)
695	ovflpage = BufferGetPage(ovflbuf);
697	_hash_pageinit(ovflpage, BufferGetPageSize(ovflbuf));
699	PageSetLSN(ovflpage, lsn);
700	MarkBufferDirty(ovflbuf);
702	if (BufferIsValid(ovflbuf))
703	UnlockReleaseBuffer(ovflbuf);
705	/* replay the record for page previous to the freed overflow page */
	/* skip if that page is the write page, already fixed up above */
706	if (!xldata->is_prev_bucket_same_wrt &&
707	XLogReadBufferForRedo(record, 3, &prevbuf) == BLK_NEEDS_REDO)
709	Page		prevpage = BufferGetPage(prevbuf);
710	HashPageOpaque prevopaque = (HashPageOpaque) PageGetSpecialPointer(prevpage);
	/* unlink the freed page by bridging prev -> next */
712	prevopaque->hasho_nextblkno = xldata->nextblkno;
714	PageSetLSN(prevpage, lsn);
715	MarkBufferDirty(prevbuf);
717	if (BufferIsValid(prevbuf))
718	UnlockReleaseBuffer(prevbuf);
720	/* replay the record for page next to the freed overflow page */
721	if (XLogRecHasBlockRef(record, 4))
725	if (XLogReadBufferForRedo(record, 4, &nextbuf) == BLK_NEEDS_REDO)
727	Page		nextpage = BufferGetPage(nextbuf);
728	HashPageOpaque nextopaque = (HashPageOpaque) PageGetSpecialPointer(nextpage);
	/* unlink the freed page by bridging next -> prev */
730	nextopaque->hasho_prevblkno = xldata->prevblkno;
732	PageSetLSN(nextpage, lsn);
733	MarkBufferDirty(nextbuf);
735	if (BufferIsValid(nextbuf))
736	UnlockReleaseBuffer(nextbuf);
739	if (BufferIsValid(writebuf))
740	UnlockReleaseBuffer(writebuf);
742	if (BufferIsValid(bucketbuf))
743	UnlockReleaseBuffer(bucketbuf);
746	 * Note: in normal operation, we'd update the bitmap and meta page while
747	 * still holding lock on the primary bucket page and overflow pages.  But
748	 * during replay it's not necessary to hold those locks, since no other
749	 * index updates can be happening concurrently.
751	/* replay the record for bitmap page */
752	if (XLogReadBufferForRedo(record, 5, &mapbuf) == BLK_NEEDS_REDO)
754	Page		mappage = (Page) BufferGetPage(mapbuf);
755	uint32	   *freep = NULL;
757	uint32	   *bitmap_page_bit;
760	freep = HashPageGetBitmap(mappage);
	/* payload is the bit number to clear, marking the overflow page free */
762	data = XLogRecGetBlockData(record, 5, &datalen);
763	bitmap_page_bit = (uint32 *) data;
765	CLRBIT(freep, *bitmap_page_bit);
767	PageSetLSN(mappage, lsn);
768	MarkBufferDirty(mapbuf);
770	if (BufferIsValid(mapbuf))
771	UnlockReleaseBuffer(mapbuf);
773	/* replay the record for meta page */
	/* block ref 6 is optional: present only when firstfree moved backward */
774	if (XLogRecHasBlockRef(record, 6))
778	if (XLogReadBufferForRedo(record, 6, &metabuf) == BLK_NEEDS_REDO)
783	uint32	   *firstfree_ovflpage;
786	data = XLogRecGetBlockData(record, 6, &datalen);
787	firstfree_ovflpage = (uint32 *) data;
789	page = BufferGetPage(metabuf);
790	metap = HashPageGetMeta(page);
791	metap->hashm_firstfree = *firstfree_ovflpage;
793	PageSetLSN(page, lsn);
794	MarkBufferDirty(metabuf);
796	if (BufferIsValid(metabuf))
797	UnlockReleaseBuffer(metabuf);
802	 * replay delete operation of hash index
805	hash_xlog_delete(XLogReaderState *record)
807	XLogRecPtr	lsn = record->EndRecPtr;
808	xl_hash_delete *xldata = (xl_hash_delete *) XLogRecGetData(record);
809	Buffer		bucketbuf = InvalidBuffer;
812	XLogRedoAction action;
815	 * Ensure we have a cleanup lock on primary bucket page before we start
816	 * with the actual replay operation.  This is to ensure that neither a
817	 * scan can start nor a scan can be already-in-progress during the replay
818	 * of this operation.  If we allow scans during this operation, then they
819	 * can miss some records or show the same record multiple times.
	/*
	 * If the delete target IS the primary bucket page, one cleanup-locked
	 * read of block ref 1 suffices; otherwise cleanup-lock the primary
	 * bucket (block ref 0) first, then read the delete page normally.
	 */
821	if (xldata->is_primary_bucket_page)
822	action = XLogReadBufferForRedoExtended(record, 1, RBM_NORMAL, true, &deletebuf);
826	* we don't care for return value as the purpose of reading bucketbuf
827	* is to ensure a cleanup lock on primary bucket page.
829	(void) XLogReadBufferForRedoExtended(record, 0, RBM_NORMAL, true, &bucketbuf);
831	action = XLogReadBufferForRedo(record, 1, &deletebuf);
834	/* replay the record for deleting entries in bucket page */
835	if (action == BLK_NEEDS_REDO)
	/* block data is an array of OffsetNumbers identifying tuples to delete */
840	ptr = XLogRecGetBlockData(record, 1, &len);
842	page = (Page) BufferGetPage(deletebuf);
846	OffsetNumber *unused;
849	unused = (OffsetNumber *) ptr;
850	unend = (OffsetNumber *) ((char *) ptr + len);
852	if ((unend - unused) > 0)
853	PageIndexMultiDelete(page, unused, unend - unused);
856	PageSetLSN(page, lsn);
857	MarkBufferDirty(deletebuf);
859	if (BufferIsValid(deletebuf))
860	UnlockReleaseBuffer(deletebuf);
862	if (BufferIsValid(bucketbuf))
863	UnlockReleaseBuffer(bucketbuf);
867	 * replay split cleanup flag operation for primary bucket page.
870	hash_xlog_split_cleanup(XLogReaderState *record)
872	XLogRecPtr	lsn = record->EndRecPtr;
876	if (XLogReadBufferForRedo(record, 0, &buffer) == BLK_NEEDS_REDO)
878	HashPageOpaque bucket_opaque;
880	page = (Page) BufferGetPage(buffer);
882	bucket_opaque = (HashPageOpaque) PageGetSpecialPointer(page);
	/* clear the needs-split-cleanup flag; all other flag bits are preserved */
883	bucket_opaque->hasho_flag &= ~LH_BUCKET_NEEDS_SPLIT_CLEANUP;
884	PageSetLSN(page, lsn);
885	MarkBufferDirty(buffer);
887	if (BufferIsValid(buffer))
888	UnlockReleaseBuffer(buffer);
892	 * replay for update meta page
895	hash_xlog_update_meta_page(XLogReaderState *record)
898	XLogRecPtr	lsn = record->EndRecPtr;
899	xl_hash_update_meta_page *xldata = (xl_hash_update_meta_page *) XLogRecGetData(record);
903	if (XLogReadBufferForRedo(record, 0, &metabuf) == BLK_NEEDS_REDO)
905	page = BufferGetPage(metabuf);
906	metap = HashPageGetMeta(page);
	/* overwrite the index tuple count with the value logged at do-time */
908	metap->hashm_ntuples = xldata->ntuples;
910	PageSetLSN(page, lsn);
911	MarkBufferDirty(metabuf);
913	if (BufferIsValid(metabuf))
914	UnlockReleaseBuffer(metabuf);
918	hash_redo(XLogReaderState *record)
	/*
	 * Top-level redo dispatcher for the hash access method: mask off the
	 * xlog-internal info bits and route the record to the matching replay
	 * routine.  Unknown opcodes are a PANIC -- WAL we cannot interpret
	 * means recovery cannot proceed safely.
	 */
920	uint8		info = XLogRecGetInfo(record) & ~XLR_INFO_MASK;
924	case XLOG_HASH_INIT_META_PAGE:
925	hash_xlog_init_meta_page(record);
927	case XLOG_HASH_INIT_BITMAP_PAGE:
928	hash_xlog_init_bitmap_page(record);
930	case XLOG_HASH_INSERT:
931	hash_xlog_insert(record);
933	case XLOG_HASH_ADD_OVFL_PAGE:
934	hash_xlog_add_ovfl_page(record);
936	case XLOG_HASH_SPLIT_ALLOCATE_PAGE:
937	hash_xlog_split_allocate_page(record);
939	case XLOG_HASH_SPLIT_PAGE:
940	hash_xlog_split_page(record);
942	case XLOG_HASH_SPLIT_COMPLETE:
943	hash_xlog_split_complete(record);
945	case XLOG_HASH_MOVE_PAGE_CONTENTS:
946	hash_xlog_move_page_contents(record);
948	case XLOG_HASH_SQUEEZE_PAGE:
949	hash_xlog_squeeze_page(record);
951	case XLOG_HASH_DELETE:
952	hash_xlog_delete(record);
954	case XLOG_HASH_SPLIT_CLEANUP:
955	hash_xlog_split_cleanup(record);
957	case XLOG_HASH_UPDATE_META_PAGE:
958	hash_xlog_update_meta_page(record);
961	elog(PANIC, "hash_redo: unknown op code %u", info);