1 /*-------------------------------------------------------------------------
2  *
3  * hash.c
4  *        Implementation of Margo Seltzer's Hashing package for postgres.
5  *
6  * Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group
7  * Portions Copyright (c) 1994, Regents of the University of California
8  *
9  *
10  * IDENTIFICATION
11  *        src/backend/access/hash/hash.c
12  *
13  * NOTES
14  *        This file contains only the public interface routines.
15  *
16  *-------------------------------------------------------------------------
17  */
18
19 #include "postgres.h"
20
21 #include "access/hash.h"
22 #include "access/relscan.h"
23 #include "catalog/index.h"
24 #include "commands/vacuum.h"
25 #include "optimizer/cost.h"
26 #include "optimizer/plancat.h"
27 #include "storage/bufmgr.h"
28 #include "utils/rel.h"
29
30
31 /* Working state for hashbuild and its callback */
32 typedef struct
33 {
34         HSpool     *spool;                      /* NULL if not using spooling */
35         double          indtuples;              /* # tuples accepted into index */
36 } HashBuildState;
37
38 static void hashbuildCallback(Relation index,
39                                   HeapTuple htup,
40                                   Datum *values,
41                                   bool *isnull,
42                                   bool tupleIsAlive,
43                                   void *state);
44
45
46 /*
47  *      hashbuild() -- build a new hash index.
48  */
49 Datum
50 hashbuild(PG_FUNCTION_ARGS)
51 {
52         Relation        heap = (Relation) PG_GETARG_POINTER(0);
53         Relation        index = (Relation) PG_GETARG_POINTER(1);
54         IndexInfo  *indexInfo = (IndexInfo *) PG_GETARG_POINTER(2);
55         IndexBuildResult *result;
56         BlockNumber relpages;
57         double          reltuples;
58         double          allvisfrac;
59         uint32          num_buckets;
60         HashBuildState buildstate;
61
62         /*
63          * We expect to be called exactly once for any index relation. If that's
64          * not the case, we have big trouble.
65          */
66         if (RelationGetNumberOfBlocks(index) != 0)
67                 elog(ERROR, "index \"%s\" already contains data",
68                          RelationGetRelationName(index));
69
70         /* Estimate the number of rows currently present in the table */
71         estimate_rel_size(heap, NULL, &relpages, &reltuples, &allvisfrac);
72
73         /* Initialize the hash index metadata page and initial buckets */
74         num_buckets = _hash_metapinit(index, reltuples, MAIN_FORKNUM);
75
76         /*
77          * If we just insert the tuples into the index in scan order, then
78          * (assuming their hash codes are pretty random) there will be no locality
79          * of access to the index, and if the index is bigger than available RAM
80          * then we'll thrash horribly.  To prevent that scenario, we can sort the
81          * tuples by (expected) bucket number.  However, such a sort is useless
82          * overhead when the index does fit in RAM.  We choose to sort if the
83          * initial index size exceeds NBuffers.
84          *
85          * NOTE: this test will need adjustment if a bucket is ever different from
86          * one page.
87          */
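	/*
	 * Illustrative numbers only (assumed defaults, not taken from this file):
	 * with 8K pages and shared_buffers = 128MB, NBuffers is 16384, so the
	 * spool-and-sort path below kicks in once the estimated initial index
	 * size reaches roughly 16384 bucket pages, i.e. about 128MB.
	 */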
88         if (num_buckets >= (uint32) NBuffers)
89                 buildstate.spool = _h_spoolinit(heap, index, num_buckets);
90         else
91                 buildstate.spool = NULL;
92
93         /* prepare to build the index */
94         buildstate.indtuples = 0;
95
96         /* do the heap scan */
97         reltuples = IndexBuildHeapScan(heap, index, indexInfo, true,
98                                                                    hashbuildCallback, (void *) &buildstate);
99
100         if (buildstate.spool)
101         {
102                 /* sort the tuples and insert them into the index */
103                 _h_indexbuild(buildstate.spool);
104                 _h_spooldestroy(buildstate.spool);
105         }
106
107         /*
108          * Return statistics
109          */
110         result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult));
111
112         result->heap_tuples = reltuples;
113         result->index_tuples = buildstate.indtuples;
114
115         PG_RETURN_POINTER(result);
116 }
117
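/*
 * Usage sketch (the table and column names are made up for illustration):
 * hashbuild() runs when a hash index is created, e.g.
 *
 *		CREATE INDEX t_k_hash_idx ON t USING hash (k);
 *
 * while hashinsert() below is called for each row subsequently inserted into
 * the indexed table.
 */
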
118 /*
119  *      hashbuildempty() -- build an empty hash index in the initialization fork
120  */
121 Datum
122 hashbuildempty(PG_FUNCTION_ARGS)
123 {
124         Relation        index = (Relation) PG_GETARG_POINTER(0);
125
126         _hash_metapinit(index, 0, INIT_FORKNUM);
127
128         PG_RETURN_VOID();
129 }
130
131 /*
132  * Per-tuple callback from IndexBuildHeapScan
133  */
134 static void
135 hashbuildCallback(Relation index,
136                                   HeapTuple htup,
137                                   Datum *values,
138                                   bool *isnull,
139                                   bool tupleIsAlive,
140                                   void *state)
141 {
142         HashBuildState *buildstate = (HashBuildState *) state;
143         IndexTuple      itup;
144
145         /* Hash indexes don't index nulls, see notes in hashinsert */
146         if (isnull[0])
147                 return;
148
149         /* Either spool the tuple for sorting, or just put it into the index */
150         if (buildstate->spool)
151                 _h_spool(buildstate->spool, &htup->t_self, values, isnull);
152         else
153         {
154                 /* form an index tuple and point it at the heap tuple */
155                 itup = _hash_form_tuple(index, values, isnull);
156                 itup->t_tid = htup->t_self;
157                 _hash_doinsert(index, itup);
158                 pfree(itup);
159         }
160
161         buildstate->indtuples += 1;
162 }
163
164 /*
165  *      hashinsert() -- insert an index tuple into a hash table.
166  *
167  *      Hash on the heap tuple's key, form an index tuple with hash code.
168  *      Find the appropriate location for the new tuple, and put it there.
169  */
170 Datum
171 hashinsert(PG_FUNCTION_ARGS)
172 {
173         Relation        rel = (Relation) PG_GETARG_POINTER(0);
174         Datum      *values = (Datum *) PG_GETARG_POINTER(1);
175         bool       *isnull = (bool *) PG_GETARG_POINTER(2);
176         ItemPointer ht_ctid = (ItemPointer) PG_GETARG_POINTER(3);
177
178 #ifdef NOT_USED
179         Relation        heapRel = (Relation) PG_GETARG_POINTER(4);
180         IndexUniqueCheck checkUnique = (IndexUniqueCheck) PG_GETARG_INT32(5);
181 #endif
182         IndexTuple      itup;
183
184         /*
185          * If the single index key is null, we don't insert it into the index.
186          * Hash tables support scans on '='. Relational algebra says that A = B
187          * returns null if either A or B is null.  This means that no
188          * qualification used in an index scan could ever return true on a null
189          * attribute.  It also means that indices can't be used by ISNULL or
190          * NOTNULL scans, but that's an artifact of the strategy map architecture
191          * chosen in 1986, not of the way nulls are handled here.
192          */
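	/*
	 * For example (illustrative queries; "t" and "k" are assumed names): a
	 * hash index on t(k) can support
	 *		SELECT * FROM t WHERE k = 42;
	 * but
	 *		SELECT * FROM t WHERE k IS NULL;
	 * cannot use the index, because null keys are never entered into it.
	 */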
193         if (isnull[0])
194                 PG_RETURN_BOOL(false);
195
196         /* generate an index tuple */
197         itup = _hash_form_tuple(rel, values, isnull);
198         itup->t_tid = *ht_ctid;
199
200         _hash_doinsert(rel, itup);
201
202         pfree(itup);
203
204         PG_RETURN_BOOL(false);
205 }
206
207
208 /*
209  *      hashgettuple() -- Get the next tuple in the scan.
210  */
211 Datum
212 hashgettuple(PG_FUNCTION_ARGS)
213 {
214         IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
215         ScanDirection dir = (ScanDirection) PG_GETARG_INT32(1);
216         HashScanOpaque so = (HashScanOpaque) scan->opaque;
217         Relation        rel = scan->indexRelation;
218         Buffer          buf;
219         Page            page;
220         OffsetNumber offnum;
221         ItemPointer current;
222         bool            res;
223
224         /* Hash indexes are always lossy since we store only the hash code */
225         scan->xs_recheck = true;
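	/*
	 * (Only the 32-bit hash code of the key is stored in the index, so two
	 * distinct keys that collide on the same hash value both match an index
	 * entry; the recheck against the heap tuple filters such false positives.)
	 */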
226
227         /*
228          * We hold pin but not lock on current buffer while outside the hash AM.
229          * Reacquire the read lock here.
230          */
231         if (BufferIsValid(so->hashso_curbuf))
232                 _hash_chgbufaccess(rel, so->hashso_curbuf, HASH_NOLOCK, HASH_READ);
233
234         /*
235          * If we've already initialized this scan, we can just advance it in the
236          * appropriate direction.  If we haven't done so yet, we call a routine to
237          * get the first item in the scan.
238          */
239         current = &(so->hashso_curpos);
240         if (ItemPointerIsValid(current))
241         {
242                 /*
243                  * An insertion into the current index page could have happened while
244                  * we didn't have read lock on it.  Re-find our position by looking
245                  * for the TID we previously returned.  (Because we hold share lock on
246                  * the bucket, no deletions or splits could have occurred; therefore
247                  * we can expect that the TID still exists in the current index page,
248                  * at an offset >= where we were.)
249                  */
250                 OffsetNumber maxoffnum;
251
252                 buf = so->hashso_curbuf;
253                 Assert(BufferIsValid(buf));
254                 page = BufferGetPage(buf);
255                 maxoffnum = PageGetMaxOffsetNumber(page);
256                 for (offnum = ItemPointerGetOffsetNumber(current);
257                          offnum <= maxoffnum;
258                          offnum = OffsetNumberNext(offnum))
259                 {
260                         IndexTuple      itup;
261
262                         itup = (IndexTuple) PageGetItem(page, PageGetItemId(page, offnum));
263                         if (ItemPointerEquals(&(so->hashso_heappos), &(itup->t_tid)))
264                                 break;
265                 }
266                 if (offnum > maxoffnum)
267                         elog(ERROR, "failed to re-find scan position within index \"%s\"",
268                                  RelationGetRelationName(rel));
269                 ItemPointerSetOffsetNumber(current, offnum);
270
271                 /*
272                  * Check to see if we should kill the previously-fetched tuple.
273                  */
274                 if (scan->kill_prior_tuple)
275                 {
276                         /*
277                          * Yes, so mark it by setting the LP_DEAD state in the item flags.
278                          */
279                         ItemIdMarkDead(PageGetItemId(page, offnum));
280
281                         /*
282                          * Since this can be redone later if needed, mark as a hint.
283                          */
284                         MarkBufferDirtyHint(buf, true);
285                 }
286
287                 /*
288                  * Now continue the scan.
289                  */
290                 res = _hash_next(scan, dir);
291         }
292         else
293                 res = _hash_first(scan, dir);
294
295         /*
296          * Skip killed tuples if asked to.
297          */
298         if (scan->ignore_killed_tuples)
299         {
300                 while (res)
301                 {
302                         offnum = ItemPointerGetOffsetNumber(current);
303                         page = BufferGetPage(so->hashso_curbuf);
304                         if (!ItemIdIsDead(PageGetItemId(page, offnum)))
305                                 break;
306                         res = _hash_next(scan, dir);
307                 }
308         }
309
310         /* Release read lock on current buffer, but keep it pinned */
311         if (BufferIsValid(so->hashso_curbuf))
312                 _hash_chgbufaccess(rel, so->hashso_curbuf, HASH_READ, HASH_NOLOCK);
313
314         /* Return current heap TID on success */
315         scan->xs_ctup.t_self = so->hashso_heappos;
316
317         PG_RETURN_BOOL(res);
318 }
319
320
321 /*
322  *      hashgetbitmap() -- get all tuples at once
323  */
324 Datum
325 hashgetbitmap(PG_FUNCTION_ARGS)
326 {
327         IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
328         TIDBitmap  *tbm = (TIDBitmap *) PG_GETARG_POINTER(1);
329         HashScanOpaque so = (HashScanOpaque) scan->opaque;
330         bool            res;
331         int64           ntids = 0;
332
333         res = _hash_first(scan, ForwardScanDirection);
334
335         while (res)
336         {
337                 bool            add_tuple;
338
339                 /*
340                  * Skip killed tuples if asked to.
341                  */
342                 if (scan->ignore_killed_tuples)
343                 {
344                         Page            page;
345                         OffsetNumber offnum;
346
347                         offnum = ItemPointerGetOffsetNumber(&(so->hashso_curpos));
348                         page = BufferGetPage(so->hashso_curbuf);
349                         add_tuple = !ItemIdIsDead(PageGetItemId(page, offnum));
350                 }
351                 else
352                         add_tuple = true;
353
354                 /* Save tuple ID, and continue scanning */
355                 if (add_tuple)
356                 {
357                         /* Note we mark the tuple ID as requiring recheck */
358                         tbm_add_tuples(tbm, &(so->hashso_heappos), 1, true);
359                         ntids++;
360                 }
361
362                 res = _hash_next(scan, ForwardScanDirection);
363         }
364
365         PG_RETURN_INT64(ntids);
366 }
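
/*
 * Typical plan shape served by hashgetbitmap() (illustrative EXPLAIN output
 * with assumed table and index names, not taken from this file):
 *
 *		Bitmap Heap Scan on t
 *		  Recheck Cond: (k = 42)
 *		  ->  Bitmap Index Scan on t_k_hash_idx
 *				Index Cond: (k = 42)
 *
 * Every TID is added with the recheck flag set, since the index stores only
 * hash codes and cannot rule out collisions by itself.
 */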
367
368
369 /*
370  *      hashbeginscan() -- start a scan on a hash index
371  */
372 Datum
373 hashbeginscan(PG_FUNCTION_ARGS)
374 {
375         Relation        rel = (Relation) PG_GETARG_POINTER(0);
376         int                     nkeys = PG_GETARG_INT32(1);
377         int                     norderbys = PG_GETARG_INT32(2);
378         IndexScanDesc scan;
379         HashScanOpaque so;
380
381         /* no order by operators allowed */
382         Assert(norderbys == 0);
383
384         scan = RelationGetIndexScan(rel, nkeys, norderbys);
385
386         so = (HashScanOpaque) palloc(sizeof(HashScanOpaqueData));
387         so->hashso_bucket_valid = false;
388         so->hashso_bucket_blkno = 0;
389         so->hashso_curbuf = InvalidBuffer;
390         /* set position invalid (this will cause _hash_first call) */
391         ItemPointerSetInvalid(&(so->hashso_curpos));
392         ItemPointerSetInvalid(&(so->hashso_heappos));
393
394         scan->opaque = so;
395
396         /* register scan in case we change pages it's using */
397         _hash_regscan(scan);
398
399         PG_RETURN_POINTER(scan);
400 }
401
402 /*
403  *      hashrescan() -- rescan an index relation
404  */
405 Datum
406 hashrescan(PG_FUNCTION_ARGS)
407 {
408         IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
409         ScanKey         scankey = (ScanKey) PG_GETARG_POINTER(1);
410
411         /* remaining arguments are ignored */
412         HashScanOpaque so = (HashScanOpaque) scan->opaque;
413         Relation        rel = scan->indexRelation;
414
415         /* release any pin we still hold */
416         if (BufferIsValid(so->hashso_curbuf))
417                 _hash_dropbuf(rel, so->hashso_curbuf);
418         so->hashso_curbuf = InvalidBuffer;
419
420         /* release lock on bucket, too */
421         if (so->hashso_bucket_blkno)
422                 _hash_droplock(rel, so->hashso_bucket_blkno, HASH_SHARE);
423         so->hashso_bucket_blkno = 0;
424
425         /* set position invalid (this will cause _hash_first call) */
426         ItemPointerSetInvalid(&(so->hashso_curpos));
427         ItemPointerSetInvalid(&(so->hashso_heappos));
428
429         /* Update scan key, if a new one is given */
430         if (scankey && scan->numberOfKeys > 0)
431         {
432                 memmove(scan->keyData,
433                                 scankey,
434                                 scan->numberOfKeys * sizeof(ScanKeyData));
435                 so->hashso_bucket_valid = false;
436         }
437
438         PG_RETURN_VOID();
439 }
440
441 /*
442  *      hashendscan() -- close down a scan
443  */
444 Datum
445 hashendscan(PG_FUNCTION_ARGS)
446 {
447         IndexScanDesc scan = (IndexScanDesc) PG_GETARG_POINTER(0);
448         HashScanOpaque so = (HashScanOpaque) scan->opaque;
449         Relation        rel = scan->indexRelation;
450
451         /* don't need scan registered anymore */
452         _hash_dropscan(scan);
453
454         /* release any pin we still hold */
455         if (BufferIsValid(so->hashso_curbuf))
456                 _hash_dropbuf(rel, so->hashso_curbuf);
457         so->hashso_curbuf = InvalidBuffer;
458
459         /* release lock on bucket, too */
460         if (so->hashso_bucket_blkno)
461                 _hash_droplock(rel, so->hashso_bucket_blkno, HASH_SHARE);
462         so->hashso_bucket_blkno = 0;
463
464         pfree(so);
465         scan->opaque = NULL;
466
467         PG_RETURN_VOID();
468 }
469
470 /*
471  *      hashmarkpos() -- save current scan position
472  */
473 Datum
474 hashmarkpos(PG_FUNCTION_ARGS)
475 {
476         elog(ERROR, "hash does not support mark/restore");
477         PG_RETURN_VOID();
478 }
479
480 /*
481  *      hashrestrpos() -- restore scan to last saved position
482  */
483 Datum
484 hashrestrpos(PG_FUNCTION_ARGS)
485 {
486         elog(ERROR, "hash does not support mark/restore");
487         PG_RETURN_VOID();
488 }
489
490 /*
491  * Bulk deletion of all index entries pointing to a set of heap tuples.
492  * The set of target tuples is specified via a callback routine that tells
493  * whether any given heap tuple (identified by ItemPointer) is being deleted.
494  *
495  * Result: a palloc'd struct containing statistical info for VACUUM displays.
496  */
497 Datum
498 hashbulkdelete(PG_FUNCTION_ARGS)
499 {
500         IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);
501         IndexBulkDeleteResult *stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
502         IndexBulkDeleteCallback callback = (IndexBulkDeleteCallback) PG_GETARG_POINTER(2);
503         void       *callback_state = (void *) PG_GETARG_POINTER(3);
504         Relation        rel = info->index;
505         double          tuples_removed;
506         double          num_index_tuples;
507         double          orig_ntuples;
508         Bucket          orig_maxbucket;
509         Bucket          cur_maxbucket;
510         Bucket          cur_bucket;
511         Buffer          metabuf;
512         HashMetaPage metap;
513         HashMetaPageData local_metapage;
514
515         tuples_removed = 0;
516         num_index_tuples = 0;
517
518         /*
519          * Read the metapage to fetch original bucket and tuple counts.  Also, we
520          * keep a copy of the last-seen metapage so that we can use its
521          * hashm_spares[] values to compute bucket page addresses.  This is a bit
522          * hokey but perfectly safe, since the interesting entries in the spares
523          * array cannot change under us; and it beats rereading the metapage for
524          * each bucket.
525          */
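	/*
	 * (Point of reference: block 0 is always the metapage, so bucket 0 starts
	 * at block 1; higher-numbered buckets are further offset by the overflow
	 * pages accounted for in hashm_spares[].)
	 */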
526         metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_READ, LH_META_PAGE);
527         metap = HashPageGetMeta(BufferGetPage(metabuf));
528         orig_maxbucket = metap->hashm_maxbucket;
529         orig_ntuples = metap->hashm_ntuples;
530         memcpy(&local_metapage, metap, sizeof(local_metapage));
531         _hash_relbuf(rel, metabuf);
532
533         /* Scan the buckets that we know exist */
534         cur_bucket = 0;
535         cur_maxbucket = orig_maxbucket;
536
537 loop_top:
538         while (cur_bucket <= cur_maxbucket)
539         {
540                 BlockNumber bucket_blkno;
541                 BlockNumber blkno;
542                 bool            bucket_dirty = false;
543
544                 /* Get address of bucket's start page */
545                 bucket_blkno = BUCKET_TO_BLKNO(&local_metapage, cur_bucket);
546
547                 /* Exclusive-lock the bucket so we can shrink it */
548                 _hash_getlock(rel, bucket_blkno, HASH_EXCLUSIVE);
549
550                 /* Shouldn't have any active scans locally, either */
551                 if (_hash_has_active_scan(rel, cur_bucket))
552                         elog(ERROR, "hash index has active scan during VACUUM");
553
554                 /* Scan each page in bucket */
555                 blkno = bucket_blkno;
556                 while (BlockNumberIsValid(blkno))
557                 {
558                         Buffer          buf;
559                         Page            page;
560                         HashPageOpaque opaque;
561                         OffsetNumber offno;
562                         OffsetNumber maxoffno;
563                         OffsetNumber deletable[MaxOffsetNumber];
564                         int                     ndeletable = 0;
565
566                         vacuum_delay_point();
567
568                         buf = _hash_getbuf_with_strategy(rel, blkno, HASH_WRITE,
569                                                                                    LH_BUCKET_PAGE | LH_OVERFLOW_PAGE,
570                                                                                          info->strategy);
571                         page = BufferGetPage(buf);
572                         opaque = (HashPageOpaque) PageGetSpecialPointer(page);
573                         Assert(opaque->hasho_bucket == cur_bucket);
574
575                         /* Scan each tuple in page */
576                         maxoffno = PageGetMaxOffsetNumber(page);
577                         for (offno = FirstOffsetNumber;
578                                  offno <= maxoffno;
579                                  offno = OffsetNumberNext(offno))
580                         {
581                                 IndexTuple      itup;
582                                 ItemPointer htup;
583
584                                 itup = (IndexTuple) PageGetItem(page,
585                                                                                                 PageGetItemId(page, offno));
586                                 htup = &(itup->t_tid);
587                                 if (callback(htup, callback_state))
588                                 {
589                                         /* mark the item for deletion */
590                                         deletable[ndeletable++] = offno;
591                                         tuples_removed += 1;
592                                 }
593                                 else
594                                         num_index_tuples += 1;
595                         }
596
597                         /*
598                          * Apply deletions and write page if needed, advance to next page.
599                          */
600                         blkno = opaque->hasho_nextblkno;
601
602                         if (ndeletable > 0)
603                         {
604                                 PageIndexMultiDelete(page, deletable, ndeletable);
605                                 _hash_wrtbuf(rel, buf);
606                                 bucket_dirty = true;
607                         }
608                         else
609                                 _hash_relbuf(rel, buf);
610                 }
611
612                 /* If we deleted anything, try to compact free space */
613                 if (bucket_dirty)
614                         _hash_squeezebucket(rel, cur_bucket, bucket_blkno,
615                                                                 info->strategy);
616
617                 /* Release bucket lock */
618                 _hash_droplock(rel, bucket_blkno, HASH_EXCLUSIVE);
619
620                 /* Advance to next bucket */
621                 cur_bucket++;
622         }
623
624         /* Write-lock metapage and check for split since we started */
625         metabuf = _hash_getbuf(rel, HASH_METAPAGE, HASH_WRITE, LH_META_PAGE);
626         metap = HashPageGetMeta(BufferGetPage(metabuf));
627
628         if (cur_maxbucket != metap->hashm_maxbucket)
629         {
630                 /* There's been a split, so process the additional bucket(s) */
631                 cur_maxbucket = metap->hashm_maxbucket;
632                 memcpy(&local_metapage, metap, sizeof(local_metapage));
633                 _hash_relbuf(rel, metabuf);
634                 goto loop_top;
635         }
636
637         /* Okay, we're really done.  Update tuple count in metapage. */
638
639         if (orig_maxbucket == metap->hashm_maxbucket &&
640                 orig_ntuples == metap->hashm_ntuples)
641         {
642                 /*
643                  * No one has split or inserted anything since start of scan, so
644                  * believe our count as gospel.
645                  */
646                 metap->hashm_ntuples = num_index_tuples;
647         }
648         else
649         {
650                 /*
651                  * Otherwise, our count is untrustworthy since we may have
652                  * double-scanned tuples in split buckets.  Proceed by dead-reckoning.
653                  * (Note: we still return estimated_count = false, because using this
654                  * count is better than not updating reltuples at all.)
655                  */
656                 if (metap->hashm_ntuples > tuples_removed)
657                         metap->hashm_ntuples -= tuples_removed;
658                 else
659                         metap->hashm_ntuples = 0;
660                 num_index_tuples = metap->hashm_ntuples;
661         }
662
663         _hash_wrtbuf(rel, metabuf);
664
665         /* return statistics */
666         if (stats == NULL)
667                 stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
668         stats->estimated_count = false;
669         stats->num_index_tuples = num_index_tuples;
670         stats->tuples_removed += tuples_removed;
671         /* hashvacuumcleanup will fill in num_pages */
672
673         PG_RETURN_POINTER(stats);
674 }
675
676 /*
677  * Post-VACUUM cleanup.
678  *
679  * Result: a palloc'd struct containing statistical info for VACUUM displays.
680  */
681 Datum
682 hashvacuumcleanup(PG_FUNCTION_ARGS)
683 {
684         IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);
685         IndexBulkDeleteResult *stats = (IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
686         Relation        rel = info->index;
687         BlockNumber num_pages;
688
689         /* If hashbulkdelete wasn't called, return NULL signifying no change */
690         /* Note: this covers the analyze_only case too */
691         if (stats == NULL)
692                 PG_RETURN_POINTER(NULL);
693
694         /* update statistics */
695         num_pages = RelationGetNumberOfBlocks(rel);
696         stats->num_pages = num_pages;
697
698         PG_RETURN_POINTER(stats);
699 }
700
701
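/*
 * Hash indexes are not WAL-logged at this point, so no hash WAL records are
 * ever generated and there is nothing to redo; after a crash a hash index
 * may be left inconsistent and need a REINDEX.
 */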
702 void
703 hash_redo(XLogReaderState *record)
704 {
705         elog(PANIC, "hash_redo: unimplemented");
706 }