granicus.if.org Git - postgresql/commitdiff
Remove no-longer-needed compatibility code for hash indexes.
authorRobert Haas <rhaas@postgresql.org>
Wed, 10 May 2017 03:44:21 +0000 (23:44 -0400)
committerRobert Haas <rhaas@postgresql.org>
Wed, 10 May 2017 03:44:21 +0000 (23:44 -0400)
Because commit ea69a0dead5128c421140dc53fac165ba4af8520 bumped the
HASH_VERSION, we don't need to worry about PostgreSQL 10 seeing
bucket pages from earlier versions.

Amit Kapila

Discussion: http://postgr.es/m/CAA4eK1LAo4DGwh+mi-G3U8Pj1WkBBeFL38xdCnUHJv1z4bZFkQ@mail.gmail.com

src/backend/access/hash/hash.c
src/backend/access/hash/hashpage.c

index 3eb5b1d0d5e2394907ce69310f80a32b374642a8..df54638f3e06f4b1e51aef9046028bd872355224 100644 (file)
@@ -624,13 +624,9 @@ loop_top:
                         * now that the primary page of the target bucket has been locked
                         * (and thus can't be further split), check whether we need to
                         * update our cached metapage data.
-                        *
-                        * NB: The check for InvalidBlockNumber is only needed for
-                        * on-disk compatibility with indexes created before we started
-                        * storing hashm_maxbucket in the primary page's hasho_prevblkno.
                         */
-                       if (bucket_opaque->hasho_prevblkno != InvalidBlockNumber &&
-                               bucket_opaque->hasho_prevblkno > cachedmetap->hashm_maxbucket)
+                       Assert(bucket_opaque->hasho_prevblkno != InvalidBlockNumber);
+                       if (bucket_opaque->hasho_prevblkno > cachedmetap->hashm_maxbucket)
                        {
                                cachedmetap = _hash_getcachedmetap(rel, &metabuf, true);
                                Assert(cachedmetap != NULL);
index 3cd4daa32507bbbdedf84311cec1a348d973bddc..bf1ffff4e8c31382b00cb21db0d142545a1b74de 100644 (file)
@@ -1564,16 +1564,12 @@ _hash_getbucketbuf_from_hashkey(Relation rel, uint32 hashkey, int access,
                page = BufferGetPage(buf);
                opaque = (HashPageOpaque) PageGetSpecialPointer(page);
                Assert(opaque->hasho_bucket == bucket);
+               Assert(opaque->hasho_prevblkno != InvalidBlockNumber);
 
                /*
                 * If this bucket hasn't been split, we're done.
-                *
-                * NB: The check for InvalidBlockNumber is only needed for on-disk
-                * compatibility with indexes created before we started storing
-                * hashm_maxbucket in the primary page's hasho_prevblkno.
                 */
-               if (opaque->hasho_prevblkno == InvalidBlockNumber ||
-                       opaque->hasho_prevblkno <= metap->hashm_maxbucket)
+               if (opaque->hasho_prevblkno <= metap->hashm_maxbucket)
                        break;
 
                /* Drop lock on this buffer, update cached metapage, and retry. */