Illumos 5349 - verify that block pointer is plausible before reading
author    Matthew Ahrens <mahrens@delphix.com>
          Wed, 26 Nov 2014 17:57:30 +0000 (09:57 -0800)
committer Brian Behlendorf <behlendorf1@llnl.gov>
          Fri, 8 May 2015 21:09:15 +0000 (14:09 -0700)
5349 verify that block pointer is plausible before reading
Reviewed by: Alex Reece <alex.reece@delphix.com>
Reviewed by: Christopher Siden <christopher.siden@delphix.com>
Reviewed by: Dan McDonald <danmcd@omniti.com>
Reviewed by: George Wilson <george.wilson@delphix.com>
Reviewed by: Richard Lowe <richlowe@richlowe.net>
Reviewed by: Xin Li <delphij@FreeBSD.org>
Reviewed by: Josef 'Jeff' Sipek <josef.sipek@nexenta.com>
Approved by: Gordon Ross <gwr@nexenta.com>

References:
  https://www.illumos.org/issues/5349
  https://github.com/illumos/illumos-gate/commit/f63ab3d5

Porting notes:
* Several variable declarations were moved to the top of their enclosing
  blocks to satisfy the C style used in this tree (declarations before
  statements); see the sketch below

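For illustration, here is a minimal stand-alone C sketch of the kind of change
that porting note describes (hypothetical code, not taken from the patch): the
upstream style declares the loop counter inside the for statement, while the
ported style hoists the declaration to the top of the enclosing block.

    #include <stdio.h>

    /*
     * Hypothetical example; sum_ints() exists only to show the declaration
     * style.  Upstream might write "for (int i = 0; ...)"; the port hoists
     * "int i;" to the top of the block, as zfs_blkptr_verify() does below.
     */
    static int
    sum_ints(const int *vals, int n)
    {
            int i;          /* hoisted declaration */
            int total = 0;

            for (i = 0; i < n; i++)
                    total += vals[i];

            return (total);
    }

    int
    main(void)
    {
            int v[3] = { 1, 2, 3 };

            printf("%d\n", sum_ints(v, 3));
            return (0);
    }
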
Ported-by: DHE <git@dehacked.net>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #3373

include/sys/spa.h
module/zfs/zio.c
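
Two files change: include/sys/spa.h exports the new verifier, and
module/zfs/zio.c defines it and wires it into zio_read(). For orientation,
a hedged sketch of how a consumer of the exported declaration would use it
(example_consume_bp() is hypothetical; the only caller added by this patch
is zio_read()):

    #include <sys/spa.h>

    /*
     * Hypothetical caller: sanity-check a block pointer before trusting
     * its fields.  zfs_blkptr_verify() reports each implausible field
     * through zfs_panic_recover(), which panics unless the zfs_recover
     * tunable is set.
     */
    static void
    example_consume_bp(spa_t *spa, const blkptr_t *bp)
    {
            zfs_blkptr_verify(spa, bp);
            /* BP_GET_LSIZE(bp), the DVAs, etc. can now be used with more confidence. */
    }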

diff --git a/include/sys/spa.h b/include/sys/spa.h
index 13cbbf98b566f3da6cf5db8a482d27fe360faff7..834ad005a1a74d035ba7a17c3fecd880db03da9e 100644
--- a/include/sys/spa.h
+++ b/include/sys/spa.h
@@ -830,6 +830,7 @@ extern boolean_t spa_has_slogs(spa_t *spa);
 extern boolean_t spa_is_root(spa_t *spa);
 extern boolean_t spa_writeable(spa_t *spa);
 extern boolean_t spa_has_pending_synctask(spa_t *spa);
+extern void zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp);
 
 extern int spa_mode(spa_t *spa);
 extern uint64_t strtonum(const char *str, char **nptr);
diff --git a/module/zfs/zio.c b/module/zfs/zio.c
index c89a2f99e654b7f36f0b18cf6e7798213e98a04b..9204df2b22fb63ee7064fbd6a1571a68e36f07bd 100644
--- a/module/zfs/zio.c
+++ b/module/zfs/zio.c
@@ -212,7 +212,7 @@ zio_buf_alloc(size_t size)
 {
        size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
 
-       ASSERT3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
+       VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
 
        return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
 }
@@ -228,7 +228,7 @@ zio_data_buf_alloc(size_t size)
 {
        size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
 
-       ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
+       VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
 
        return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
 }
@@ -238,7 +238,7 @@ zio_buf_free(void *buf, size_t size)
 {
        size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
 
-       ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
+       VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
 
        kmem_cache_free(zio_buf_cache[c], buf);
 }
@@ -248,7 +248,7 @@ zio_data_buf_free(void *buf, size_t size)
 {
        size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
 
-       ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
+       VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
 
        kmem_cache_free(zio_data_buf_cache[c], buf);
 }
@@ -596,6 +596,90 @@ zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags)
        return (zio_null(NULL, spa, NULL, done, private, flags));
 }
 
+void
+zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp)
+{
+       int i;
+
+       if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
+               zfs_panic_recover("blkptr at %p has invalid TYPE %llu",
+                   bp, (longlong_t)BP_GET_TYPE(bp));
+       }
+       if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS ||
+           BP_GET_CHECKSUM(bp) <= ZIO_CHECKSUM_ON) {
+               zfs_panic_recover("blkptr at %p has invalid CHECKSUM %llu",
+                   bp, (longlong_t)BP_GET_CHECKSUM(bp));
+       }
+       if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS ||
+           BP_GET_COMPRESS(bp) <= ZIO_COMPRESS_ON) {
+               zfs_panic_recover("blkptr at %p has invalid COMPRESS %llu",
+                   bp, (longlong_t)BP_GET_COMPRESS(bp));
+       }
+       if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
+               zfs_panic_recover("blkptr at %p has invalid LSIZE %llu",
+                   bp, (longlong_t)BP_GET_LSIZE(bp));
+       }
+       if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
+               zfs_panic_recover("blkptr at %p has invalid PSIZE %llu",
+                   bp, (longlong_t)BP_GET_PSIZE(bp));
+       }
+
+       if (BP_IS_EMBEDDED(bp)) {
+               if (BPE_GET_ETYPE(bp) > NUM_BP_EMBEDDED_TYPES) {
+                       zfs_panic_recover("blkptr at %p has invalid ETYPE %llu",
+                           bp, (longlong_t)BPE_GET_ETYPE(bp));
+               }
+       }
+
+       /*
+        * Pool-specific checks.
+        *
+        * Note: it would be nice to verify that the blk_birth and
+        * BP_PHYSICAL_BIRTH() are not too large.  However, spa_freeze()
+        * allows the birth time of log blocks (and dmu_sync()-ed blocks
+        * that are in the log) to be arbitrarily large.
+        */
+       for (i = 0; i < BP_GET_NDVAS(bp); i++) {
+               uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[i]);
+               vdev_t *vd;
+               uint64_t offset, asize;
+               if (vdevid >= spa->spa_root_vdev->vdev_children) {
+                       zfs_panic_recover("blkptr at %p DVA %u has invalid "
+                           "VDEV %llu",
+                           bp, i, (longlong_t)vdevid);
+               }
+               vd = spa->spa_root_vdev->vdev_child[vdevid];
+               if (vd == NULL) {
+                       zfs_panic_recover("blkptr at %p DVA %u has invalid "
+                           "VDEV %llu",
+                           bp, i, (longlong_t)vdevid);
+               }
+               if (vd->vdev_ops == &vdev_hole_ops) {
+                       zfs_panic_recover("blkptr at %p DVA %u has hole "
+                           "VDEV %llu",
+                           bp, i, (longlong_t)vdevid);
+
+               }
+               if (vd->vdev_ops == &vdev_missing_ops) {
+                       /*
+                        * "missing" vdevs are valid during import, but we
+                        * don't have their detailed info (e.g. asize), so
+                        * we can't perform any more checks on them.
+                        */
+                       continue;
+               }
+               offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
+               asize = DVA_GET_ASIZE(&bp->blk_dva[i]);
+               if (BP_IS_GANG(bp))
+                       asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
+               if (offset + asize > vd->vdev_asize) {
+                       zfs_panic_recover("blkptr at %p DVA %u has invalid "
+                           "OFFSET %llu",
+                           bp, i, (longlong_t)offset);
+               }
+       }
+}
+
 zio_t *
 zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
     void *data, uint64_t size, zio_done_func_t *done, void *private,
@@ -603,6 +687,8 @@ zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
 {
        zio_t *zio;
 
+       zfs_blkptr_verify(spa, bp);
+
        zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
            data, size, done, private,
            ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
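
All of the new checks report through zfs_panic_recover(), which is defined
elsewhere in the tree and not shown in this diff. As a rough userland
approximation of its behavior (a sketch, not the kernel implementation): when
the zfs_recover tunable is nonzero the message is emitted as a warning and
execution continues; otherwise the system panics, so an implausible block
pointer stops the read before any I/O is issued.

    #include <stdarg.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Illustrative userland stand-in for zfs_panic_recover(). */
    static int zfs_recover = 0;     /* tunable: nonzero = warn and continue */

    static void
    zfs_panic_recover_sketch(const char *fmt, ...)
    {
            va_list adx;

            va_start(adx, fmt);
            vfprintf(stderr, fmt, adx);
            va_end(adx);
            fputc('\n', stderr);

            if (!zfs_recover)
                    abort();        /* stands in for a kernel panic */
    }

    int
    main(void)
    {
            zfs_recover = 1;        /* behave as if zfs_recover were set */
            zfs_panic_recover_sketch("blkptr at %p has invalid TYPE %llu",
                (void *)NULL, 255ULL);
            return (0);
    }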