Illumos #3329, #3330, #3331, #3335
author     George Wilson <george.wilson@delphix.com>
           Mon, 6 May 2013 17:14:52 +0000 (10:14 -0700)
committer  Brian Behlendorf <behlendorf1@llnl.gov>
           Mon, 6 May 2013 19:39:34 +0000 (12:39 -0700)
3329 spa_sync() spends 10-20% of its time in spa_free_sync_cb()
3330 space_seg_t should have its own kmem_cache
3331 deferred frees should happen after sync_pass 1
3335 make SYNC_PASS_* constants tunable

Reviewed by: Adam Leventhal <ahl@delphix.com>
Reviewed by: Matt Ahrens <matthew.ahrens@delphix.com>
Reviewed by: Christopher Siden <chris.siden@delphix.com>
Reviewed by: Eric Schrock <eric.schrock@delphix.com>
Reviewed by: Richard Lowe <richlowe@richlowe.net>
Reviewed by: Dan McDonald <danmcd@nexenta.com>
Approved by: Eric Schrock <eric.schrock@delphix.com>

References:
  illumos/illumos-gate@01f55e48fb4d524eaf70687728aa51b7762e2e97
  https://www.illumos.org/issues/3329
  https://www.illumos.org/issues/3330
  https://www.illumos.org/issues/3331
  https://www.illumos.org/issues/3335

Ported-by: Brian Behlendorf <behlendorf1@llnl.gov>
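
The core of 3329 is the one-line ZIO_FREE_PIPELINE change in zio_impl.h below: a zio pipeline is a bitmask of stages, and OR-ing in ZIO_STAGE_ISSUE_ASYNC hands free zios off to a taskq instead of running every stage synchronously in the spa_sync() thread, which is where the spa_free_sync_cb() time was going. A minimal sketch of the bitmask mechanics follows; the stage names and bit values are simplified stand-ins for the real enum zio_stage, not ZFS code.

#include <stdio.h>

/* Simplified stand-ins for enum zio_stage; the real values differ. */
enum stage {
        STAGE_OPEN              = 1 << 0,
        STAGE_FREE_BP_INIT      = 1 << 1,
        STAGE_ISSUE_ASYNC       = 1 << 2,  /* dispatch remaining stages to a taskq */
        STAGE_DVA_FREE          = 1 << 3,
        STAGE_DONE              = 1 << 4
};

#define INTERLOCK_STAGES        (STAGE_OPEN | STAGE_DONE)
/* Before the patch: every stage runs in the issuing (spa_sync) thread. */
#define OLD_FREE_PIPELINE       (INTERLOCK_STAGES | STAGE_FREE_BP_INIT | STAGE_DVA_FREE)
/* After the patch: the async handoff stage is part of the free pipeline. */
#define NEW_FREE_PIPELINE       (OLD_FREE_PIPELINE | STAGE_ISSUE_ASYNC)

int
main(void)
{
        printf("old free pipeline issues async? %s\n",
            (OLD_FREE_PIPELINE & STAGE_ISSUE_ASYNC) ? "yes" : "no");
        printf("new free pipeline issues async? %s\n",
            (NEW_FREE_PIPELINE & STAGE_ISSUE_ASYNC) ? "yes" : "no");
        return (0);
}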
include/sys/metaslab_impl.h
include/sys/spa.h
include/sys/space_map.h
include/sys/zio_impl.h
module/zfs/metaslab.c
module/zfs/spa.c
module/zfs/spa_misc.c
module/zfs/space_map.c
module/zfs/zio.c

diff --git a/include/sys/metaslab_impl.h b/include/sys/metaslab_impl.h
index 6583594787e1ba51ec15abe093876e54ed884e11..386f6240cbe228584b130194b8c08588cf91d5bd 100644
 /*
  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
  * Use is subject to license terms.
- * Copyright (c) 2011 by Delphix. All rights reserved.
+ */
+
+/*
+ * Copyright (c) 2012 by Delphix. All rights reserved.
  */
 
 #ifndef _SYS_METASLAB_IMPL_H
diff --git a/include/sys/spa.h b/include/sys/spa.h
index ca9fb24384d2373156f97756a05c844b7310eab6..8f2af8af2b7086056ddbe41303dd96d33667a1d8 100644
@@ -486,13 +486,7 @@ extern int spa_scan_stop(spa_t *spa);
 extern void spa_sync(spa_t *spa, uint64_t txg); /* only for DMU use */
 extern void spa_sync_allpools(void);
 
-/*
- * DEFERRED_FREE must be large enough that regular blocks are not
- * deferred.  XXX so can't we change it back to 1?
- */
-#define        SYNC_PASS_DEFERRED_FREE 2       /* defer frees after this pass */
-#define        SYNC_PASS_DONT_COMPRESS 4       /* don't compress after this pass */
-#define        SYNC_PASS_REWRITE       1       /* rewrite new bps after this pass */
+extern int zfs_sync_pass_deferred_free;
 
 /* spa namespace global mutex */
 extern kmutex_t spa_namespace_lock;
diff --git a/include/sys/space_map.h b/include/sys/space_map.h
index 3322997243d63b6f5705c7a6dba1dd52fee77460..cbe75afd1a8df592400059e167a642db551a9e4f 100644
  * Use is subject to license terms.
  */
 
+/*
+ * Copyright (c) 2012 by Delphix. All rights reserved.
+ */
+
 #ifndef _SYS_SPACE_MAP_H
 #define        _SYS_SPACE_MAP_H
 
@@ -136,6 +140,8 @@ struct space_map_ops {
 
 typedef void space_map_func_t(space_map_t *sm, uint64_t start, uint64_t size);
 
+extern void space_map_init(void);
+extern void space_map_fini(void);
 extern void space_map_create(space_map_t *sm, uint64_t start, uint64_t size,
     uint8_t shift, kmutex_t *lp);
 extern void space_map_destroy(space_map_t *sm);
diff --git a/include/sys/zio_impl.h b/include/sys/zio_impl.h
index d90bd8bd592174d8318e1d195890b890a114983f..2d062d0915a6c90ff91ff6fa210d6ce42532f9d9 100644
  * Use is subject to license terms.
  */
 
+/*
+ * Copyright (c) 2012 by Delphix. All rights reserved.
+ */
+
 #ifndef _ZIO_IMPL_H
 #define        _ZIO_IMPL_H
 
@@ -143,6 +147,7 @@ enum zio_stage {
 #define        ZIO_FREE_PIPELINE                       \
        (ZIO_INTERLOCK_STAGES |                 \
        ZIO_STAGE_FREE_BP_INIT |                \
+       ZIO_STAGE_ISSUE_ASYNC |                 \
        ZIO_STAGE_DVA_FREE)
 
 #define        ZIO_DDT_FREE_PIPELINE                   \
diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c
index 03d5a1d9c75ee865a907ba8d5ac3d6b2a86b9a22..76dc4f637a23153625466238e6d0068f2bea12bf 100644
@@ -899,8 +899,9 @@ metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
        if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
                space_map_load_wait(sm);
                if (!sm->sm_loaded) {
-                       int error = space_map_load(sm, sm_ops, SM_FREE,
-                           &msp->ms_smo,
+                       space_map_obj_t *smo = &msp->ms_smo;
+
+                       int error = space_map_load(sm, sm_ops, SM_FREE, smo,
                            spa_meta_objset(msp->ms_group->mg_vd->vdev_spa));
                        if (error)  {
                                metaslab_group_sort(msp->ms_group, msp, 0);
diff --git a/module/zfs/spa.c b/module/zfs/spa.c
index 12c24879b6197257c48390507c7c1aec7116bc75..22fa7878cadfa537fd45e7668af417196b8cf33a 100644
@@ -6078,7 +6078,7 @@ spa_sync(spa_t *spa, uint64_t txg)
                spa_errlog_sync(spa, txg);
                dsl_pool_sync(dp, txg);
 
-               if (pass <= SYNC_PASS_DEFERRED_FREE) {
+               if (pass < zfs_sync_pass_deferred_free) {
                        zio_t *zio = zio_root(spa, NULL, NULL, 0);
                        bplist_iterate(free_bpl, spa_free_sync_cb,
                            zio, tx);
diff --git a/module/zfs/spa_misc.c b/module/zfs/spa_misc.c
index 476f9c02cdea2bac826ec8eaad55286a0dc6c651..0ca9f3a7a622a89bf7fd49fc2ef876e806a089f3 100644
@@ -1633,6 +1633,7 @@ spa_init(int mode)
        fm_init();
        refcount_init();
        unique_init();
+       space_map_init();
        zio_init();
        dmu_init();
        zil_init();
@@ -1655,6 +1656,7 @@ spa_fini(void)
        zil_fini();
        dmu_fini();
        zio_fini();
+       space_map_fini();
        unique_fini();
        refcount_fini();
        fm_fini();
diff --git a/module/zfs/space_map.c b/module/zfs/space_map.c
index 9c0cdb6beeccd947c47773d043a311dd6081b8bc..d99c7c0b3997c6139b41669822ddbb96d5338067 100644
 #include <sys/zio.h>
 #include <sys/space_map.h>
 
+static kmem_cache_t *space_seg_cache;
+
+void
+space_map_init(void)
+{
+       ASSERT(space_seg_cache == NULL);
+       space_seg_cache = kmem_cache_create("space_seg_cache",
+           sizeof (space_seg_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
+}
+
+void
+space_map_fini(void)
+{
+       kmem_cache_destroy(space_seg_cache);
+       space_seg_cache = NULL;
+}
+
 /*
  * Space map routines.
  * NOTE: caller is responsible for all locking.
@@ -121,7 +138,7 @@ space_map_add(space_map_t *sm, uint64_t start, uint64_t size)
                        avl_remove(sm->sm_pp_root, ss_after);
                }
                ss_after->ss_start = ss_before->ss_start;
-               kmem_free(ss_before, sizeof (*ss_before));
+               kmem_cache_free(space_seg_cache, ss_before);
                ss = ss_after;
        } else if (merge_before) {
                ss_before->ss_end = end;
@@ -134,7 +151,7 @@ space_map_add(space_map_t *sm, uint64_t start, uint64_t size)
                        avl_remove(sm->sm_pp_root, ss_after);
                ss = ss_after;
        } else {
-               ss = kmem_alloc(sizeof (*ss), KM_PUSHPAGE);
+               ss = kmem_cache_alloc(space_seg_cache, KM_PUSHPAGE);
                ss->ss_start = start;
                ss->ss_end = end;
                avl_insert(&sm->sm_root, ss, where);
@@ -181,7 +198,7 @@ space_map_remove(space_map_t *sm, uint64_t start, uint64_t size)
                avl_remove(sm->sm_pp_root, ss);
 
        if (left_over && right_over) {
-               newseg = kmem_alloc(sizeof (*newseg), KM_PUSHPAGE);
+               newseg = kmem_cache_alloc(space_seg_cache, KM_PUSHPAGE);
                newseg->ss_start = end;
                newseg->ss_end = ss->ss_end;
                ss->ss_end = start;
@@ -194,7 +211,7 @@ space_map_remove(space_map_t *sm, uint64_t start, uint64_t size)
                ss->ss_start = end;
        } else {
                avl_remove(&sm->sm_root, ss);
-               kmem_free(ss, sizeof (*ss));
+               kmem_cache_free(space_seg_cache, ss);
                ss = NULL;
        }
 
@@ -234,7 +251,7 @@ space_map_vacate(space_map_t *sm, space_map_func_t *func, space_map_t *mdest)
        while ((ss = avl_destroy_nodes(&sm->sm_root, &cookie)) != NULL) {
                if (func != NULL)
                        func(mdest, ss->ss_start, ss->ss_end - ss->ss_start);
-               kmem_free(ss, sizeof (*ss));
+               kmem_cache_free(space_seg_cache, ss);
        }
        sm->sm_space = 0;
 }
@@ -406,7 +423,7 @@ space_map_sync(space_map_t *sm, uint8_t maptype,
        spa_t *spa = dmu_objset_spa(os);
        void *cookie = NULL;
        space_seg_t *ss;
-       uint64_t bufsize, start, size, run_len;
+       uint64_t bufsize, start, size, run_len, delta, sm_space;
        uint64_t *entry, *entry_map, *entry_map_end;
 
        ASSERT(MUTEX_HELD(sm->sm_lock));
@@ -435,11 +452,13 @@ space_map_sync(space_map_t *sm, uint8_t maptype,
            SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(spa)) |
            SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));
 
+       delta = 0;
+       sm_space = sm->sm_space;
        while ((ss = avl_destroy_nodes(&sm->sm_root, &cookie)) != NULL) {
                size = ss->ss_end - ss->ss_start;
                start = (ss->ss_start - sm->sm_start) >> sm->sm_shift;
 
-               sm->sm_space -= size;
+               delta += size;
                size >>= sm->sm_shift;
 
                while (size) {
@@ -461,7 +480,7 @@ space_map_sync(space_map_t *sm, uint8_t maptype,
                        start += run_len;
                        size -= run_len;
                }
-               kmem_free(ss, sizeof (*ss));
+               kmem_cache_free(space_seg_cache, ss);
        }
 
        if (entry != entry_map) {
@@ -473,8 +492,15 @@ space_map_sync(space_map_t *sm, uint8_t maptype,
                smo->smo_objsize += size;
        }
 
+       /*
+        * Ensure that the space_map's accounting wasn't changed
+        * while we were in the middle of writing it out.
+        */
+       VERIFY3U(sm->sm_space, ==, sm_space);
+
        zio_buf_free(entry_map, bufsize);
 
+       sm->sm_space -= delta;
        VERIFY3U(sm->sm_space, ==, 0);
 }
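
A note on the space_map_sync() hunks above: rather than decrementing sm->sm_space as each segment is written, the patch accumulates the written size in a local delta, verifies at the end that sm->sm_space was not changed while the map was being flushed, and only then subtracts. The same snapshot/verify/subtract pattern in a stand-alone userland sketch (the segment sizes and names are invented for illustration):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uint64_t segs[] = { 4096, 8192, 12288 };        /* pretend segment sizes */
        uint64_t space = 4096 + 8192 + 12288;           /* stands in for sm->sm_space */
        uint64_t snapshot = space;                      /* expected to stay constant */
        uint64_t delta = 0;
        size_t i;

        for (i = 0; i < sizeof (segs) / sizeof (segs[0]); i++)
                delta += segs[i];                       /* "write out" each segment */

        assert(space == snapshot);                      /* nothing changed meanwhile */
        space -= delta;
        assert(space == 0);                             /* everything was flushed */

        printf("flushed %llu bytes\n", (unsigned long long)delta);
        return (0);
}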
 
diff --git a/module/zfs/zio.c b/module/zfs/zio.c
index a721903a37ab875d594f08b770dc91aa73be1aa9..fda76123e4927315bd01d2f5cfb0f4278f368ca0 100644
@@ -84,6 +84,22 @@ extern vmem_t *zio_alloc_arena;
 #endif
 extern int zfs_mg_alloc_failures;
 
+/*
+ * The following actions directly effect the spa's sync-to-convergence logic.
+ * The values below define the sync pass when we start performing the action.
+ * Care should be taken when changing these values as they directly impact
+ * spa_sync() performance. Tuning these values may introduce subtle performance
+ * pathologies and should only be done in the context of performance analysis.
+ * These tunables will eventually be removed and replaced with #defines once
+ * enough analysis has been done to determine optimal values.
+ *
+ * The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
+ * regular blocks are not deferred.
+ */
+int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */
+int zfs_sync_pass_dont_compress = 5; /* don't compress starting in this pass */
+int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */
+
 /*
  * An allocating zio is one that either currently has the DVA allocate
  * stage set or will have it later in its lifetime.
@@ -771,7 +787,7 @@ zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
 
        ASSERT(!BP_IS_HOLE(bp));
        ASSERT(spa_syncing_txg(spa) == txg);
-       ASSERT(spa_sync_pass(spa) <= SYNC_PASS_DEFERRED_FREE);
+       ASSERT(spa_sync_pass(spa) < zfs_sync_pass_deferred_free);
 
        zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
            NULL, NULL, ZIO_TYPE_FREE, ZIO_PRIORITY_FREE, flags,
@@ -1068,7 +1084,7 @@ zio_write_bp_init(zio_t *zio)
                ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
                ASSERT(!BP_GET_DEDUP(bp));
 
-               if (pass > SYNC_PASS_DONT_COMPRESS)
+               if (pass >= zfs_sync_pass_dont_compress)
                        compress = ZIO_COMPRESS_OFF;
 
                /* Make sure someone doesn't change their mind on overwrites */
@@ -1097,7 +1113,7 @@ zio_write_bp_init(zio_t *zio)
         * There should only be a handful of blocks after pass 1 in any case.
         */
        if (bp->blk_birth == zio->io_txg && BP_GET_PSIZE(bp) == psize &&
-           pass > SYNC_PASS_REWRITE) {
+           pass >= zfs_sync_pass_rewrite) {
                enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
                ASSERT(psize != 0);
                zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
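
With the defaults added above, the comparisons in spa_sync() and zio_write_bp_init() work out as follows: frees are issued directly only in pass 1 and deferred afterwards (the 3331 change), compression is turned off starting in pass 5, and qualifying block pointers are rewritten in place starting in pass 2. A purely illustrative program that tabulates those defaults using the same comparisons (not ZFS code):

#include <stdio.h>

/* Defaults from the zio.c hunk above. */
static int zfs_sync_pass_deferred_free = 2;
static int zfs_sync_pass_dont_compress = 5;
static int zfs_sync_pass_rewrite = 2;

int
main(void)
{
        int pass;

        printf("pass  frees      compress  rewrite-bps\n");
        for (pass = 1; pass <= 6; pass++) {
                printf("%4d  %-9s  %-8s  %s\n", pass,
                    pass < zfs_sync_pass_deferred_free ? "issue now" : "defer",
                    pass >= zfs_sync_pass_dont_compress ? "off" : "on",
                    pass >= zfs_sync_pass_rewrite ? "yes" : "no");
        }
        return (0);
}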