Refactor txg history kstat
author     Brian Behlendorf <behlendorf1@llnl.gov>
           Fri, 2 Dec 2016 23:57:49 +0000 (16:57 -0700)
committer  GitHub <noreply@github.com>
           Fri, 2 Dec 2016 23:57:49 +0000 (16:57 -0700)
It was observed that even when txg history is disabled by setting
`zfs_txg_history=0`, the txg_sync thread still fetches the vdev
stats unnecessarily.

This patch refactors the code so that vdev_get_stats() is no longer
called when `zfs_txg_history=0`.  It further reduces the differences
between the upstream and ZoL versions of txg_sync_thread().

Reviewed-by: Tony Hutter <hutter2@llnl.gov>
Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov>
Closes #5412
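
For illustration, a minimal userland C sketch of the pattern this commit
introduces: an init call that returns NULL when history collection is
disabled, so the expensive stats fetch is skipped entirely, and a fini
call that tolerates a NULL handle.  The names txg_hist_enabled,
fetch_stats_stub(), history_init_io() and history_fini_io() are
hypothetical stand-ins for zfs_txg_history, vdev_get_stats(),
spa_txg_history_init_io() and spa_txg_history_fini_io(); this is not
the kernel code itself.

    #include <stdio.h>
    #include <stdlib.h>

    static int txg_hist_enabled = 0;        /* stand-in for zfs_txg_history */

    typedef struct txg_stat {
            unsigned long long nread1, nread2;  /* before/after snapshots */
            unsigned long long txg;
    } txg_stat_t;

    static unsigned long long
    fetch_stats_stub(void)                  /* stand-in for vdev_get_stats() */
    {
            printf("expensive stats fetch\n");
            return (42);
    }

    static txg_stat_t *
    history_init_io(unsigned long long txg)
    {
            txg_stat_t *ts;

            if (txg_hist_enabled == 0)
                    return (NULL);          /* disabled: skip the fetch */

            ts = malloc(sizeof (*ts));
            if (ts == NULL)
                    return (NULL);
            ts->txg = txg;
            ts->nread1 = fetch_stats_stub();
            return (ts);
    }

    static void
    history_fini_io(txg_stat_t *ts)
    {
            if (ts == NULL)                 /* nothing was collected */
                    return;

            ts->nread2 = fetch_stats_stub();
            printf("txg %llu read delta %llu\n", ts->txg,
                ts->nread2 - ts->nread1);
            free(ts);
    }

    int
    main(void)
    {
            txg_stat_t *ts;

            /* Disabled: neither fetch runs and fini is a no-op. */
            ts = history_init_io(1);
            /* ... spa_sync() would run here ... */
            history_fini_io(ts);

            /* Enabled: both snapshots are taken and the delta is reported. */
            txg_hist_enabled = 1;
            ts = history_init_io(2);
            history_fini_io(ts);

            return (0);
    }
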

include/sys/spa.h
module/zfs/spa_stats.c
module/zfs/txg.c

diff --git a/include/sys/spa.h b/include/sys/spa.h
index d679e53d6ca7c135e356100c65cd8ff1107ff95e..58520118e73beff5f15c9038ed47107b994ea337 100644
--- a/include/sys/spa.h
+++ b/include/sys/spa.h
@@ -725,6 +725,13 @@ typedef enum txg_state {
        TXG_STATE_COMMITTED     = 5,
 } txg_state_t;
 
+typedef struct txg_stat {
+       vdev_stat_t             vs1;
+       vdev_stat_t             vs2;
+       uint64_t                txg;
+       uint64_t                ndirty;
+} txg_stat_t;
+
 extern void spa_stats_init(spa_t *spa);
 extern void spa_stats_destroy(spa_t *spa);
 extern void spa_read_history_add(spa_t *spa, const zbookmark_phys_t *zb,
@@ -732,8 +739,9 @@ extern void spa_read_history_add(spa_t *spa, const zbookmark_phys_t *zb,
 extern void spa_txg_history_add(spa_t *spa, uint64_t txg, hrtime_t birth_time);
 extern int spa_txg_history_set(spa_t *spa,  uint64_t txg,
     txg_state_t completed_state, hrtime_t completed_time);
-extern int spa_txg_history_set_io(spa_t *spa,  uint64_t txg, uint64_t nread,
-    uint64_t nwritten, uint64_t reads, uint64_t writes, uint64_t ndirty);
+extern txg_stat_t *spa_txg_history_init_io(spa_t *, uint64_t,
+    struct dsl_pool *);
+extern void spa_txg_history_fini_io(spa_t *, txg_stat_t *);
 extern void spa_tx_assign_add_nsecs(spa_t *spa, uint64_t nsecs);
 
 /* Pool configuration locks */
diff --git a/module/zfs/spa_stats.c b/module/zfs/spa_stats.c
index c3d1c3b958dc96f2c83974fe77983c7a4936d73f..20d7dc9d1d50b0c41db8d9e89cf049cb42a8b643 100644
--- a/module/zfs/spa_stats.c
+++ b/module/zfs/spa_stats.c
@@ -474,7 +474,7 @@ spa_txg_history_set(spa_t *spa, uint64_t txg, txg_state_t completed_state,
 /*
  * Set txg IO stats.
  */
-int
+static int
 spa_txg_history_set_io(spa_t *spa, uint64_t txg, uint64_t nread,
     uint64_t nwritten, uint64_t reads, uint64_t writes, uint64_t ndirty)
 {
@@ -503,6 +503,54 @@ spa_txg_history_set_io(spa_t *spa, uint64_t txg, uint64_t nread,
        return (error);
 }
 
+txg_stat_t *
+spa_txg_history_init_io(spa_t *spa, uint64_t txg, dsl_pool_t *dp)
+{
+       txg_stat_t *ts;
+
+       if (zfs_txg_history == 0)
+               return (NULL);
+
+       ts = kmem_alloc(sizeof (txg_stat_t), KM_SLEEP);
+
+       spa_config_enter(spa, SCL_ALL, FTAG, RW_READER);
+       vdev_get_stats(spa->spa_root_vdev, &ts->vs1);
+       spa_config_exit(spa, SCL_ALL, FTAG);
+
+       ts->txg = txg;
+       ts->ndirty = dp->dp_dirty_pertxg[txg & TXG_MASK];
+
+       spa_txg_history_set(spa, txg, TXG_STATE_WAIT_FOR_SYNC, gethrtime());
+
+       return (ts);
+}
+
+void
+spa_txg_history_fini_io(spa_t *spa, txg_stat_t *ts)
+{
+       if (ts == NULL)
+               return;
+
+       if (zfs_txg_history == 0) {
+               kmem_free(ts, sizeof (txg_stat_t));
+               return;
+       }
+
+       spa_config_enter(spa, SCL_ALL, FTAG, RW_READER);
+       vdev_get_stats(spa->spa_root_vdev, &ts->vs2);
+       spa_config_exit(spa, SCL_ALL, FTAG);
+
+       spa_txg_history_set(spa, ts->txg, TXG_STATE_SYNCED, gethrtime());
+       spa_txg_history_set_io(spa, ts->txg,
+           ts->vs2.vs_bytes[ZIO_TYPE_READ] - ts->vs1.vs_bytes[ZIO_TYPE_READ],
+           ts->vs2.vs_bytes[ZIO_TYPE_WRITE] - ts->vs1.vs_bytes[ZIO_TYPE_WRITE],
+           ts->vs2.vs_ops[ZIO_TYPE_READ] - ts->vs1.vs_ops[ZIO_TYPE_READ],
+           ts->vs2.vs_ops[ZIO_TYPE_WRITE] - ts->vs1.vs_ops[ZIO_TYPE_WRITE],
+           ts->ndirty);
+
+       kmem_free(ts, sizeof (txg_stat_t));
+}
+
 /*
  * ==========================================================================
  * SPA TX Assign Histogram Routines
diff --git a/module/zfs/txg.c b/module/zfs/txg.c
index 9dda58c283df40b7d35df97777ad9e17f51f5430..043547e97af3929aff08d02f6b82585e127643d2 100644
--- a/module/zfs/txg.c
+++ b/module/zfs/txg.c
@@ -481,22 +481,17 @@ txg_sync_thread(dsl_pool_t *dp)
        spa_t *spa = dp->dp_spa;
        tx_state_t *tx = &dp->dp_tx;
        callb_cpr_t cpr;
-       vdev_stat_t *vs1, *vs2;
        clock_t start, delta;
 
        (void) spl_fstrans_mark();
        txg_thread_enter(tx, &cpr);
 
-       vs1 = kmem_alloc(sizeof (vdev_stat_t), KM_SLEEP);
-       vs2 = kmem_alloc(sizeof (vdev_stat_t), KM_SLEEP);
-
        start = delta = 0;
        for (;;) {
-               clock_t timer, timeout;
+               clock_t timeout = zfs_txg_timeout * hz;
+               clock_t timer;
                uint64_t txg;
-               uint64_t ndirty;
-
-               timeout = zfs_txg_timeout * hz;
+               txg_stat_t *ts;
 
                /*
                 * We sync when we're scanning, there's someone waiting
@@ -527,15 +522,8 @@ txg_sync_thread(dsl_pool_t *dp)
                        txg_thread_wait(tx, &cpr, &tx->tx_quiesce_done_cv, 0);
                }
 
-               if (tx->tx_exiting) {
-                       kmem_free(vs2, sizeof (vdev_stat_t));
-                       kmem_free(vs1, sizeof (vdev_stat_t));
+               if (tx->tx_exiting)
                        txg_thread_exit(tx, &cpr, &tx->tx_sync_thread);
-               }
-
-               spa_config_enter(spa, SCL_ALL, FTAG, RW_READER);
-               vdev_get_stats(spa->spa_root_vdev, vs1);
-               spa_config_exit(spa, SCL_ALL, FTAG);
 
                /*
                 * Consume the quiesced txg which has been handed off to
@@ -546,16 +534,13 @@ txg_sync_thread(dsl_pool_t *dp)
                tx->tx_quiesced_txg = 0;
                tx->tx_syncing_txg = txg;
                DTRACE_PROBE2(txg__syncing, dsl_pool_t *, dp, uint64_t, txg);
+               ts = spa_txg_history_init_io(spa, txg, dp);
                cv_broadcast(&tx->tx_quiesce_more_cv);
 
                dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
                    txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
                mutex_exit(&tx->tx_sync_lock);
 
-               spa_txg_history_set(spa, txg, TXG_STATE_WAIT_FOR_SYNC,
-                   gethrtime());
-               ndirty = dp->dp_dirty_pertxg[txg & TXG_MASK];
-
                start = ddi_get_lbolt();
                spa_sync(spa, txg);
                delta = ddi_get_lbolt() - start;
@@ -564,23 +549,13 @@ txg_sync_thread(dsl_pool_t *dp)
                tx->tx_synced_txg = txg;
                tx->tx_syncing_txg = 0;
                DTRACE_PROBE2(txg__synced, dsl_pool_t *, dp, uint64_t, txg);
+               spa_txg_history_fini_io(spa, ts);
                cv_broadcast(&tx->tx_sync_done_cv);
 
                /*
                 * Dispatch commit callbacks to worker threads.
                 */
                txg_dispatch_callbacks(dp, txg);
-
-               spa_config_enter(spa, SCL_ALL, FTAG, RW_READER);
-               vdev_get_stats(spa->spa_root_vdev, vs2);
-               spa_config_exit(spa, SCL_ALL, FTAG);
-               spa_txg_history_set_io(spa, txg,
-                   vs2->vs_bytes[ZIO_TYPE_READ]-vs1->vs_bytes[ZIO_TYPE_READ],
-                   vs2->vs_bytes[ZIO_TYPE_WRITE]-vs1->vs_bytes[ZIO_TYPE_WRITE],
-                   vs2->vs_ops[ZIO_TYPE_READ]-vs1->vs_ops[ZIO_TYPE_READ],
-                   vs2->vs_ops[ZIO_TYPE_WRITE]-vs1->vs_ops[ZIO_TYPE_WRITE],
-                   ndirty);
-               spa_txg_history_set(spa, txg, TXG_STATE_SYNCED, gethrtime());
        }
 }