4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 #include <sys/zfs_context.h>
23 #include <sys/spa_impl.h>
24 #include <sys/vdev_impl.h>
/*
 * Module tunables controlling how much history is retained.  All default
 * to 0 (collection disabled); they are exported as writable module
 * parameters at the bottom of this file.
 */
27 * Keeps stats on last N reads per spa_t, disabled by default.
29 int zfs_read_history = 0;
32 * Include cache hits in history, disabled by default.
34 int zfs_read_history_hits = 0;
37 * Keeps stats on the last N txgs, disabled by default.
39 int zfs_txg_history = 0;
42 * Keeps stats on the last N MMP updates, disabled by default.
44 int zfs_multihost_history = 0;
47 * ==========================================================================
48 * SPA Read History Routines
49 * ==========================================================================
53 * Read statistics - Information exported regarding each arc_read call
/*
 * One record per tracked read; records live on ssh->list, newest at head.
 * NOTE(review): the closing brace/typedef name and the list_node_t
 * srh_link member (referenced by list_create below) are elided in this
 * excerpt.
 */
55 typedef struct spa_read_history {
56 uint64_t uid; /* unique identifier */
57 hrtime_t start; /* time read completed */
58 uint64_t objset; /* read from this objset */
59 uint64_t object; /* read of this object number */
60 uint64_t level; /* block's indirection level */
61 uint64_t blkid; /* read of this block id */
62 char origin[24]; /* read originated from here */
63 uint32_t aflags; /* ARC flags (cached, prefetch, etc.) */
64 pid_t pid; /* PID of task doing read */
65 char comm[16]; /* process name of task doing read */
/*
 * kstat raw-ops "headers" callback: format the column-header line for
 * the per-pool "reads" kstat into buf.  (Return type/braces elided in
 * this excerpt.)
 */
70 spa_read_history_headers(char *buf, size_t size)
72 (void) snprintf(buf, size, "%-8s %-16s %-8s %-8s %-8s %-8s %-8s "
73 "%-24s %-8s %-16s\n", "UID", "start", "objset", "object",
74 "level", "blkid", "aflags", "origin", "pid", "process");
/*
 * kstat raw-ops "data" callback: format one spa_read_history_t record
 * into buf; columns match spa_read_history_headers() above.
 * NOTE(review): srh->start (hrtime_t) is passed to %llu without a
 * u_longlong_t cast, unlike the surrounding fields — harmless where
 * hrtime_t is 64-bit but inconsistent; confirm against the full source.
 */
80 spa_read_history_data(char *buf, size_t size, void *data)
82 spa_read_history_t *srh = (spa_read_history_t *)data;
84 (void) snprintf(buf, size, "%-8llu %-16llu 0x%-6llx "
85 "%-8lli %-8lli %-8lli 0x%-6x %-24s %-8i %-16s\n",
86 (u_longlong_t)srh->uid, srh->start,
87 (longlong_t)srh->objset, (longlong_t)srh->object,
88 (longlong_t)srh->level, (longlong_t)srh->blkid,
89 srh->aflags, srh->origin, srh->pid, srh->comm);
95 * Calculate the address for the next spa_stats_history_t entry. The
96 * ssh->lock will be held until ksp->ks_ndata entries are processed.
/*
 * Walks the list tail-to-head (oldest first) using ssh->private as the
 * iteration cursor.  NOTE(review): the leading `if (n == 0)` guard that
 * must precede the `else if` below is elided in this excerpt.
 */
99 spa_read_history_addr(kstat_t *ksp, loff_t n)
101 spa_t *spa = ksp->ks_private;
102 spa_stats_history_t *ssh = &spa->spa_stats.read_history;
104 ASSERT(MUTEX_HELD(&ssh->lock));
107 ssh->private = list_tail(&ssh->list);
108 else if (ssh->private)
109 ssh->private = list_prev(&ssh->list, ssh->private);
111 return (ssh->private);
115 * When the kstat is written discard all spa_read_history_t entries. The
116 * ssh->lock will be held until ksp->ks_ndata entries are processed.
/*
 * kstat update callback: KSTAT_WRITE empties the list (a write to the
 * kstat clears history); both paths refresh ks_ndata/ks_data_size from
 * the current list size.  NOTE(review): the `ssh->size--` bookkeeping
 * inside the while loop appears elided in this excerpt.
 */
119 spa_read_history_update(kstat_t *ksp, int rw)
121 spa_t *spa = ksp->ks_private;
122 spa_stats_history_t *ssh = &spa->spa_stats.read_history;
124 if (rw == KSTAT_WRITE) {
125 spa_read_history_t *srh;
127 while ((srh = list_remove_head(&ssh->list))) {
129 kmem_free(srh, sizeof (spa_read_history_t));
132 ASSERT3U(ssh->size, ==, 0);
135 ksp->ks_ndata = ssh->size;
136 ksp->ks_data_size = ssh->size * sizeof (spa_read_history_t);
/*
 * Create the per-pool "zfs/<pool>/reads" virtual raw kstat and wire up
 * the headers/data/addr callbacks above.  Called from spa_stats_init().
 * NOTE(review): the NULL check on kstat_create()'s result and the
 * kstat_install() call are elided in this excerpt.
 */
142 spa_read_history_init(spa_t *spa)
144 spa_stats_history_t *ssh = &spa->spa_stats.read_history;
145 char name[KSTAT_STRLEN];
148 mutex_init(&ssh->lock, NULL, MUTEX_DEFAULT, NULL);
149 list_create(&ssh->list, sizeof (spa_read_history_t),
150 offsetof(spa_read_history_t, srh_link));
156 (void) snprintf(name, KSTAT_STRLEN, "zfs/%s", spa_name(spa));
158 ksp = kstat_create(name, 0, "reads", "misc",
159 KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);
163 ksp->ks_lock = &ssh->lock;
165 ksp->ks_private = spa;
166 ksp->ks_update = spa_read_history_update;
167 kstat_set_raw_ops(ksp, spa_read_history_headers,
168 spa_read_history_data, spa_read_history_addr);
/*
 * Tear down the "reads" kstat: drain and free every queued record, then
 * destroy the list and lock.  Counterpart of spa_read_history_init().
 * NOTE(review): the kstat_delete() call and per-entry `ssh->size--`
 * appear elided in this excerpt.
 */
174 spa_read_history_destroy(spa_t *spa)
176 spa_stats_history_t *ssh = &spa->spa_stats.read_history;
177 spa_read_history_t *srh;
184 mutex_enter(&ssh->lock);
185 while ((srh = list_remove_head(&ssh->list))) {
187 kmem_free(srh, sizeof (spa_read_history_t));
190 ASSERT3U(ssh->size, ==, 0);
191 list_destroy(&ssh->list);
192 mutex_exit(&ssh->lock);
194 mutex_destroy(&ssh->lock);
/*
 * Record one read in the pool's history.  Fast-exits when collection is
 * disabled (and the list is already empty) or when cache hits are being
 * filtered out.  New records are inserted at the head; the tail is
 * trimmed until the list is within zfs_read_history entries.
 * NOTE(review): the early `return` statements after the two guards, the
 * srh->origin assignment, and the size++/size-- bookkeeping are elided
 * in this excerpt.
 */
198 spa_read_history_add(spa_t *spa, const zbookmark_phys_t *zb, uint32_t aflags)
200 spa_stats_history_t *ssh = &spa->spa_stats.read_history;
201 spa_read_history_t *srh, *rm;
203 ASSERT3P(spa, !=, NULL);
204 ASSERT3P(zb, !=, NULL);
206 if (zfs_read_history == 0 && ssh->size == 0)
209 if (zfs_read_history_hits == 0 && (aflags & ARC_FLAG_CACHED))
212 srh = kmem_zalloc(sizeof (spa_read_history_t), KM_SLEEP);
213 strlcpy(srh->comm, getcomm(), sizeof (srh->comm));
214 srh->start = gethrtime();
215 srh->objset = zb->zb_objset;
216 srh->object = zb->zb_object;
217 srh->level = zb->zb_level;
218 srh->blkid = zb->zb_blkid;
219 srh->aflags = aflags;
222 mutex_enter(&ssh->lock);
224 srh->uid = ssh->count++;
225 list_insert_head(&ssh->list, srh);
228 while (ssh->size > zfs_read_history) {
230 rm = list_remove_tail(&ssh->list);
231 kmem_free(rm, sizeof (spa_read_history_t));
234 mutex_exit(&ssh->lock);
238 * ==========================================================================
239 * SPA TXG History Routines
240 * ==========================================================================
244 * Txg statistics - Information exported regarding each txg sync
/*
 * One record per tracked txg.  times[] is indexed by txg_state_t and
 * holds the hrtime at which each state was completed; a zero entry means
 * the txg has not reached that state yet (see spa_txg_history_data).
 */
247 typedef struct spa_txg_history {
248 uint64_t txg; /* txg id */
249 txg_state_t state; /* active txg state */
250 uint64_t nread; /* number of bytes read */
251 uint64_t nwritten; /* number of bytes written */
252 uint64_t reads; /* number of read operations */
253 uint64_t writes; /* number of write operations */
254 uint64_t ndirty; /* number of dirty bytes */
255 hrtime_t times[TXG_STATE_COMMITTED]; /* completion times */
256 list_node_t sth_link;
/*
 * kstat raw-ops "headers" callback for the per-pool "txgs" kstat.
 */
260 spa_txg_history_headers(char *buf, size_t size)
262 (void) snprintf(buf, size, "%-8s %-16s %-5s %-12s %-12s %-12s "
263 "%-8s %-8s %-12s %-12s %-12s %-12s\n", "txg", "birth", "state",
264 "ndirty", "nread", "nwritten", "reads", "writes",
265 "otime", "qtime", "wtime", "stime");
/*
 * kstat raw-ops "data" callback: format one spa_txg_history_t record.
 * The single-letter state code comes from sth->state; the per-phase
 * durations (open/quiesce/wait/sync) are differences of adjacent
 * times[] entries and stay 0 until the later state has been reached.
 * NOTE(review): the final argument of the snprintf (the sync duration)
 * is elided in this excerpt.
 */
271 spa_txg_history_data(char *buf, size_t size, void *data)
273 spa_txg_history_t *sth = (spa_txg_history_t *)data;
274 uint64_t open = 0, quiesce = 0, wait = 0, sync = 0;
277 switch (sth->state) {
278 case TXG_STATE_BIRTH: state = 'B'; break;
279 case TXG_STATE_OPEN: state = 'O'; break;
280 case TXG_STATE_QUIESCED: state = 'Q'; break;
281 case TXG_STATE_WAIT_FOR_SYNC: state = 'W'; break;
282 case TXG_STATE_SYNCED: state = 'S'; break;
283 case TXG_STATE_COMMITTED: state = 'C'; break;
284 default: state = '?'; break;
287 if (sth->times[TXG_STATE_OPEN])
288 open = sth->times[TXG_STATE_OPEN] -
289 sth->times[TXG_STATE_BIRTH];
291 if (sth->times[TXG_STATE_QUIESCED])
292 quiesce = sth->times[TXG_STATE_QUIESCED] -
293 sth->times[TXG_STATE_OPEN];
295 if (sth->times[TXG_STATE_WAIT_FOR_SYNC])
296 wait = sth->times[TXG_STATE_WAIT_FOR_SYNC] -
297 sth->times[TXG_STATE_QUIESCED];
299 if (sth->times[TXG_STATE_SYNCED])
300 sync = sth->times[TXG_STATE_SYNCED] -
301 sth->times[TXG_STATE_WAIT_FOR_SYNC];
303 (void) snprintf(buf, size, "%-8llu %-16llu %-5c %-12llu "
304 "%-12llu %-12llu %-8llu %-8llu %-12llu %-12llu %-12llu %-12llu\n",
305 (longlong_t)sth->txg, sth->times[TXG_STATE_BIRTH], state,
306 (u_longlong_t)sth->ndirty,
307 (u_longlong_t)sth->nread, (u_longlong_t)sth->nwritten,
308 (u_longlong_t)sth->reads, (u_longlong_t)sth->writes,
309 (u_longlong_t)open, (u_longlong_t)quiesce, (u_longlong_t)wait,
316 * Calculate the address for the next spa_stats_history_t entry. The
317 * ssh->lock will be held until ksp->ks_ndata entries are processed.
/*
 * Same tail-to-head cursor walk as spa_read_history_addr(), over the
 * txg-history list.  NOTE(review): the leading `if (n == 0)` guard is
 * elided in this excerpt.
 */
320 spa_txg_history_addr(kstat_t *ksp, loff_t n)
322 spa_t *spa = ksp->ks_private;
323 spa_stats_history_t *ssh = &spa->spa_stats.txg_history;
325 ASSERT(MUTEX_HELD(&ssh->lock));
328 ssh->private = list_tail(&ssh->list);
329 else if (ssh->private)
330 ssh->private = list_prev(&ssh->list, ssh->private);
332 return (ssh->private);
336 * When the kstat is written discard all spa_txg_history_t entries. The
337 * ssh->lock will be held until ksp->ks_ndata entries are processed.
/*
 * kstat update callback: a KSTAT_WRITE clears the txg history; both
 * paths refresh ks_ndata/ks_data_size from the current list size.
 */
340 spa_txg_history_update(kstat_t *ksp, int rw)
342 spa_t *spa = ksp->ks_private;
343 spa_stats_history_t *ssh = &spa->spa_stats.txg_history;
345 ASSERT(MUTEX_HELD(&ssh->lock));
347 if (rw == KSTAT_WRITE) {
348 spa_txg_history_t *sth;
350 while ((sth = list_remove_head(&ssh->list))) {
352 kmem_free(sth, sizeof (spa_txg_history_t));
355 ASSERT3U(ssh->size, ==, 0);
358 ksp->ks_ndata = ssh->size;
359 ksp->ks_data_size = ssh->size * sizeof (spa_txg_history_t);
/*
 * Create the per-pool "zfs/<pool>/txgs" virtual raw kstat; mirrors
 * spa_read_history_init() for the txg history.  NOTE(review): the NULL
 * check on kstat_create() and kstat_install() are elided here.
 */
365 spa_txg_history_init(spa_t *spa)
367 spa_stats_history_t *ssh = &spa->spa_stats.txg_history;
368 char name[KSTAT_STRLEN];
371 mutex_init(&ssh->lock, NULL, MUTEX_DEFAULT, NULL);
372 list_create(&ssh->list, sizeof (spa_txg_history_t),
373 offsetof(spa_txg_history_t, sth_link));
379 (void) snprintf(name, KSTAT_STRLEN, "zfs/%s", spa_name(spa));
381 ksp = kstat_create(name, 0, "txgs", "misc",
382 KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);
386 ksp->ks_lock = &ssh->lock;
388 ksp->ks_private = spa;
389 ksp->ks_update = spa_txg_history_update;
390 kstat_set_raw_ops(ksp, spa_txg_history_headers,
391 spa_txg_history_data, spa_txg_history_addr);
/*
 * Tear down the "txgs" kstat: free all queued records, destroy the list
 * and lock.  NOTE(review): kstat_delete() appears elided here.
 */
397 spa_txg_history_destroy(spa_t *spa)
399 spa_stats_history_t *ssh = &spa->spa_stats.txg_history;
400 spa_txg_history_t *sth;
407 mutex_enter(&ssh->lock);
408 while ((sth = list_remove_head(&ssh->list))) {
410 kmem_free(sth, sizeof (spa_txg_history_t));
413 ASSERT3U(ssh->size, ==, 0);
414 list_destroy(&ssh->list);
415 mutex_exit(&ssh->lock);
417 mutex_destroy(&ssh->lock);
421 * Add a new txg to historical record.
/*
 * Allocates a record in the OPEN state with the given birth time,
 * inserts it at the head, and trims the tail to zfs_txg_history
 * entries.  NOTE(review): the early `return` after the guard, the
 * sth->txg assignment, and the size bookkeeping are elided here.
 */
424 spa_txg_history_add(spa_t *spa, uint64_t txg, hrtime_t birth_time)
426 spa_stats_history_t *ssh = &spa->spa_stats.txg_history;
427 spa_txg_history_t *sth, *rm;
429 if (zfs_txg_history == 0 && ssh->size == 0)
432 sth = kmem_zalloc(sizeof (spa_txg_history_t), KM_SLEEP);
434 sth->state = TXG_STATE_OPEN;
435 sth->times[TXG_STATE_BIRTH] = birth_time;
437 mutex_enter(&ssh->lock);
439 list_insert_head(&ssh->list, sth);
442 while (ssh->size > zfs_txg_history) {
444 rm = list_remove_tail(&ssh->list);
445 kmem_free(rm, sizeof (spa_txg_history_t));
448 mutex_exit(&ssh->lock);
452 * Set txg state completion time and increment current state.
/*
 * Linear search of the history list for the matching txg; stamps the
 * completion time for completed_state.  NOTE(review): the state
 * advancement and loop `break` mentioned in the comment above are
 * elided in this excerpt.
 */
455 spa_txg_history_set(spa_t *spa, uint64_t txg, txg_state_t completed_state,
456 hrtime_t completed_time)
458 spa_stats_history_t *ssh = &spa->spa_stats.txg_history;
459 spa_txg_history_t *sth;
462 if (zfs_txg_history == 0)
465 mutex_enter(&ssh->lock);
466 for (sth = list_head(&ssh->list); sth != NULL;
467 sth = list_next(&ssh->list, sth)) {
468 if (sth->txg == txg) {
469 sth->times[completed_state] = completed_time;
475 mutex_exit(&ssh->lock);
/*
 * Record the per-txg I/O totals (deltas computed by the caller,
 * spa_txg_history_fini_io) on the matching history record.
 * NOTE(review): the sth->nread/sth->reads assignments and the loop
 * `break` are elided in this excerpt.
 */
484 spa_txg_history_set_io(spa_t *spa, uint64_t txg, uint64_t nread,
485 uint64_t nwritten, uint64_t reads, uint64_t writes, uint64_t ndirty)
487 spa_stats_history_t *ssh = &spa->spa_stats.txg_history;
488 spa_txg_history_t *sth;
491 if (zfs_txg_history == 0)
494 mutex_enter(&ssh->lock);
495 for (sth = list_head(&ssh->list); sth != NULL;
496 sth = list_next(&ssh->list, sth)) {
497 if (sth->txg == txg) {
499 sth->nwritten = nwritten;
501 sth->writes = writes;
502 sth->ndirty = ndirty;
507 mutex_exit(&ssh->lock);
/*
 * Called when a txg starts syncing: snapshot the root vdev stats (vs1)
 * and the dirty-byte count so fini_io can compute deltas, and stamp the
 * WAIT_FOR_SYNC completion time.  NOTE(review): the `return (NULL)`
 * after the guard and the ts->txg assignment/return of ts are elided.
 */
513 spa_txg_history_init_io(spa_t *spa, uint64_t txg, dsl_pool_t *dp)
517 if (zfs_txg_history == 0)
520 ts = kmem_alloc(sizeof (txg_stat_t), KM_SLEEP);
522 spa_config_enter(spa, SCL_ALL, FTAG, RW_READER);
523 vdev_get_stats(spa->spa_root_vdev, &ts->vs1);
524 spa_config_exit(spa, SCL_ALL, FTAG);
527 ts->ndirty = dp->dp_dirty_pertxg[txg & TXG_MASK];
529 spa_txg_history_set(spa, txg, TXG_STATE_WAIT_FOR_SYNC, gethrtime());
/*
 * Called when a txg finishes syncing: take a second vdev-stats snapshot
 * (vs2), record vs2-vs1 deltas plus ndirty via spa_txg_history_set_io(),
 * stamp SYNCED, and free the txg_stat_t allocated by init_io.  Frees ts
 * even when history is disabled so the allocation never leaks.
 * NOTE(review): a NULL-ts guard and the ndirty argument of the
 * set_io call appear elided in this excerpt.
 */
535 spa_txg_history_fini_io(spa_t *spa, txg_stat_t *ts)
540 if (zfs_txg_history == 0) {
541 kmem_free(ts, sizeof (txg_stat_t));
545 spa_config_enter(spa, SCL_ALL, FTAG, RW_READER);
546 vdev_get_stats(spa->spa_root_vdev, &ts->vs2);
547 spa_config_exit(spa, SCL_ALL, FTAG);
549 spa_txg_history_set(spa, ts->txg, TXG_STATE_SYNCED, gethrtime());
550 spa_txg_history_set_io(spa, ts->txg,
551 ts->vs2.vs_bytes[ZIO_TYPE_READ] - ts->vs1.vs_bytes[ZIO_TYPE_READ],
552 ts->vs2.vs_bytes[ZIO_TYPE_WRITE] - ts->vs1.vs_bytes[ZIO_TYPE_WRITE],
553 ts->vs2.vs_ops[ZIO_TYPE_READ] - ts->vs1.vs_ops[ZIO_TYPE_READ],
554 ts->vs2.vs_ops[ZIO_TYPE_WRITE] - ts->vs1.vs_ops[ZIO_TYPE_WRITE],
557 kmem_free(ts, sizeof (txg_stat_t));
561 * ==========================================================================
562 * SPA TX Assign Histogram Routines
563 * ==========================================================================
567 * Tx statistics - Information exported regarding dmu_tx_assign time.
571 * When the kstat is written zero all buckets. When the kstat is read
572 * count the number of trailing buckets set to zero and update ks_ndata
573 * such that they are not output.
/*
 * kstat update callback for the dmu_tx_assign histogram.  ssh->private
 * points at the array of ssh->count kstat_named_t buckets.  The second
 * loop finds the highest nonzero bucket so trailing zeros are trimmed
 * from the output.  NOTE(review): the `break` ending that loop and the
 * ksp->ks_ndata = i assignment appear elided in this excerpt.
 */
576 spa_tx_assign_update(kstat_t *ksp, int rw)
578 spa_t *spa = ksp->ks_private;
579 spa_stats_history_t *ssh = &spa->spa_stats.tx_assign_histogram;
582 if (rw == KSTAT_WRITE) {
583 for (i = 0; i < ssh->count; i++)
584 ((kstat_named_t *)ssh->private)[i].value.ui64 = 0;
587 for (i = ssh->count; i > 0; i--)
588 if (((kstat_named_t *)ssh->private)[i-1].value.ui64 != 0)
592 ksp->ks_data_size = i * sizeof (kstat_named_t);
/*
 * Create the per-pool "dmu_tx_assign" named kstat: 42 power-of-two
 * latency buckets ("1 ns", "2 ns", ... up to ~2199 s), allocated in
 * ssh->private and sized in ssh->size (bytes).  Note: ssh->count is the
 * bucket count; ssh->size is count * sizeof (kstat_named_t).
 * NOTE(review): the NULL check on kstat_create() and kstat_install()
 * are elided in this excerpt.
 */
598 spa_tx_assign_init(spa_t *spa)
600 spa_stats_history_t *ssh = &spa->spa_stats.tx_assign_histogram;
601 char name[KSTAT_STRLEN];
606 mutex_init(&ssh->lock, NULL, MUTEX_DEFAULT, NULL);
608 ssh->count = 42; /* power of two buckets for 1ns to 2,199s */
609 ssh->size = ssh->count * sizeof (kstat_named_t);
610 ssh->private = kmem_alloc(ssh->size, KM_SLEEP);
612 (void) snprintf(name, KSTAT_STRLEN, "zfs/%s", spa_name(spa));
614 for (i = 0; i < ssh->count; i++) {
615 ks = &((kstat_named_t *)ssh->private)[i];
616 ks->data_type = KSTAT_DATA_UINT64;
618 (void) snprintf(ks->name, KSTAT_STRLEN, "%llu ns",
619 (u_longlong_t)1 << i);
622 ksp = kstat_create(name, 0, "dmu_tx_assign", "misc",
623 KSTAT_TYPE_NAMED, 0, KSTAT_FLAG_VIRTUAL);
627 ksp->ks_lock = &ssh->lock;
628 ksp->ks_data = ssh->private;
629 ksp->ks_ndata = ssh->count;
630 ksp->ks_data_size = ssh->size;
631 ksp->ks_private = spa;
632 ksp->ks_update = spa_tx_assign_update;
/*
 * Tear down the dmu_tx_assign histogram: free the bucket array and the
 * lock.  NOTE(review): the kstat_delete() call appears elided here.
 */
638 spa_tx_assign_destroy(spa_t *spa)
640 spa_stats_history_t *ssh = &spa->spa_stats.tx_assign_histogram;
647 kmem_free(ssh->private, ssh->size);
648 mutex_destroy(&ssh->lock);
652 spa_tx_assign_add_nsecs(spa_t *spa, uint64_t nsecs)
654 spa_stats_history_t *ssh = &spa->spa_stats.tx_assign_histogram;
657 while (((1ULL << idx) < nsecs) && (idx < ssh->size - 1))
660 atomic_inc_64(&((kstat_named_t *)ssh->private)[idx].value.ui64);
664 * ==========================================================================
665 * SPA IO History Routines
666 * ==========================================================================
/*
 * kstat update callback for the KSTAT_TYPE_IO "io" kstat: a write to
 * the kstat zeroes the accumulated I/O statistics.  (Return type and
 * `return (0)` elided in this excerpt.)
 */
669 spa_io_history_update(kstat_t *ksp, int rw)
671 if (rw == KSTAT_WRITE)
672 memset(ksp->ks_data, 0, ksp->ks_data_size);
/*
 * Create the per-pool "zfs/<pool>/io" disk-I/O kstat.  Unlike the other
 * kstats here it is KSTAT_TYPE_IO with one entry and is not virtual.
 * NOTE(review): the NULL check on kstat_create() and kstat_install()
 * are elided in this excerpt.
 */
678 spa_io_history_init(spa_t *spa)
680 spa_stats_history_t *ssh = &spa->spa_stats.io_history;
681 char name[KSTAT_STRLEN];
684 mutex_init(&ssh->lock, NULL, MUTEX_DEFAULT, NULL);
686 (void) snprintf(name, KSTAT_STRLEN, "zfs/%s", spa_name(spa));
688 ksp = kstat_create(name, 0, "io", "disk", KSTAT_TYPE_IO, 1, 0);
692 ksp->ks_lock = &ssh->lock;
693 ksp->ks_private = spa;
694 ksp->ks_update = spa_io_history_update;
/*
 * Tear down the "io" kstat and its lock.  NOTE(review): a NULL guard
 * around kstat_delete(ssh->kstat) may exist in the elided lines.
 */
700 spa_io_history_destroy(spa_t *spa)
702 spa_stats_history_t *ssh = &spa->spa_stats.io_history;
705 kstat_delete(ssh->kstat);
707 mutex_destroy(&ssh->lock);
711 * ==========================================================================
712 * SPA MMP History Routines
713 * ==========================================================================
717 * MMP statistics - Information exported regarding each MMP update
/*
 * One record per tracked multihost (MMP) write.  vdev_path (declared in
 * an elided line, freed with strfree() below) is heap-allocated via
 * strdup() in spa_mmp_history_add() and may be NULL.
 */
720 typedef struct spa_mmp_history {
721 uint64_t mmp_kstat_id; /* unique # for updates */
722 uint64_t txg; /* txg of last sync */
723 uint64_t timestamp; /* UTC time of of last sync */
724 uint64_t mmp_delay; /* nanosec since last MMP write */
725 uint64_t vdev_guid; /* unique ID of leaf vdev */
727 uint64_t vdev_label; /* vdev label */
728 int io_error; /* error status of MMP write */
729 hrtime_t duration; /* time from submission to completion */
730 list_node_t smh_link;
/*
 * kstat raw-ops "headers" callback for the per-pool "multihost" kstat.
 */
734 spa_mmp_history_headers(char *buf, size_t size)
736 (void) snprintf(buf, size, "%-10s %-10s %-10s %-6s %-10s %-12s %-24s "
737 "%-10s %s\n", "id", "txg", "timestamp", "error", "duration",
738 "mmp_delay", "vdev_guid", "vdev_label", "vdev_path");
/*
 * kstat raw-ops "data" callback: format one spa_mmp_history_t record;
 * a NULL vdev_path is rendered as "-".
 */
743 spa_mmp_history_data(char *buf, size_t size, void *data)
745 spa_mmp_history_t *smh = (spa_mmp_history_t *)data;
747 (void) snprintf(buf, size, "%-10llu %-10llu %-10llu %-6lld %-10lld "
748 "%-12llu %-24llu %-10llu %s\n",
749 (u_longlong_t)smh->mmp_kstat_id, (u_longlong_t)smh->txg,
750 (u_longlong_t)smh->timestamp, (longlong_t)smh->io_error,
751 (longlong_t)smh->duration, (u_longlong_t)smh->mmp_delay,
752 (u_longlong_t)smh->vdev_guid, (u_longlong_t)smh->vdev_label,
753 (smh->vdev_path ? smh->vdev_path : "-"));
759 * Calculate the address for the next spa_stats_history_t entry. The
760 * ssh->lock will be held until ksp->ks_ndata entries are processed.
/*
 * Same tail-to-head cursor walk as the read/txg variants, over the MMP
 * history list.  NOTE(review): the leading `if (n == 0)` guard is
 * elided in this excerpt.
 */
763 spa_mmp_history_addr(kstat_t *ksp, loff_t n)
765 spa_t *spa = ksp->ks_private;
766 spa_stats_history_t *ssh = &spa->spa_stats.mmp_history;
768 ASSERT(MUTEX_HELD(&ssh->lock));
771 ssh->private = list_tail(&ssh->list);
772 else if (ssh->private)
773 ssh->private = list_prev(&ssh->list, ssh->private);
775 return (ssh->private);
779 * When the kstat is written discard all spa_mmp_history_t entries. The
780 * ssh->lock will be held until ksp->ks_ndata entries are processed.
/*
 * kstat update callback: KSTAT_WRITE clears the MMP history, freeing
 * each record's strdup'd vdev_path before the record itself.
 * NOTE(review): the `if (smh->vdev_path)` guard preceding strfree()
 * appears elided in this excerpt.
 */
783 spa_mmp_history_update(kstat_t *ksp, int rw)
785 spa_t *spa = ksp->ks_private;
786 spa_stats_history_t *ssh = &spa->spa_stats.mmp_history;
788 ASSERT(MUTEX_HELD(&ssh->lock));
790 if (rw == KSTAT_WRITE) {
791 spa_mmp_history_t *smh;
793 while ((smh = list_remove_head(&ssh->list))) {
796 strfree(smh->vdev_path);
797 kmem_free(smh, sizeof (spa_mmp_history_t));
800 ASSERT3U(ssh->size, ==, 0);
803 ksp->ks_ndata = ssh->size;
804 ksp->ks_data_size = ssh->size * sizeof (spa_mmp_history_t);
/*
 * Create the per-pool "zfs/<pool>/multihost" virtual raw kstat; mirrors
 * spa_read_history_init().  NOTE(review): the NULL check on
 * kstat_create() and kstat_install() are elided in this excerpt.
 */
810 spa_mmp_history_init(spa_t *spa)
812 spa_stats_history_t *ssh = &spa->spa_stats.mmp_history;
813 char name[KSTAT_STRLEN];
816 mutex_init(&ssh->lock, NULL, MUTEX_DEFAULT, NULL);
817 list_create(&ssh->list, sizeof (spa_mmp_history_t),
818 offsetof(spa_mmp_history_t, smh_link));
824 (void) snprintf(name, KSTAT_STRLEN, "zfs/%s", spa_name(spa));
826 ksp = kstat_create(name, 0, "multihost", "misc",
827 KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VIRTUAL);
831 ksp->ks_lock = &ssh->lock;
833 ksp->ks_private = spa;
834 ksp->ks_update = spa_mmp_history_update;
835 kstat_set_raw_ops(ksp, spa_mmp_history_headers,
836 spa_mmp_history_data, spa_mmp_history_addr);
/*
 * Tear down the "multihost" kstat: free every record (including its
 * strdup'd vdev_path), then the list and lock.  NOTE(review): the
 * kstat_delete() call and vdev_path NULL guard appear elided here.
 */
842 spa_mmp_history_destroy(spa_t *spa)
844 spa_stats_history_t *ssh = &spa->spa_stats.mmp_history;
845 spa_mmp_history_t *smh;
852 mutex_enter(&ssh->lock);
853 while ((smh = list_remove_head(&ssh->list))) {
856 strfree(smh->vdev_path);
857 kmem_free(smh, sizeof (spa_mmp_history_t));
860 ASSERT3U(ssh->size, ==, 0);
861 list_destroy(&ssh->list);
862 mutex_exit(&ssh->lock);
864 mutex_destroy(&ssh->lock);
868 * Set MMP write duration and error status in existing record.
/*
 * Linear search by mmp_kstat_id (newest first); fills in the completion
 * fields recorded when the MMP write finishes.  NOTE(review): the
 * `duration` parameter declaration and the loop `break` are in elided
 * lines of this excerpt.
 */
871 spa_mmp_history_set(spa_t *spa, uint64_t mmp_kstat_id, int io_error,
874 spa_stats_history_t *ssh = &spa->spa_stats.mmp_history;
875 spa_mmp_history_t *smh;
878 if (zfs_multihost_history == 0 && ssh->size == 0)
881 mutex_enter(&ssh->lock);
882 for (smh = list_head(&ssh->list); smh != NULL;
883 smh = list_next(&ssh->list, smh)) {
884 if (smh->mmp_kstat_id == mmp_kstat_id) {
885 smh->io_error = io_error;
886 smh->duration = duration;
891 mutex_exit(&ssh->lock);
897 * Add a new MMP write to historical record.
/*
 * Allocates and fills a record for one MMP write, inserts it at the
 * head, and trims the tail to zfs_multihost_history entries (freeing
 * each trimmed record's strdup'd vdev_path first).  NOTE(review): the
 * early `return` after the guard, the `if (vd->vdev_path)` guard before
 * strdup(), the smh->txg assignment, and the size bookkeeping appear
 * in elided lines of this excerpt.
 */
900 spa_mmp_history_add(uint64_t txg, uint64_t timestamp, uint64_t mmp_delay,
901 vdev_t *vd, int label, uint64_t mmp_kstat_id)
903 spa_t *spa = vd->vdev_spa;
904 spa_stats_history_t *ssh = &spa->spa_stats.mmp_history;
905 spa_mmp_history_t *smh, *rm;
907 if (zfs_multihost_history == 0 && ssh->size == 0)
910 smh = kmem_zalloc(sizeof (spa_mmp_history_t), KM_SLEEP);
912 smh->timestamp = timestamp;
913 smh->mmp_delay = mmp_delay;
914 smh->vdev_guid = vd->vdev_guid;
916 smh->vdev_path = strdup(vd->vdev_path);
917 smh->vdev_label = label;
918 smh->mmp_kstat_id = mmp_kstat_id;
920 mutex_enter(&ssh->lock);
922 list_insert_head(&ssh->list, smh);
925 while (ssh->size > zfs_multihost_history) {
927 rm = list_remove_tail(&ssh->list);
929 strfree(rm->vdev_path);
930 kmem_free(rm, sizeof (spa_mmp_history_t));
933 mutex_exit(&ssh->lock);
/*
 * Initialize all per-pool statistics kstats.  Called once per spa_t at
 * pool creation/import.
 */
937 spa_stats_init(spa_t *spa)
939 spa_read_history_init(spa);
940 spa_txg_history_init(spa);
941 spa_tx_assign_init(spa);
942 spa_io_history_init(spa);
943 spa_mmp_history_init(spa);
/*
 * Tear down all per-pool statistics kstats.  Counterpart of
 * spa_stats_init(); note the teardown order differs from the init
 * order, which is safe because the subsystems are independent.
 */
947 spa_stats_destroy(spa_t *spa)
949 spa_tx_assign_destroy(spa);
950 spa_txg_history_destroy(spa);
951 spa_read_history_destroy(spa);
952 spa_io_history_destroy(spa);
953 spa_mmp_history_destroy(spa);
/*
 * Linux kernel module parameters (mode 0644: root-writable, world-
 * readable) exposing the history-depth tunables declared at the top of
 * this file.  Compiled only for in-kernel SPL builds.
 */
956 #if defined(_KERNEL) && defined(HAVE_SPL)
958 module_param(zfs_read_history, int, 0644);
959 MODULE_PARM_DESC(zfs_read_history,
960 "Historical statistics for the last N reads");
962 module_param(zfs_read_history_hits, int, 0644);
963 MODULE_PARM_DESC(zfs_read_history_hits,
964 "Include cache hits in read history");
966 module_param(zfs_txg_history, int, 0644);
967 MODULE_PARM_DESC(zfs_txg_history,
968 "Historical statistics for the last N txgs");
970 module_param(zfs_multihost_history, int, 0644);
971 MODULE_PARM_DESC(zfs_multihost_history,
972 "Historical statistics for last N multihost writes");