/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/fm/fs/zfs.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/sysevent.h>

/*
 * This general routine is responsible for generating all the different ZFS
 * ereports.  The payload is dependent on the class, and which arguments are
 * supplied to the function:
 *
 *	EREPORT			POOL	VDEV	IO
 *	checksum		X	X	X
 *	io			X	X	X
 *	data			X		X
 *	device			X	X
 *	pool			X
 *
 * If we are in a loading state, all errors are chained together by the same
 * SPA-wide ENA (Error Numeric Association).
 *
 * For isolated I/O requests, we get the ENA from the zio_t.  The propagation
 * gets very complicated due to RAID-Z, gang blocks, and vdev caching.  We want
 * to chain together all ereports associated with a logical piece of data.  For
 * read I/Os, there are basically three 'types' of I/O, which form a roughly
 * layered diagram:
 *
 *	+---------------+
 *	| Aggregate I/O |	No associated logical data or device
 *	+---------------+
 *		|
 *		V
 *	+---------------+	Reads associated with a piece of logical data.
 *	|   Read I/O	|	This includes reads on behalf of RAID-Z,
 *	+---------------+	mirrors, gang blocks, retries, etc.
 *		|
 *		V
 *	+---------------+	Reads associated with a particular device, but
 *	|  Physical I/O	|	no logical data.  Issued as part of vdev caching
 *	+---------------+	and I/O aggregation.
 *
 * Note that 'physical I/O' here is not the same terminology as used in the
 * rest of ZIO.  Typically, 'physical I/O' simply means that there is no
 * attached block pointer.  But I/O with no associated block pointer can still
 * be related to a logical piece of data (e.g. RAID-Z requests).
 *
 * Purely physical I/Os always have unique ENAs.  They are not related to a
 * particular piece of logical data, and therefore cannot be chained together.
 * We still generate an ereport, but the DE doesn't correlate it with any
 * logical piece of data.  When such an I/O fails, the delegated I/O requests
 * will issue a retry, which will trigger the 'real' ereport with the correct
 * ENA.
 *
 * We keep track of the ENA for a ZIO chain through the 'io_logical' member.
 * When a new logical I/O is issued, we set this to point to itself.  Child
 * I/Os then inherit this pointer, so that once it is first set, subsequent
 * failures will use the same ENA.  For vdev cache fill and queue aggregation
 * I/O, this pointer is set to NULL, and no ereport will be generated (since it
 * doesn't actually correspond to any particular device or piece of data,
 * and the caller will always retry without caching or queueing anyway).
 *
 * For checksum errors, we want to include more information about the actual
 * error which occurs.  Accordingly, we build an ereport when the error is
 * noticed, but instead of sending it in immediately, we hang it off of the
 * io_cksum_report field of the logical IO.  When the logical IO completes
 * (successfully or not), zfs_ereport_finish_checksum() is called with the
 * good and bad versions of the buffer (if available), and we annotate the
 * ereport with information about the differences.
 */
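
/*
 * Illustrative (hypothetical) flow of the ENA chaining described above: a
 * logical read of a mirrored block that fails on both children produces two
 * ereports stamped with the ENA of the logical zio, allowing the DE to
 * correlate them as a single logical failure:
 *
 *	logical zio (io_ena = E)
 *	    +--> child read, disk A fails --> ereport (ena = E)
 *	    +--> child read, disk B fails --> ereport (ena = E)
 */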

#ifdef _KERNEL
static void
zfs_zevent_post_cb(nvlist_t *nvl, nvlist_t *detector)
{
	if (nvl)
		fm_nvlist_destroy(nvl, FM_NVA_FREE);

	if (detector)
		fm_nvlist_destroy(detector, FM_NVA_FREE);
}

/*
 * We want to rate limit ZIO delay and checksum events so as to not
 * flood ZED when a disk is acting up.
 *
 * Returns 1 if we're ratelimiting, 0 if not.
 */
static int
zfs_is_ratelimiting_event(const char *subclass, vdev_t *vd)
{
	int rc = 0;
	/*
	 * __ratelimit() returns 1 if we're *not* ratelimiting and 0 if we
	 * are.  Invert it to get our return value.
	 */
	if (strcmp(subclass, FM_EREPORT_ZFS_DELAY) == 0) {
		rc = !zfs_ratelimit(&vd->vdev_delay_rl);
	} else if (strcmp(subclass, FM_EREPORT_ZFS_CHECKSUM) == 0) {
		rc = !zfs_ratelimit(&vd->vdev_checksum_rl);
	}

	if (rc) {
		/* We're rate limiting */
		fm_erpt_dropped_increment();
	}

	return (rc);
}
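
/*
 * Example (illustrative): a flapping disk that generates thousands of delay
 * events per second will, once the limiter engages, produce only a bounded
 * trickle of ereports; each suppressed event still bumps the dropped-ereport
 * counter via fm_erpt_dropped_increment(), so the suppression is observable.
 */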

static void
zfs_ereport_start(nvlist_t **ereport_out, nvlist_t **detector_out,
    const char *subclass, spa_t *spa, vdev_t *vd, zio_t *zio,
    uint64_t stateoroffset, uint64_t size)
{
	nvlist_t *ereport, *detector;
	uint64_t ena;
	char class[64];

	/*
	 * If we are doing a spa_tryimport() or in recovery mode,
	 * ignore errors.
	 */
	if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT ||
	    spa_load_state(spa) == SPA_LOAD_RECOVER)
		return;

	/*
	 * If we are in the middle of opening a pool, and the previous attempt
	 * failed, don't bother logging any new ereports - we're just going to
	 * get the same diagnosis anyway.
	 */
	if (spa_load_state(spa) != SPA_LOAD_NONE &&
	    spa->spa_last_open_failed)
		return;

	if (zio != NULL) {
		/*
		 * If this is not a read or write zio, ignore the error.  This
		 * can occur if the DKIOCFLUSHWRITECACHE ioctl fails.
		 */
		if (zio->io_type != ZIO_TYPE_READ &&
		    zio->io_type != ZIO_TYPE_WRITE)
			return;

		if (vd != NULL) {
			/*
			 * If the vdev has already been marked as failing due
			 * to a failed probe, then ignore any subsequent I/O
			 * errors, as the DE will automatically fault the vdev
			 * on the first such failure.  This also catches cases
			 * where vdev_remove_wanted is set and the device has
			 * not yet been asynchronously placed into the REMOVED
			 * state.
			 */
			if (zio->io_vd == vd && !vdev_accessible(vd, zio))
				return;

			/*
			 * Ignore checksum errors for reads from DTL regions of
			 * leaf vdevs.
			 */
			if (zio->io_type == ZIO_TYPE_READ &&
			    zio->io_error == ECKSUM &&
			    vd->vdev_ops->vdev_op_leaf &&
			    vdev_dtl_contains(vd, DTL_MISSING, zio->io_txg, 1))
				return;
		}
	}

	/*
	 * For probe failure, we want to avoid posting ereports if we've
	 * already removed the device in the meantime.
	 */
	if (vd != NULL &&
	    strcmp(subclass, FM_EREPORT_ZFS_PROBE_FAILURE) == 0 &&
	    (vd->vdev_remove_wanted || vd->vdev_state == VDEV_STATE_REMOVED))
		return;

	if ((ereport = fm_nvlist_create(NULL)) == NULL)
		return;

	if ((detector = fm_nvlist_create(NULL)) == NULL) {
		fm_nvlist_destroy(ereport, FM_NVA_FREE);
		return;
	}

	if ((strcmp(subclass, FM_EREPORT_ZFS_DELAY) == 0) &&
	    (zio != NULL) && (!zio->io_timestamp)) {
		/* Ignore bogus delay events; free the nvlists before bailing */
		fm_nvlist_destroy(detector, FM_NVA_FREE);
		fm_nvlist_destroy(ereport, FM_NVA_FREE);
		return;
	}

	/*
	 * Serialize ereport generation
	 */
	mutex_enter(&spa->spa_errlist_lock);

	/*
	 * Determine the ENA to use for this event.  If we are in a loading
	 * state, use a SPA-wide ENA.  Otherwise, if we are in an I/O state,
	 * use a root zio-wide ENA.  Otherwise, simply use a unique ENA.
	 */
	if (spa_load_state(spa) != SPA_LOAD_NONE) {
		if (spa->spa_ena == 0)
			spa->spa_ena = fm_ena_generate(0, FM_ENA_FMT1);
		ena = spa->spa_ena;
	} else if (zio != NULL && zio->io_logical != NULL) {
		if (zio->io_logical->io_ena == 0)
			zio->io_logical->io_ena =
			    fm_ena_generate(0, FM_ENA_FMT1);
		ena = zio->io_logical->io_ena;
	} else {
		ena = fm_ena_generate(0, FM_ENA_FMT1);
	}

	/*
	 * Construct the full class, detector, and other standard FMA fields.
	 */
	(void) snprintf(class, sizeof (class), "%s.%s",
	    ZFS_ERROR_CLASS, subclass);

	fm_fmri_zfs_set(detector, FM_ZFS_SCHEME_VERSION, spa_guid(spa),
	    vd != NULL ? vd->vdev_guid : 0);

	fm_ereport_set(ereport, FM_EREPORT_VERSION, class, ena, detector, NULL);

	/*
	 * Construct the per-ereport payload, depending on which parameters are
	 * passed in.
	 */

	/*
	 * Generic payload members common to all ereports.
	 */
	fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_POOL,
	    DATA_TYPE_STRING, spa_name(spa), FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
	    DATA_TYPE_UINT64, spa_guid(spa),
	    FM_EREPORT_PAYLOAD_ZFS_POOL_CONTEXT, DATA_TYPE_INT32,
	    spa_load_state(spa), NULL);

	fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_POOL_FAILMODE,
	    DATA_TYPE_STRING,
	    spa_get_failmode(spa) == ZIO_FAILURE_MODE_WAIT ?
	    FM_EREPORT_FAILMODE_WAIT :
	    spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE ?
	    FM_EREPORT_FAILMODE_CONTINUE : FM_EREPORT_FAILMODE_PANIC,
	    NULL);

	if (vd != NULL) {
		vdev_t *pvd = vd->vdev_parent;
		vdev_queue_t *vq = &vd->vdev_queue;
		vdev_stat_t *vs = &vd->vdev_stat;
		vdev_t *spare_vd;
		uint64_t *spare_guids;
		char **spare_paths;
		int i, spare_count;

		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID,
		    DATA_TYPE_UINT64, vd->vdev_guid,
		    FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE,
		    DATA_TYPE_STRING, vd->vdev_ops->vdev_op_type, NULL);
		if (vd->vdev_path != NULL)
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_PATH,
			    DATA_TYPE_STRING, vd->vdev_path, NULL);
		if (vd->vdev_devid != NULL)
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_DEVID,
			    DATA_TYPE_STRING, vd->vdev_devid, NULL);
		if (vd->vdev_fru != NULL)
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_FRU,
			    DATA_TYPE_STRING, vd->vdev_fru, NULL);
		if (vd->vdev_enc_sysfs_path != NULL)
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_ENC_SYSFS_PATH,
			    DATA_TYPE_STRING, vd->vdev_enc_sysfs_path, NULL);

		fm_payload_set(ereport,
		    FM_EREPORT_PAYLOAD_ZFS_VDEV_ASHIFT,
		    DATA_TYPE_UINT64, vd->vdev_ashift, NULL);

		if (vq != NULL) {
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_COMP_TS,
			    DATA_TYPE_UINT64, vq->vq_io_complete_ts, NULL);
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_DELTA_TS,
			    DATA_TYPE_UINT64, vq->vq_io_delta_ts, NULL);
		}

		if (vs != NULL) {
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_READ_ERRORS,
			    DATA_TYPE_UINT64, vs->vs_read_errors,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_WRITE_ERRORS,
			    DATA_TYPE_UINT64, vs->vs_write_errors,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_CKSUM_ERRORS,
			    DATA_TYPE_UINT64, vs->vs_checksum_errors, NULL);
		}

		if (pvd != NULL) {
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_PARENT_GUID,
			    DATA_TYPE_UINT64, pvd->vdev_guid,
			    FM_EREPORT_PAYLOAD_ZFS_PARENT_TYPE,
			    DATA_TYPE_STRING, pvd->vdev_ops->vdev_op_type,
			    NULL);
			if (pvd->vdev_path != NULL)
				fm_payload_set(ereport,
				    FM_EREPORT_PAYLOAD_ZFS_PARENT_PATH,
				    DATA_TYPE_STRING, pvd->vdev_path, NULL);
			if (pvd->vdev_devid != NULL)
				fm_payload_set(ereport,
				    FM_EREPORT_PAYLOAD_ZFS_PARENT_DEVID,
				    DATA_TYPE_STRING, pvd->vdev_devid, NULL);
		}

		spare_count = spa->spa_spares.sav_count;
		spare_paths = kmem_zalloc(sizeof (char *) * spare_count,
		    KM_SLEEP);
		spare_guids = kmem_zalloc(sizeof (uint64_t) * spare_count,
		    KM_SLEEP);

		for (i = 0; i < spare_count; i++) {
			spare_vd = spa->spa_spares.sav_vdevs[i];
			if (spare_vd) {
				spare_paths[i] = spare_vd->vdev_path;
				spare_guids[i] = spare_vd->vdev_guid;
			}
		}

		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_VDEV_SPARE_PATHS,
		    DATA_TYPE_STRING_ARRAY, spare_count, spare_paths,
		    FM_EREPORT_PAYLOAD_ZFS_VDEV_SPARE_GUIDS,
		    DATA_TYPE_UINT64_ARRAY, spare_count, spare_guids, NULL);

		kmem_free(spare_guids, sizeof (uint64_t) * spare_count);
		kmem_free(spare_paths, sizeof (char *) * spare_count);
	}

	if (zio != NULL) {
		/*
		 * Payload common to all I/Os.
		 */
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_ERR,
		    DATA_TYPE_INT32, zio->io_error, NULL);
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS,
		    DATA_TYPE_INT32, zio->io_flags, NULL);
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_STAGE,
		    DATA_TYPE_UINT32, zio->io_stage, NULL);
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_PIPELINE,
		    DATA_TYPE_UINT32, zio->io_pipeline, NULL);
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_DELAY,
		    DATA_TYPE_UINT64, zio->io_delay, NULL);
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_TIMESTAMP,
		    DATA_TYPE_UINT64, zio->io_timestamp, NULL);
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_DELTA,
		    DATA_TYPE_UINT64, zio->io_delta, NULL);

		/*
		 * If the 'size' parameter is non-zero, it indicates this is a
		 * RAID-Z or other I/O where the physical offset and length are
		 * provided for us, instead of within the zio_t.
		 */
		if (vd != NULL) {
			if (size)
				fm_payload_set(ereport,
				    FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET,
				    DATA_TYPE_UINT64, stateoroffset,
				    FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE,
				    DATA_TYPE_UINT64, size, NULL);
			else
				fm_payload_set(ereport,
				    FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET,
				    DATA_TYPE_UINT64, zio->io_offset,
				    FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE,
				    DATA_TYPE_UINT64, zio->io_size, NULL);
		}

		/*
		 * Payload for I/Os with corresponding logical information.
		 */
		if (zio->io_logical != NULL)
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJSET,
			    DATA_TYPE_UINT64,
			    zio->io_logical->io_bookmark.zb_objset,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJECT,
			    DATA_TYPE_UINT64,
			    zio->io_logical->io_bookmark.zb_object,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_LEVEL,
			    DATA_TYPE_INT64,
			    zio->io_logical->io_bookmark.zb_level,
			    FM_EREPORT_PAYLOAD_ZFS_ZIO_BLKID,
			    DATA_TYPE_UINT64,
			    zio->io_logical->io_bookmark.zb_blkid, NULL);
	} else if (vd != NULL) {
		/*
		 * If we have a vdev but no zio, this is a device fault, and the
		 * 'stateoroffset' parameter indicates the previous state of the
		 * vdev.
		 */
		fm_payload_set(ereport,
		    FM_EREPORT_PAYLOAD_ZFS_PREV_STATE,
		    DATA_TYPE_UINT64, stateoroffset, NULL);
	}

	mutex_exit(&spa->spa_errlist_lock);

	*ereport_out = ereport;
	*detector_out = detector;
}

/* if it's <= 128 bytes, save the corruption directly */
#define	ZFM_MAX_INLINE		(128 / sizeof (uint64_t))

#define	MAX_RANGES		16
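
/*
 * With 8-byte words, ZFM_MAX_INLINE works out to 16 uint64_t entries
 * (16 * sizeof (uint64_t) == 128 bytes of corrupt data stored verbatim).
 */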

typedef struct zfs_ecksum_info {
	/* histograms of set and cleared bits by bit number in a 64-bit word */
	uint16_t zei_histogram_set[sizeof (uint64_t) * NBBY];
	uint16_t zei_histogram_cleared[sizeof (uint64_t) * NBBY];

	/* inline arrays of bits set and cleared. */
	uint64_t zei_bits_set[ZFM_MAX_INLINE];
	uint64_t zei_bits_cleared[ZFM_MAX_INLINE];

	/*
	 * for each range, the number of bits set and cleared.  The Hamming
	 * distance between the good and bad buffers is the sum of them all.
	 */
	uint32_t zei_range_sets[MAX_RANGES];
	uint32_t zei_range_clears[MAX_RANGES];

	struct zei_ranges {
		uint32_t zr_start;
		uint32_t zr_end;
	} zei_ranges[MAX_RANGES];

	size_t zei_range_count;
	uint32_t zei_mingap;
	uint32_t zei_allowed_mingap;

} zfs_ecksum_info_t;

static void
update_histogram(uint64_t value_arg, uint16_t *hist, uint32_t *count)
{
	size_t i;
	size_t bits = 0;
	uint64_t value = BE_64(value_arg);

	/* We store the bits in big-endian (largest-first) order */
	for (i = 0; i < 64; i++) {
		if (value & (1ull << i)) {
			/* saturate the per-bit counters at UINT16_MAX */
			if (hist[63 - i] < UINT16_MAX)
				hist[63 - i]++;
			bits++;
		}
	}

	/* update the count of bits changed */
	*count += bits;
}
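
/*
 * Example (assuming a big-endian interpretation of the word): for
 * value_arg == 0x8000000000000001, bit 63 and bit 0 are set, so hist[0]
 * (the MSB bucket) and hist[63] (the LSB bucket) are each incremented,
 * and *count grows by 2.
 */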

/*
 * We've now filled up the range array, and need to increase "mingap" and
 * shrink the range list accordingly.  zei_mingap is always the smallest
 * distance between array entries, so we set the new_allowed_gap to be
 * one greater than that.  We then go through the list, joining together
 * any ranges which are closer than the new_allowed_gap.
 *
 * By construction, there will be at least one join.  We also update
 * zei_mingap to the new smallest gap, to prepare for our next invocation.
 */
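
/*
 * Worked example (hypothetical): suppose the array holds
 * {[0,8) [10,12) [14,16)} and zei_mingap == 2.  new_allowed_gap becomes 3,
 * and since both inter-range gaps (2 and 2) fall below it, all three
 * entries are joined into the single range [0,16).
 */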
static void
zei_shrink_ranges(zfs_ecksum_info_t *eip)
{
	uint32_t mingap = UINT32_MAX;
	uint32_t new_allowed_gap = eip->zei_mingap + 1;

	size_t idx, output;
	size_t max = eip->zei_range_count;

	struct zei_ranges *r = eip->zei_ranges;

	ASSERT3U(eip->zei_range_count, >, 0);
	ASSERT3U(eip->zei_range_count, <=, MAX_RANGES);

	output = idx = 0;
	while (idx < max - 1) {
		uint32_t start = r[idx].zr_start;
		uint32_t end = r[idx].zr_end;

		while (idx < max - 1) {
			uint32_t nstart, nend, gap;

			idx++;
			nstart = r[idx].zr_start;
			nend = r[idx].zr_end;

			gap = nstart - end;
			if (gap < new_allowed_gap) {
				end = nend;
				continue;
			}
			if (gap < mingap)
				mingap = gap;
			break;
		}
		r[output].zr_start = start;
		r[output].zr_end = end;
		output++;
	}
	ASSERT3U(output, <, eip->zei_range_count);
	eip->zei_range_count = output;
	eip->zei_mingap = mingap;
	eip->zei_allowed_mingap = new_allowed_gap;
}

static void
zei_add_range(zfs_ecksum_info_t *eip, int start, int end)
{
	struct zei_ranges *r = eip->zei_ranges;
	size_t count = eip->zei_range_count;

	if (count >= MAX_RANGES) {
		zei_shrink_ranges(eip);
		count = eip->zei_range_count;
	}
	if (count == 0) {
		eip->zei_mingap = UINT32_MAX;
		eip->zei_allowed_mingap = 1;
	} else {
		int gap = start - r[count - 1].zr_end;

		if (gap < eip->zei_allowed_mingap) {
			r[count - 1].zr_end = end;
			return;
		}
		if (gap < eip->zei_mingap)
			eip->zei_mingap = gap;
	}
	r[count].zr_start = start;
	r[count].zr_end = end;
	eip->zei_range_count++;
}

static size_t
zei_range_total_size(zfs_ecksum_info_t *eip)
{
	struct zei_ranges *r = eip->zei_ranges;
	size_t count = eip->zei_range_count;
	size_t result = 0;
	size_t idx;

	for (idx = 0; idx < count; idx++)
		result += (r[idx].zr_end - r[idx].zr_start);

	return (result);
}

static zfs_ecksum_info_t *
annotate_ecksum(nvlist_t *ereport, zio_bad_cksum_t *info,
    const uint8_t *goodbuf, const uint8_t *badbuf, size_t size,
    boolean_t drop_if_identical)
{
	const uint64_t *good = (const uint64_t *)goodbuf;
	const uint64_t *bad = (const uint64_t *)badbuf;

	uint64_t allset = 0;
	uint64_t allcleared = 0;

	size_t nui64s = size / sizeof (uint64_t);

	size_t inline_size;
	int no_inline = 0;
	size_t idx;
	size_t range;

	size_t offset = 0;
	size_t start = 0;
	size_t end = 0;

	zfs_ecksum_info_t *eip = kmem_zalloc(sizeof (*eip), KM_SLEEP);

	/* don't do any annotation for injected checksum errors */
	if (info != NULL && info->zbc_injected)
		return (eip);

	if (info != NULL && info->zbc_has_cksum) {
		fm_payload_set(ereport,
		    FM_EREPORT_PAYLOAD_ZFS_CKSUM_EXPECTED,
		    DATA_TYPE_UINT64_ARRAY,
		    sizeof (info->zbc_expected) / sizeof (uint64_t),
		    (uint64_t *)&info->zbc_expected,
		    FM_EREPORT_PAYLOAD_ZFS_CKSUM_ACTUAL,
		    DATA_TYPE_UINT64_ARRAY,
		    sizeof (info->zbc_actual) / sizeof (uint64_t),
		    (uint64_t *)&info->zbc_actual,
		    FM_EREPORT_PAYLOAD_ZFS_CKSUM_ALGO,
		    DATA_TYPE_STRING,
		    info->zbc_checksum_name,
		    NULL);

		if (info->zbc_byteswapped) {
			fm_payload_set(ereport,
			    FM_EREPORT_PAYLOAD_ZFS_CKSUM_BYTESWAP,
			    DATA_TYPE_BOOLEAN, 1,
			    NULL);
		}
	}

	if (badbuf == NULL || goodbuf == NULL)
		return (eip);

	ASSERT3U(size, ==, nui64s * sizeof (uint64_t));
	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
	ASSERT3U(size, <=, UINT32_MAX);

	/* build up the range list by comparing the two buffers. */
	for (idx = 0; idx < nui64s; idx++) {
		if (good[idx] == bad[idx]) {
			if (start == end)
				continue;

			zei_add_range(eip, start, idx);
			start = end = idx;
		} else {
			if (start == end)
				start = idx;
			end = idx + 1;
		}
	}
	if (start != end)
		zei_add_range(eip, start, idx);

	/* See if it will fit in our inline buffers */
	inline_size = zei_range_total_size(eip);
	if (inline_size > ZFM_MAX_INLINE)
		no_inline = 1;

	/*
	 * If there is no change and we want to drop if the buffers are
	 * identical, do so.
	 */
	if (inline_size == 0 && drop_if_identical) {
		kmem_free(eip, sizeof (*eip));
		return (NULL);
	}

	/*
	 * Now walk through the ranges, filling in the details of the
	 * differences.  Also convert our uint64_t-array offsets to byte
	 * offsets.
	 */
	for (range = 0; range < eip->zei_range_count; range++) {
		size_t start = eip->zei_ranges[range].zr_start;
		size_t end = eip->zei_ranges[range].zr_end;

		for (idx = start; idx < end; idx++) {
			uint64_t set, cleared;

			// bits set in bad, but not in good
			set = ((~good[idx]) & bad[idx]);
			// bits set in good, but not in bad
			cleared = (good[idx] & (~bad[idx]));

			allset |= set;
			allcleared |= cleared;

			if (!no_inline) {
				ASSERT3U(offset, <, inline_size);
				eip->zei_bits_set[offset] = set;
				eip->zei_bits_cleared[offset] = cleared;
				offset++;
			}

			update_histogram(set, eip->zei_histogram_set,
			    &eip->zei_range_sets[range]);
			update_histogram(cleared, eip->zei_histogram_cleared,
			    &eip->zei_range_clears[range]);
		}

		/* convert to byte offsets */
		eip->zei_ranges[range].zr_start *= sizeof (uint64_t);
		eip->zei_ranges[range].zr_end *= sizeof (uint64_t);
	}
	eip->zei_allowed_mingap *= sizeof (uint64_t);
	inline_size *= sizeof (uint64_t);

	/* fill in ereport */
	fm_payload_set(ereport,
	    FM_EREPORT_PAYLOAD_ZFS_BAD_OFFSET_RANGES,
	    DATA_TYPE_UINT32_ARRAY, 2 * eip->zei_range_count,
	    (uint32_t *)eip->zei_ranges,
	    FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_MIN_GAP,
	    DATA_TYPE_UINT32, eip->zei_allowed_mingap,
	    FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_SETS,
	    DATA_TYPE_UINT32_ARRAY, eip->zei_range_count, eip->zei_range_sets,
	    FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_CLEARS,
	    DATA_TYPE_UINT32_ARRAY, eip->zei_range_count, eip->zei_range_clears,
	    NULL);

	if (!no_inline) {
		fm_payload_set(ereport,
		    FM_EREPORT_PAYLOAD_ZFS_BAD_SET_BITS,
		    DATA_TYPE_UINT8_ARRAY,
		    inline_size, (uint8_t *)eip->zei_bits_set,
		    FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_BITS,
		    DATA_TYPE_UINT8_ARRAY,
		    inline_size, (uint8_t *)eip->zei_bits_cleared,
		    NULL);
	} else {
		fm_payload_set(ereport,
		    FM_EREPORT_PAYLOAD_ZFS_BAD_SET_HISTOGRAM,
		    DATA_TYPE_UINT16_ARRAY,
		    NBBY * sizeof (uint64_t), eip->zei_histogram_set,
		    FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_HISTOGRAM,
		    DATA_TYPE_UINT16_ARRAY,
		    NBBY * sizeof (uint64_t), eip->zei_histogram_cleared,
		    NULL);
	}
	return (eip);
}
#endif

void
zfs_ereport_post(const char *subclass, spa_t *spa, vdev_t *vd, zio_t *zio,
    uint64_t stateoroffset, uint64_t size)
{
#ifdef _KERNEL
	nvlist_t *ereport = NULL;
	nvlist_t *detector = NULL;

	zfs_ereport_start(&ereport, &detector,
	    subclass, spa, vd, zio, stateoroffset, size);

	if (ereport == NULL || detector == NULL)
		return;

	if (zfs_is_ratelimiting_event(subclass, vd)) {
		/* Drop the event, freeing the nvlists ourselves */
		zfs_zevent_post_cb(ereport, detector);
		return;
	}

	/* Cleanup is handled by the callback function */
	zfs_zevent_post(ereport, detector, zfs_zevent_post_cb);
#endif
}
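
/*
 * Typical call site (illustrative; the I/O pipeline in zio.c does something
 * very close to this when a leaf-vdev I/O fails):
 *
 *	zfs_ereport_post(FM_EREPORT_ZFS_IO, spa, vd, zio, 0, 0);
 *
 * Passing 0 for 'stateoroffset' and 'size' means the offset and length are
 * taken from the zio itself.
 */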

void
zfs_ereport_start_checksum(spa_t *spa, vdev_t *vd,
    struct zio *zio, uint64_t offset, uint64_t length, void *arg,
    zio_bad_cksum_t *info)
{
	zio_cksum_report_t *report;

#ifdef _KERNEL
	if (zfs_is_ratelimiting_event(FM_EREPORT_ZFS_CHECKSUM, vd))
		return;
#endif

	report = kmem_zalloc(sizeof (*report), KM_SLEEP);

	if (zio->io_vsd != NULL)
		zio->io_vsd_ops->vsd_cksum_report(zio, report, arg);
	else
		zio_vsd_default_cksum_report(zio, report, arg);

	/* copy the checksum failure information if it was provided */
	if (info != NULL) {
		report->zcr_ckinfo = kmem_zalloc(sizeof (*info), KM_SLEEP);
		bcopy(info, report->zcr_ckinfo, sizeof (*info));
	}

	report->zcr_align = 1ULL << vd->vdev_top->vdev_ashift;
	report->zcr_length = length;

#ifdef _KERNEL
	zfs_ereport_start(&report->zcr_ereport, &report->zcr_detector,
	    FM_EREPORT_ZFS_CHECKSUM, spa, vd, zio, offset, length);

	if (report->zcr_ereport == NULL) {
		zfs_ereport_free_checksum(report);
		return;
	}
#endif

	mutex_enter(&spa->spa_errlist_lock);
	report->zcr_next = zio->io_logical->io_cksum_report;
	zio->io_logical->io_cksum_report = report;
	mutex_exit(&spa->spa_errlist_lock);
}
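
/*
 * Sketch of the intended report lifecycle (assumed from the callers): the
 * checksum stage calls zfs_ereport_start_checksum() when the error is first
 * noticed; once the logical I/O completes, zfs_ereport_finish_checksum()
 * posts the annotated ereport (with drop_if_identical as appropriate), while
 * zfs_ereport_free_checksum() reclaims any report that was never finished.
 */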

void
zfs_ereport_finish_checksum(zio_cksum_report_t *report,
    const void *good_data, const void *bad_data, boolean_t drop_if_identical)
{
#ifdef _KERNEL
	zfs_ecksum_info_t *info;

	info = annotate_ecksum(report->zcr_ereport, report->zcr_ckinfo,
	    good_data, bad_data, report->zcr_length, drop_if_identical);

	if (info != NULL)
		zfs_zevent_post(report->zcr_ereport,
		    report->zcr_detector, zfs_zevent_post_cb);
	else
		zfs_zevent_post_cb(report->zcr_ereport, report->zcr_detector);

	report->zcr_ereport = report->zcr_detector = NULL;

	if (info != NULL)
		kmem_free(info, sizeof (*info));
#endif
}

void
zfs_ereport_free_checksum(zio_cksum_report_t *rpt)
{
#ifdef _KERNEL
	if (rpt->zcr_ereport != NULL) {
		fm_nvlist_destroy(rpt->zcr_ereport,
		    FM_NVA_FREE);
		fm_nvlist_destroy(rpt->zcr_detector,
		    FM_NVA_FREE);
	}
#endif
	rpt->zcr_free(rpt->zcr_cbdata, rpt->zcr_cbinfo);

	if (rpt->zcr_ckinfo != NULL)
		kmem_free(rpt->zcr_ckinfo, sizeof (*rpt->zcr_ckinfo));

	kmem_free(rpt, sizeof (*rpt));
}

void
zfs_ereport_post_checksum(spa_t *spa, vdev_t *vd,
    struct zio *zio, uint64_t offset, uint64_t length,
    const void *good_data, const void *bad_data, zio_bad_cksum_t *zbc)
{
#ifdef _KERNEL
	nvlist_t *ereport = NULL;
	nvlist_t *detector = NULL;
	zfs_ecksum_info_t *info;

	zfs_ereport_start(&ereport, &detector,
	    FM_EREPORT_ZFS_CHECKSUM, spa, vd, zio, offset, length);

	if (ereport == NULL || detector == NULL)
		return;

	info = annotate_ecksum(ereport, zbc, good_data, bad_data, length,
	    B_FALSE);

	if (info != NULL) {
		zfs_zevent_post(ereport, detector, zfs_zevent_post_cb);
		kmem_free(info, sizeof (*info));
	}
#endif
}

static void
zfs_post_common(spa_t *spa, vdev_t *vd, const char *type, const char *name,
    nvlist_t *aux)
{
#ifdef _KERNEL
	nvlist_t *resource;
	char class[64];

	if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT)
		return;

	if ((resource = fm_nvlist_create(NULL)) == NULL)
		return;

	(void) snprintf(class, sizeof (class), "%s.%s.%s", type,
	    ZFS_ERROR_CLASS, name);
	VERIFY0(nvlist_add_uint8(resource, FM_VERSION, FM_RSRC_VERSION));
	VERIFY0(nvlist_add_string(resource, FM_CLASS, class));
	VERIFY0(nvlist_add_uint64(resource,
	    FM_EREPORT_PAYLOAD_ZFS_POOL_GUID, spa_guid(spa)));
	VERIFY0(nvlist_add_int32(resource,
	    FM_EREPORT_PAYLOAD_ZFS_POOL_CONTEXT, spa_load_state(spa)));

	if (vd != NULL) {
		VERIFY0(nvlist_add_uint64(resource,
		    FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, vd->vdev_guid));
		VERIFY0(nvlist_add_uint64(resource,
		    FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE, vd->vdev_state));
		if (vd->vdev_path != NULL)
			VERIFY0(nvlist_add_string(resource,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_PATH, vd->vdev_path));
		if (vd->vdev_devid != NULL)
			VERIFY0(nvlist_add_string(resource,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_DEVID, vd->vdev_devid));
		if (vd->vdev_fru != NULL)
			VERIFY0(nvlist_add_string(resource,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_FRU, vd->vdev_fru));
		if (vd->vdev_enc_sysfs_path != NULL)
			VERIFY0(nvlist_add_string(resource,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_ENC_SYSFS_PATH,
			    vd->vdev_enc_sysfs_path));

		/* also copy any optional payload data */
		if (aux) {
			nvpair_t *elem = NULL;

			while ((elem = nvlist_next_nvpair(aux, elem)) != NULL)
				(void) nvlist_add_nvpair(resource, elem);
		}
	}

	zfs_zevent_post(resource, NULL, zfs_zevent_post_cb);
#endif
}

/*
 * The 'resource.fs.zfs.removed' event is an internal signal that the given
 * vdev has been removed from the system.  This will cause the DE to ignore
 * any recent I/O errors, inferring that they are due to the asynchronous
 * device removal.
 */
void
zfs_post_remove(spa_t *spa, vdev_t *vd)
{
	zfs_post_common(spa, vd, FM_RSRC_CLASS, FM_RESOURCE_REMOVED, NULL);
}

/*
 * The 'resource.fs.zfs.autoreplace' event is an internal signal that the pool
 * has the 'autoreplace' property set, and therefore any broken vdevs will be
 * handled by higher level logic, and no vdev fault should be generated.
 */
void
zfs_post_autoreplace(spa_t *spa, vdev_t *vd)
{
	zfs_post_common(spa, vd, FM_RSRC_CLASS, FM_RESOURCE_AUTOREPLACE, NULL);
}

/*
 * The 'resource.fs.zfs.statechange' event is an internal signal that the
 * given vdev has transitioned its state to DEGRADED or HEALTHY.  This will
 * cause the retire agent to repair any outstanding fault management cases
 * open because the device was not found (fault.fs.zfs.device).
 */
void
zfs_post_state_change(spa_t *spa, vdev_t *vd, uint64_t laststate)
{
#ifdef _KERNEL
	nvlist_t *aux;

	/*
	 * Add optional supplemental keys to payload
	 */
	aux = fm_nvlist_create(NULL);
	if (vd && aux) {
		if (vd->vdev_physpath) {
			(void) nvlist_add_string(aux,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_PHYSPATH,
			    vd->vdev_physpath);
		}
		if (vd->vdev_enc_sysfs_path) {
			(void) nvlist_add_string(aux,
			    FM_EREPORT_PAYLOAD_ZFS_VDEV_ENC_SYSFS_PATH,
			    vd->vdev_enc_sysfs_path);
		}

		(void) nvlist_add_uint64(aux,
		    FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE, laststate);
	}

	zfs_post_common(spa, vd, FM_RSRC_CLASS, FM_RESOURCE_STATECHANGE,
	    aux);

	if (aux)
		fm_nvlist_destroy(aux, FM_NVA_FREE);
#endif
}

/*
 * The 'sysevent.fs.zfs.*' events are signals posted to notify user space of
 * changes in the pool.  All sysevents are listed in sys/sysevent/eventdefs.h
 * and are designed to be consumed by the ZFS Event Daemon (ZED).  For
 * additional details refer to the zed(8) man page.
 */
void
zfs_post_sysevent(spa_t *spa, vdev_t *vd, const char *name)
{
	zfs_post_common(spa, vd, FM_SYSEVENT_CLASS, name, NULL);
}

#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(zfs_ereport_post);
EXPORT_SYMBOL(zfs_ereport_post_checksum);
EXPORT_SYMBOL(zfs_post_remove);
EXPORT_SYMBOL(zfs_post_autoreplace);
EXPORT_SYMBOL(zfs_post_state_change);
EXPORT_SYMBOL(zfs_post_sysevent);
#endif /* _KERNEL */