/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2017, Intel Corporation.
 */

/*
 * ZFS fault injection
 *
 * To handle fault injection, we keep track of a series of zinject_record_t
 * structures which describe which logical block(s) should be injected with a
 * fault. These are kept in a global list. Each record corresponds to a given
 * spa_t and maintains a special hold on the spa_t so that it cannot be deleted
 * or exported while the injection record exists.
 *
 * Device level injection is done using the 'zi_guid' field. If this is set, it
 * means that the error is destined for a particular device, not a piece of
 * data.
 *
 * This is a rather poor data structure and algorithm, but we don't expect more
 * than a few faults at any one time, so it should be sufficient for our needs.
 */
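
/*
 * Usage note (illustrative): these records are normally created by the
 * zinject(8) command through the ZFS ioctl layer; for example, something
 * like "zinject -d vd0 -e io pool" registers a device-level handler for
 * vdev "vd0". The exact option spelling may vary between releases.
 */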

#include <sys/arc.h>
#include <sys/zio.h>
#include <sys/zfs_ioctl.h>
#include <sys/vdev_impl.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/fs/zfs.h>

uint32_t zio_injection_enabled = 0;

/*
 * Data describing each zinject handler registered on the system, and
 * contains the list node linking the handler in the global zinject
 * handler list.
 */
typedef struct inject_handler {
	int			zi_id;
	spa_t			*zi_spa;
	zinject_record_t	zi_record;
	uint64_t		*zi_lanes;
	int			zi_next_lane;
	list_node_t		zi_link;
} inject_handler_t;

/*
 * List of all zinject handlers registered on the system, protected by
 * the inject_lock defined below.
 */
static list_t inject_handlers;

/*
 * This protects insertion into, and traversal of, the inject handler
 * list defined above; as well as the inject_delay_count. Any time a
 * handler is inserted or removed from the list, this lock should be
 * taken as a RW_WRITER; and any time traversal is done over the list
 * (without modification to it) this lock should be taken as a RW_READER.
 */
static krwlock_t inject_lock;

/*
 * This holds the number of zinject delay handlers that have been
 * registered on the system. It is protected by the inject_lock defined
 * above. Thus modifications to this count must be a RW_WRITER of the
 * inject_lock, and reads of this count must be (at least) a RW_READER
 * of the inject_lock.
 */
static int inject_delay_count = 0;

/*
 * This lock is used only in zio_handle_io_delay(), refer to the comment
 * in that function for more details.
 */
static kmutex_t inject_delay_mtx;

/*
 * Used to assign unique identifying numbers to each new zinject handler.
 */
static int inject_next_id = 1;

/*
 * Test if the requested frequency was triggered
 */
static boolean_t
freq_triggered(uint32_t frequency)
{
	/*
	 * zero implies always (100%)
	 */
	if (frequency == 0)
		return (B_TRUE);

	/*
	 * Note: we still handle legacy (unscaled) frequency values
	 */
	uint32_t maximum = (frequency <= 100) ? 100 : ZI_PERCENTAGE_MAX;

	return (spa_get_random(maximum) < frequency);
}
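
/*
 * For example, a handler registered with zi_freq = 25 fires on roughly
 * 25% of calls, since spa_get_random(100) returns a uniform value in
 * [0, 100); larger (scaled) frequency values are compared against
 * ZI_PERCENTAGE_MAX in the same way.
 */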

/*
 * Returns true if the given record matches the I/O in progress.
 */
static boolean_t
zio_match_handler(const zbookmark_phys_t *zb, uint64_t type,
    zinject_record_t *record, int error)
{
	/*
	 * Check for a match against the MOS, which is based on type
	 */
	if (zb->zb_objset == DMU_META_OBJSET &&
	    record->zi_objset == DMU_META_OBJSET &&
	    record->zi_object == DMU_META_DNODE_OBJECT) {
		if (record->zi_type == DMU_OT_NONE ||
		    type == record->zi_type)
			return (freq_triggered(record->zi_freq));
		else
			return (B_FALSE);
	}

	/*
	 * Check for an exact match.
	 */
	if (zb->zb_objset == record->zi_objset &&
	    zb->zb_object == record->zi_object &&
	    zb->zb_level == record->zi_level &&
	    zb->zb_blkid >= record->zi_start &&
	    zb->zb_blkid <= record->zi_end &&
	    error == record->zi_error)
		return (freq_triggered(record->zi_freq));

	return (B_FALSE);
}

/*
 * Panic the system when a config change happens in the function
 * specified by tag.
 */
void
zio_handle_panic_injection(spa_t *spa, char *tag, uint64_t type)
{
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (spa != handler->zi_spa)
			continue;

		if (handler->zi_record.zi_type == type &&
		    strcmp(tag, handler->zi_record.zi_func) == 0)
			panic("Panic requested in function %s\n", tag);
	}

	rw_exit(&inject_lock);
}

/*
 * Inject a decryption failure. Decryption failures can occur in
 * both the ARC and the ZIO layers.
 */
int
zio_handle_decrypt_injection(spa_t *spa, const zbookmark_phys_t *zb,
    uint64_t type, int error)
{
	int ret = 0;
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_DECRYPT_FAULT)
			continue;

		if (zio_match_handler(zb, type, &handler->zi_record, error)) {
			ret = error;
			break;
		}
	}

	rw_exit(&inject_lock);
	return (ret);
}

/*
 * Determine if the I/O in question should return failure. Returns the errno
 * to be returned to the caller.
 */
int
zio_handle_fault_injection(zio_t *zio, int error)
{
	int ret = 0;
	inject_handler_t *handler;

	/*
	 * Ignore I/O not associated with any logical data.
	 */
	if (zio->io_logical == NULL)
		return (0);

	/*
	 * Currently, we only support fault injection on reads.
	 */
	if (zio->io_type != ZIO_TYPE_READ)
		return (0);

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (zio->io_spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_DATA_FAULT)
			continue;

		/* If this handler matches, return the specified error */
		if (zio_match_handler(&zio->io_logical->io_bookmark,
		    zio->io_bp ? BP_GET_TYPE(zio->io_bp) : DMU_OT_NONE,
		    &handler->zi_record, error)) {
			ret = error;
			break;
		}
	}

	rw_exit(&inject_lock);

	return (ret);
}

/*
 * Determine if the zio is part of a label update and has an injection
 * handler associated with that portion of the label. Currently, we
 * allow error injection in either the nvlist or the uberblock region of
 * the vdev label.
 */
int
zio_handle_label_injection(zio_t *zio, int error)
{
	inject_handler_t *handler;
	vdev_t *vd = zio->io_vd;
	uint64_t offset = zio->io_offset;
	int label;
	int ret = 0;

	if (offset >= VDEV_LABEL_START_SIZE &&
	    offset < vd->vdev_psize - VDEV_LABEL_END_SIZE)
		return (0);

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {
		uint64_t start = handler->zi_record.zi_start;
		uint64_t end = handler->zi_record.zi_end;

		if (handler->zi_record.zi_cmd != ZINJECT_LABEL_FAULT)
			continue;

		/*
		 * The injection region is the relative offsets within a
		 * vdev label. We must determine the label which is being
		 * updated and adjust our region accordingly.
		 */
		label = vdev_label_number(vd->vdev_psize, offset);
		start = vdev_label_offset(vd->vdev_psize, label, start);
		end = vdev_label_offset(vd->vdev_psize, label, end);

		if (zio->io_vd->vdev_guid == handler->zi_record.zi_guid &&
		    (offset >= start && offset <= end)) {
			ret = error;
			break;
		}
	}
	rw_exit(&inject_lock);
	return (ret);
}
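
/*
 * A vdev carries four labels: two at the front (L0, L1) and two at the
 * end (L2, L3) of the device. That is why only offsets below
 * VDEV_LABEL_START_SIZE or within VDEV_LABEL_END_SIZE of vdev_psize are
 * treated as label I/O above, and why the record's relative
 * [zi_start, zi_end] region must be rebased onto whichever label is
 * being written.
 */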

/*ARGSUSED*/
static int
zio_inject_bitflip_cb(void *data, size_t len, void *private)
{
	ASSERTV(zio_t *zio = private);
	uint8_t *buffer = data;
	uint_t byte = spa_get_random(len);

	ASSERT(zio->io_type == ZIO_TYPE_READ);

	/* flip a single random bit in an abd data buffer */
	buffer[byte] ^= 1 << spa_get_random(8);

	return (1);	/* stop after first flip */
}
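
/*
 * Note: abd_iterate_func() invokes the callback once per contiguous
 * buffer segment, and a non-zero return value stops the iteration; with
 * the callback above, the flipped bit therefore always lands in the
 * first segment of the abd.
 */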

static int
zio_handle_device_injection_impl(vdev_t *vd, zio_t *zio, int err1, int err2)
{
	inject_handler_t *handler;
	int ret = 0;

	/*
	 * We skip over faults in the labels unless it's during
	 * device open (i.e. zio == NULL).
	 */
	if (zio != NULL) {
		uint64_t offset = zio->io_offset;

		if (offset < VDEV_LABEL_START_SIZE ||
		    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE)
			return (0);
	}

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (handler->zi_record.zi_cmd != ZINJECT_DEVICE_FAULT)
			continue;

		if (vd->vdev_guid == handler->zi_record.zi_guid) {
			if (handler->zi_record.zi_failfast &&
			    (zio == NULL || (zio->io_flags &
			    (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))) {
				continue;
			}

			/* Handle type specific I/O failures */
			if (zio != NULL &&
			    handler->zi_record.zi_iotype != ZIO_TYPES &&
			    handler->zi_record.zi_iotype != zio->io_type)
				continue;

			if (handler->zi_record.zi_error == err1 ||
			    handler->zi_record.zi_error == err2) {
				/*
				 * limit error injection if requested
				 */
				if (!freq_triggered(handler->zi_record.zi_freq))
					continue;

				/*
				 * For a failed open, pretend like the device
				 * has gone away.
				 */
				if (err1 == ENXIO)
					vd->vdev_stat.vs_aux =
					    VDEV_AUX_OPEN_FAILED;

				/*
				 * Treat these errors as if they had been
				 * retried so that all the appropriate stats
				 * and FMA events are generated.
				 */
				if (!handler->zi_record.zi_failfast &&
				    zio != NULL)
					zio->io_flags |= ZIO_FLAG_IO_RETRY;

				/*
				 * EILSEQ means flip a bit after a read
				 */
				if (handler->zi_record.zi_error == EILSEQ) {
					if (zio == NULL)
						break;

					/* locate buffer data and flip a bit */
					(void) abd_iterate_func(zio->io_abd, 0,
					    zio->io_size, zio_inject_bitflip_cb,
					    zio);
					break;
				}

				ret = handler->zi_record.zi_error;
				break;
			}
			if (handler->zi_record.zi_error == ENXIO) {
				ret = SET_ERROR(EIO);
				break;
			}
		}
	}

	rw_exit(&inject_lock);

	return (ret);
}

int
zio_handle_device_injection(vdev_t *vd, zio_t *zio, int error)
{
	return (zio_handle_device_injection_impl(vd, zio, error, INT_MAX));
}

int
zio_handle_device_injections(vdev_t *vd, zio_t *zio, int err1, int err2)
{
	return (zio_handle_device_injection_impl(vd, zio, err1, err2));
}
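
/*
 * The two-errno form lets a single traversal of the handler list match
 * either of two injected errnos (e.g. EIO or EILSEQ, hypothetically);
 * the single-errno wrapper passes INT_MAX as a second errno that can
 * never match.
 */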

/*
 * Simulate hardware that ignores cache flushes. For requested number
 * of seconds nix the actual writing to disk.
 */
void
zio_handle_ignored_writes(zio_t *zio)
{
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		/* Ignore errors not destined for this pool */
		if (zio->io_spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_IGNORED_WRITES)
			continue;

		/*
		 * Positive duration implies # of seconds, negative
		 * a number of txgs
		 */
		if (handler->zi_record.zi_timer == 0) {
			if (handler->zi_record.zi_duration > 0)
				handler->zi_record.zi_timer = ddi_get_lbolt64();
			else
				handler->zi_record.zi_timer = zio->io_txg;
		}

		/* Have a "problem" writing 60% of the time */
		if (spa_get_random(100) < 60)
			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
		break;
	}

	rw_exit(&inject_lock);
}
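
/*
 * Clearing the ZIO_VDEV_IO_STAGES bits above removes the vdev I/O
 * stages from the zio's pipeline, so the write "completes" without ever
 * reaching the device, mimicking hardware that acknowledges a flush
 * without persisting the data.
 */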

void
spa_handle_ignored_writes(spa_t *spa)
{
	inject_handler_t *handler;

	if (zio_injection_enabled == 0)
		return;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_IGNORED_WRITES)
			continue;

		if (handler->zi_record.zi_duration > 0) {
			VERIFY(handler->zi_record.zi_timer == 0 ||
			    ddi_time_after64(
			    (int64_t)handler->zi_record.zi_timer +
			    handler->zi_record.zi_duration * hz,
			    ddi_get_lbolt64()));
		} else {
			/* duration is negative so the subtraction here adds */
			VERIFY(handler->zi_record.zi_timer == 0 ||
			    handler->zi_record.zi_timer -
			    handler->zi_record.zi_duration >=
			    spa_syncing_txg(spa));
		}
	}

	rw_exit(&inject_lock);
}

hrtime_t
zio_handle_io_delay(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	inject_handler_t *min_handler = NULL;
	hrtime_t min_target = 0;

	rw_enter(&inject_lock, RW_READER);

	/*
	 * inject_delay_count is a subset of zio_injection_enabled that
	 * is only incremented for delay handlers. These checks are
	 * mainly added to remind the reader why we're not explicitly
	 * checking zio_injection_enabled like the other functions.
	 */
	IMPLY(inject_delay_count > 0, zio_injection_enabled > 0);
	IMPLY(zio_injection_enabled == 0, inject_delay_count == 0);

	/*
	 * If there aren't any inject delay handlers registered, then we
	 * can short circuit and simply return 0 here. A value of zero
	 * informs zio_delay_interrupt() that this request should not be
	 * delayed. This short circuit keeps us from acquiring the
	 * inject_delay_mtx unnecessarily.
	 */
	if (inject_delay_count == 0) {
		rw_exit(&inject_lock);
		return (0);
	}

	/*
	 * Each inject handler has a number of "lanes" associated with
	 * it. Each lane is able to handle requests independently of one
	 * another, and at a latency defined by the inject handler
	 * record's zi_timer field. Thus if a handler is configured with
	 * a single lane with a 10ms latency, it will delay requests
	 * such that only a single request is completed every 10ms. So,
	 * if more than one request is attempted per each 10ms interval,
	 * the average latency of the requests will be greater than
	 * 10ms; but if only a single request is submitted each 10ms
	 * interval the average latency will be 10ms.
	 *
	 * We need to acquire this mutex to prevent multiple concurrent
	 * threads being assigned to the same lane of a given inject
	 * handler. The mutex allows us to perform the following two
	 * operations atomically:
	 *
	 *	1. determine the minimum handler and minimum target
	 *	   value of all the possible handlers
	 *	2. update that minimum handler's lane array
	 *
	 * Without atomicity, two (or more) threads could pick the same
	 * lane in step (1), and then conflict with each other in step
	 * (2). This could allow a single lane handler to process
	 * multiple requests simultaneously, which shouldn't be possible.
	 */
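	/*
	 * Worked example: with zi_nlanes = 2 and zi_timer = 10ms, two
	 * requests issued at time T both complete at T+10ms (one per
	 * lane); a third request issued at T must reuse a lane and
	 * completes at T+20ms, for an average latency of ~13.3ms over
	 * the three requests.
	 */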
	mutex_enter(&inject_delay_mtx);

	for (inject_handler_t *handler = list_head(&inject_handlers);
	    handler != NULL; handler = list_next(&inject_handlers, handler)) {
		if (handler->zi_record.zi_cmd != ZINJECT_DELAY_IO)
			continue;

		if (!freq_triggered(handler->zi_record.zi_freq))
			continue;

		if (vd->vdev_guid != handler->zi_record.zi_guid)
			continue;

		/*
		 * Defensive; should never happen as the array allocation
		 * occurs prior to inserting this handler on the list.
		 */
		ASSERT3P(handler->zi_lanes, !=, NULL);

		/*
		 * This should never happen, the zinject command should
		 * prevent a user from setting an IO delay with zero lanes.
		 */
		ASSERT3U(handler->zi_record.zi_nlanes, !=, 0);

		ASSERT3U(handler->zi_record.zi_nlanes, >,
		    handler->zi_next_lane);

		/*
		 * We want to issue this IO to the lane that will become
		 * idle the soonest, so we compare the soonest this
		 * specific handler can complete the IO with all other
		 * handlers, to find the lowest value of all possible
		 * lanes. We then use this lane to submit the request.
		 *
		 * Since each handler has a constant value for its
		 * delay, we can just use the "next" lane for that
		 * handler; as it will always be the lane with the
		 * lowest value for that particular handler (i.e. the
		 * lane that will become idle the soonest). This saves a
		 * scan of each handler's lanes array.
		 *
		 * There are two cases to consider when determining when
		 * this specific IO request should complete. If this
		 * lane is idle, we want to "submit" the request now so
		 * it will complete after zi_timer milliseconds. Thus,
		 * we set the target to now + zi_timer.
		 *
		 * If the lane is busy, we want this request to complete
		 * zi_timer milliseconds after the lane becomes idle.
		 * Since the 'zi_lanes' array holds the time at which
		 * each lane will become idle, we use that value to
		 * determine when this request should complete.
		 */
		hrtime_t idle = handler->zi_record.zi_timer + gethrtime();
		hrtime_t busy = handler->zi_record.zi_timer +
		    handler->zi_lanes[handler->zi_next_lane];
		hrtime_t target = MAX(idle, busy);

		if (min_handler == NULL) {
			min_handler = handler;
			min_target = target;
			continue;
		}

		ASSERT3P(min_handler, !=, NULL);
		ASSERT3U(min_target, !=, 0);

		/*
		 * We don't yet increment the "next lane" variable since
		 * we still might find a lower value lane in another
		 * handler during any remaining iterations. Once we're
		 * sure we've selected the absolute minimum, we'll claim
		 * the lane and increment the handler's "next lane"
		 * field below.
		 */

		if (target < min_target) {
			min_handler = handler;
			min_target = target;
		}
	}

	/*
	 * 'min_handler' will be NULL if no IO delays are registered for
	 * this vdev, otherwise it will point to the handler containing
	 * the lane that will become idle the soonest.
	 */
	if (min_handler != NULL) {
		ASSERT3U(min_target, !=, 0);
		min_handler->zi_lanes[min_handler->zi_next_lane] = min_target;

		/*
		 * If we've used all possible lanes for this handler,
		 * loop back and start using the first lane again;
		 * otherwise, just increment the lane index.
		 */
		min_handler->zi_next_lane = (min_handler->zi_next_lane + 1) %
		    min_handler->zi_record.zi_nlanes;
	}

	mutex_exit(&inject_delay_mtx);
	rw_exit(&inject_lock);

	return (min_target);
}
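
/*
 * The hrtime_t returned by zio_handle_io_delay() is an absolute
 * completion target, with zero meaning "no delay"; per the comments
 * above, zio_delay_interrupt() is expected to hold the zio until
 * gethrtime() reaches that target.
 */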

static int
zio_calculate_range(const char *pool, zinject_record_t *record)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	objset_t *os = NULL;
	dnode_t *dn = NULL;
	int error;

	/*
	 * Obtain the dnode for object using pool, objset, and object
	 */
	error = dsl_pool_hold(pool, FTAG, &dp);
	if (error != 0)
		return (error);

	error = dsl_dataset_hold_obj(dp, record->zi_objset, FTAG, &ds);
	dsl_pool_rele(dp, FTAG);
	if (error != 0)
		return (error);

	error = dmu_objset_from_ds(ds, &os);
	dsl_dataset_rele(ds, FTAG);
	if (error != 0)
		return (error);

	error = dnode_hold(os, record->zi_object, FTAG, &dn);
	if (error != 0)
		return (error);

	/*
	 * Translate the range into block IDs
	 */
	if (record->zi_start != 0 || record->zi_end != -1ULL) {
		record->zi_start >>= dn->dn_datablkshift;
		record->zi_end >>= dn->dn_datablkshift;
	}
	if (record->zi_level > 0) {
		if (record->zi_level >= dn->dn_nlevels) {
			dnode_rele(dn, FTAG);
			return (SET_ERROR(EDOM));
		}

		if (record->zi_start != 0 || record->zi_end != 0) {
			int shift = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

			for (int level = record->zi_level; level > 0; level--) {
				record->zi_start >>= shift;
				record->zi_end >>= shift;
			}
		}
	}

	dnode_rele(dn, FTAG);
	return (0);
}
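
/*
 * Worked example: with a 128K (2^17) data block size, a byte range of
 * [0, 1M) maps to level-0 blkids 0-7. For zi_level = 1 with 128K
 * indirect blocks each holding 2^10 block pointers
 * (shift = 17 - SPA_BLKPTRSHIFT = 10), those blkids all collapse to
 * indirect blkid 0.
 */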

/*
 * Create a new handler for the given record. We add it to the list, adding
 * a reference to the spa_t in the process. We increment zio_injection_enabled,
 * which is the switch to trigger all fault injection.
 */
int
zio_inject_fault(char *name, int flags, int *id, zinject_record_t *record)
{
	inject_handler_t *handler;
	int error;
	spa_t *spa;

	/*
	 * If this is pool-wide metadata, make sure we unload the corresponding
	 * spa_t, so that the next attempt to load it will trigger the fault.
	 * We call spa_reset() to unload the pool appropriately.
	 */
	if (flags & ZINJECT_UNLOAD_SPA)
		if ((error = spa_reset(name)) != 0)
			return (error);

	if (record->zi_cmd == ZINJECT_DELAY_IO) {
		/*
		 * A value of zero for the number of lanes or for the
		 * delay time doesn't make sense.
		 */
		if (record->zi_timer == 0 || record->zi_nlanes == 0)
			return (SET_ERROR(EINVAL));

		/*
		 * The number of lanes is directly mapped to the size of
		 * an array used by the handler. Thus, to ensure the
		 * user doesn't trigger an allocation that's "too large"
		 * we cap the number of lanes here.
		 */
		if (record->zi_nlanes >= UINT16_MAX)
			return (SET_ERROR(EINVAL));
	}

	/*
	 * If the supplied range was in bytes -- calculate the actual blkid
	 */
	if (flags & ZINJECT_CALC_RANGE) {
		error = zio_calculate_range(name, record);
		if (error != 0)
			return (error);
	}

	if (!(flags & ZINJECT_NULL)) {
		/*
		 * spa_inject_addref() will add an injection reference,
		 * which will prevent the pool from being removed from the
		 * namespace while still allowing it to be unloaded.
		 */
		if ((spa = spa_inject_addref(name)) == NULL)
			return (SET_ERROR(ENOENT));

		handler = kmem_alloc(sizeof (inject_handler_t), KM_SLEEP);

		handler->zi_spa = spa;
		handler->zi_record = *record;

		if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
			handler->zi_lanes = kmem_zalloc(
			    sizeof (*handler->zi_lanes) *
			    handler->zi_record.zi_nlanes, KM_SLEEP);
			handler->zi_next_lane = 0;
		} else {
			handler->zi_lanes = NULL;
			handler->zi_next_lane = 0;
		}

		rw_enter(&inject_lock, RW_WRITER);

		/*
		 * We can't move this increment into the conditional
		 * above because we need to hold the RW_WRITER lock of
		 * inject_lock, and we don't want to hold that while
		 * allocating the handler's zi_lanes array.
		 */
		if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
			ASSERT3S(inject_delay_count, >=, 0);
			inject_delay_count++;
			ASSERT3S(inject_delay_count, >, 0);
		}

		*id = handler->zi_id = inject_next_id++;
		list_insert_tail(&inject_handlers, handler);
		atomic_inc_32(&zio_injection_enabled);

		rw_exit(&inject_lock);
	}

	/*
	 * Flush the ARC, so that any attempts to read this data will end up
	 * going to the ZIO layer. Note that this is a little overkill, but
	 * we don't have the necessary ARC interfaces to do anything else, and
	 * fault injection isn't a performance critical path.
	 */
	if (flags & ZINJECT_FLUSH_ARC)
		/*
		 * We must use FALSE to ensure arc_flush returns, since
		 * we're not preventing concurrent ARC insertions.
		 */
		arc_flush(NULL, FALSE);

	return (0);
}
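
/*
 * Usage sketch (illustrative, not a call site in this file): a delay
 * handler could be registered with something like:
 *
 *	zinject_record_t zr = { 0 };
 *	int id;
 *
 *	zr.zi_cmd = ZINJECT_DELAY_IO;
 *	zr.zi_guid = vdev_guid;		// target leaf vdev (assumed)
 *	zr.zi_timer = MSEC2NSEC(10);	// per-lane latency
 *	zr.zi_nlanes = 1;
 *	error = zio_inject_fault(poolname, 0, &id, &zr);
 *
 * where vdev_guid and poolname are assumed to be supplied by the
 * caller; in practice this path is driven by zinject(8) through the
 * zfs ioctl interface.
 */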

/*
 * Returns the next record with an ID greater than that supplied to the
 * function. Used to iterate over all handlers in the system.
 */
int
zio_inject_list_next(int *id, char *name, size_t buflen,
    zinject_record_t *record)
{
	inject_handler_t *handler;
	int ret;

	mutex_enter(&spa_namespace_lock);
	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler))
		if (handler->zi_id > *id)
			break;

	if (handler) {
		*record = handler->zi_record;
		*id = handler->zi_id;
		(void) strncpy(name, spa_name(handler->zi_spa), buflen);
		ret = 0;
	} else {
		ret = SET_ERROR(ENOENT);
	}

	rw_exit(&inject_lock);
	mutex_exit(&spa_namespace_lock);

	return (ret);
}

/*
 * Clear the fault handler with the given identifier, or return ENOENT if none
 * exists.
 */
int
zio_clear_fault(int id)
{
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_WRITER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler))
		if (handler->zi_id == id)
			break;

	if (handler == NULL) {
		rw_exit(&inject_lock);
		return (SET_ERROR(ENOENT));
	}

	if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
		ASSERT3S(inject_delay_count, >, 0);
		inject_delay_count--;
		ASSERT3S(inject_delay_count, >=, 0);
	}

	list_remove(&inject_handlers, handler);
	rw_exit(&inject_lock);

	if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
		ASSERT3P(handler->zi_lanes, !=, NULL);
		kmem_free(handler->zi_lanes, sizeof (*handler->zi_lanes) *
		    handler->zi_record.zi_nlanes);
	} else {
		ASSERT3P(handler->zi_lanes, ==, NULL);
	}

	spa_inject_delref(handler->zi_spa);
	kmem_free(handler, sizeof (inject_handler_t));
	atomic_dec_32(&zio_injection_enabled);

	return (0);
}
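
/*
 * Note that the handler is unlinked while holding inject_lock as
 * RW_WRITER, but the zi_lanes array and the handler itself are freed
 * only after the lock is dropped; once off the list no traversal can
 * reach them, and this avoids calling kmem_free() under the lock.
 */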

void
zio_inject_init(void)
{
	rw_init(&inject_lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&inject_delay_mtx, NULL, MUTEX_DEFAULT, NULL);
	list_create(&inject_handlers, sizeof (inject_handler_t),
	    offsetof(inject_handler_t, zi_link));
}

void
zio_inject_fini(void)
{
	list_destroy(&inject_handlers);
	mutex_destroy(&inject_delay_mtx);
	rw_destroy(&inject_lock);
}

#if defined(_KERNEL)
EXPORT_SYMBOL(zio_injection_enabled);
EXPORT_SYMBOL(zio_inject_fault);
EXPORT_SYMBOL(zio_inject_list_next);
EXPORT_SYMBOL(zio_clear_fault);
EXPORT_SYMBOL(zio_handle_fault_injection);
EXPORT_SYMBOL(zio_handle_device_injection);
EXPORT_SYMBOL(zio_handle_label_injection);
#endif