/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2017, Intel Corporation.
 */
/*
 * ZFS fault injection
 *
 * To handle fault injection, we keep track of a series of zinject_record_t
 * structures which describe which logical block(s) should be injected with a
 * fault. These are kept in a global list. Each record corresponds to a given
 * spa_t and maintains a special hold on the spa_t so that it cannot be deleted
 * or exported while the injection record exists.
 *
 * Device level injection is done using the 'zi_guid' field. If this is set, it
 * means that the error is destined for a particular device, not a piece of
 * data.
 *
 * This is a rather poor data structure and algorithm, but we don't expect more
 * than a few faults at any one time, so it should be sufficient for our needs.
 */
#include <sys/arc.h>
#include <sys/zio.h>
#include <sys/zfs_ioctl.h>
#include <sys/vdev_impl.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/fs/zfs.h>
uint32_t zio_injection_enabled = 0;
/*
 * Data describing each zinject handler registered on the system, and
 * contains the list node linking the handler in the global zinject
 * handler list.
 */
typedef struct inject_handler {
	int		zi_id;
	spa_t		*zi_spa;
	zinject_record_t zi_record;
	uint64_t	*zi_lanes;
	int		zi_next_lane;
	list_node_t	zi_link;
} inject_handler_t;
/*
 * List of all zinject handlers registered on the system, protected by
 * the inject_lock defined below.
 */
static list_t inject_handlers;
/*
 * This protects insertion into, and traversal of, the inject handler
 * list defined above; as well as the inject_delay_count. Any time a
 * handler is inserted or removed from the list, this lock should be
 * taken as a RW_WRITER; and any time traversal is done over the list
 * (without modification to it) this lock should be taken as a RW_READER.
 */
static krwlock_t inject_lock;
/*
 * This holds the number of zinject delay handlers that have been
 * registered on the system. It is protected by the inject_lock defined
 * above. Thus modifications to this count must be a RW_WRITER of the
 * inject_lock, and reads of this count must be (at least) a RW_READER
 * of the inject_lock.
 */
static int inject_delay_count = 0;
/*
 * This lock is used only in zio_handle_io_delay(), refer to the comment
 * in that function for more details.
 */
static kmutex_t inject_delay_mtx;
/*
 * Used to assign unique identifying numbers to each new zinject handler.
 */
static int inject_next_id = 1;
/*
 * Test if the requested frequency was triggered
 */
static boolean_t
freq_triggered(uint32_t frequency)
{
	/*
	 * zero implies always (100%)
	 */
	if (frequency == 0)
		return (B_TRUE);

	ASSERT3U(frequency, <=, ZI_PERCENTAGE_MAX);

	/*
	 * Note: we still handle legacy (unscaled) frequency values
	 */
	uint32_t maximum = (frequency <= 100) ? 100 : ZI_PERCENTAGE_MAX;

	return (random_in_range(maximum) < frequency);
}
/*
 * Returns true if the given record matches the I/O in progress.
 */
static boolean_t
zio_match_handler(const zbookmark_phys_t *zb, uint64_t type, int dva,
    zinject_record_t *record, int error)
{
	/*
	 * Check for a match against the MOS, which is based on type
	 */
	if (zb->zb_objset == DMU_META_OBJSET &&
	    record->zi_objset == DMU_META_OBJSET &&
	    record->zi_object == DMU_META_DNODE_OBJECT) {
		if (record->zi_type == DMU_OT_NONE ||
		    type == record->zi_type)
			return (freq_triggered(record->zi_freq));
		else
			return (B_FALSE);
	}

	/*
	 * Check for an exact match.
	 */
	if (zb->zb_objset == record->zi_objset &&
	    zb->zb_object == record->zi_object &&
	    zb->zb_level == record->zi_level &&
	    zb->zb_blkid >= record->zi_start &&
	    zb->zb_blkid <= record->zi_end &&
	    (record->zi_dvas == 0 || (record->zi_dvas & (1ULL << dva))) &&
	    error == record->zi_error) {
		return (freq_triggered(record->zi_freq));
	}

	return (B_FALSE);
}
/*
 * Panic the system when a config change happens in the function
 * specified by tag.
 */
void
zio_handle_panic_injection(spa_t *spa, char *tag, uint64_t type)
{
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (spa != handler->zi_spa)
			continue;

		if (handler->zi_record.zi_type == type &&
		    strcmp(tag, handler->zi_record.zi_func) == 0)
			panic("Panic requested in function %s\n", tag);
	}

	rw_exit(&inject_lock);
}
/*
 * Inject a decryption failure. Decryption failures can occur in
 * both the ARC and the ZIO layers.
 */
int
zio_handle_decrypt_injection(spa_t *spa, const zbookmark_phys_t *zb,
    uint64_t type, int error)
{
	int ret = 0;
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_DECRYPT_FAULT)
			continue;

		if (zio_match_handler(zb, type, ZI_NO_DVA,
		    &handler->zi_record, error)) {
			ret = error;
			break;
		}
	}

	rw_exit(&inject_lock);
	return (ret);
}
/*
 * If this is a physical I/O for a vdev child determine which DVA it is
 * for. We iterate backwards through the DVAs matching on the offset so
 * that we end up with ZI_NO_DVA (-1) if we don't find a match.
 */
static int
zio_match_dva(zio_t *zio)
{
	int i = ZI_NO_DVA;

	if (zio->io_bp != NULL && zio->io_vd != NULL &&
	    zio->io_child_type == ZIO_CHILD_VDEV) {
		for (i = BP_GET_NDVAS(zio->io_bp) - 1; i >= 0; i--) {
			dva_t *dva = &zio->io_bp->blk_dva[i];
			uint64_t off = DVA_GET_OFFSET(dva);
			vdev_t *vd = vdev_lookup_top(zio->io_spa,
			    DVA_GET_VDEV(dva));

			/* Compensate for vdev label added to leaves */
			if (zio->io_vd->vdev_ops->vdev_op_leaf)
				off += VDEV_LABEL_START_SIZE;

			if (zio->io_vd == vd && zio->io_offset == off)
				break;
		}
	}

	return (i);
}
/*
 * Determine if the I/O in question should return failure. Returns the errno
 * to be returned to the caller.
 */
int
zio_handle_fault_injection(zio_t *zio, int error)
{
	int ret = 0;
	inject_handler_t *handler;

	/*
	 * Ignore I/O not associated with any logical data.
	 */
	if (zio->io_logical == NULL)
		return (0);

	/*
	 * Currently, we only support fault injection on reads.
	 */
	if (zio->io_type != ZIO_TYPE_READ)
		return (0);

	/*
	 * A rebuild I/O has no checksum to verify.
	 */
	if (zio->io_priority == ZIO_PRIORITY_REBUILD && error == ECKSUM)
		return (0);

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {
		if (zio->io_spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_DATA_FAULT)
			continue;

		/* If this handler matches, return the specified error */
		if (zio_match_handler(&zio->io_logical->io_bookmark,
		    zio->io_bp ? BP_GET_TYPE(zio->io_bp) : DMU_OT_NONE,
		    zio_match_dva(zio), &handler->zi_record, error)) {
			ret = error;
			break;
		}
	}

	rw_exit(&inject_lock);

	return (ret);
}
/*
 * Determine if the zio is part of a label update and has an injection
 * handler associated with that portion of the label. Currently, we
 * allow error injection in either the nvlist or the uberblock region of
 * the vdev label.
 */
int
zio_handle_label_injection(zio_t *zio, int error)
{
	inject_handler_t *handler;
	vdev_t *vd = zio->io_vd;
	uint64_t offset = zio->io_offset;
	int label;
	int ret = 0;

	if (offset >= VDEV_LABEL_START_SIZE &&
	    offset < vd->vdev_psize - VDEV_LABEL_END_SIZE)
		return (0);

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {
		uint64_t start = handler->zi_record.zi_start;
		uint64_t end = handler->zi_record.zi_end;

		if (handler->zi_record.zi_cmd != ZINJECT_LABEL_FAULT)
			continue;

		/*
		 * The injection region is the relative offsets within a
		 * vdev label. We must determine the label which is being
		 * updated and adjust our region accordingly.
		 */
		label = vdev_label_number(vd->vdev_psize, offset);
		start = vdev_label_offset(vd->vdev_psize, label, start);
		end = vdev_label_offset(vd->vdev_psize, label, end);

		if (zio->io_vd->vdev_guid == handler->zi_record.zi_guid &&
		    (offset >= start && offset <= end)) {
			ret = error;
			break;
		}
	}
	rw_exit(&inject_lock);
	return (ret);
}
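
/*
 * Callback for abd_iterate_func(), used by the EILSEQ handling below:
 * flips a single random bit in the buffer it is handed, then returns
 * nonzero so the iteration stops after the first buffer visited.
 */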
static int
zio_inject_bitflip_cb(void *data, size_t len, void *private)
{
	zio_t *zio __maybe_unused = private;
	uint8_t *buffer = data;
	uint_t byte = random_in_range(len);

	ASSERT(zio->io_type == ZIO_TYPE_READ);

	/* flip a single random bit in an abd data buffer */
	buffer[byte] ^= 1 << random_in_range(8);

	return (1);	/* stop after first flip */
}
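
/*
 * Determine if a device-level fault should be injected for this vdev.
 * A handler matches if its guid matches the vdev and its zi_error
 * matches either of the two candidate errnos. As a special case, an
 * injected EILSEQ flips a bit in the completed read's buffer rather
 * than returning an error, and a handler registered with ENXIO that
 * is probed with a different errno is reported as EIO.
 */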
static int
zio_handle_device_injection_impl(vdev_t *vd, zio_t *zio, int err1, int err2)
{
	inject_handler_t *handler;
	int ret = 0;

	/*
	 * We skip over faults in the labels unless it's during
	 * device open (i.e. zio == NULL).
	 */
	if (zio != NULL) {
		uint64_t offset = zio->io_offset;

		if (offset < VDEV_LABEL_START_SIZE ||
		    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE)
			return (0);
	}

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (handler->zi_record.zi_cmd != ZINJECT_DEVICE_FAULT)
			continue;

		if (vd->vdev_guid == handler->zi_record.zi_guid) {
			if (handler->zi_record.zi_failfast &&
			    (zio == NULL || (zio->io_flags &
			    (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))) {
				continue;
			}

			/* Handle type specific I/O failures */
			if (zio != NULL &&
			    handler->zi_record.zi_iotype != ZIO_TYPES &&
			    handler->zi_record.zi_iotype != zio->io_type)
				continue;

			if (handler->zi_record.zi_error == err1 ||
			    handler->zi_record.zi_error == err2) {
				/*
				 * limit error injection if requested
				 */
				if (!freq_triggered(handler->zi_record.zi_freq))
					continue;

				/*
				 * For a failed open, pretend like the device
				 * has gone away.
				 */
				if (err1 == ENXIO)
					vd->vdev_stat.vs_aux =
					    VDEV_AUX_OPEN_FAILED;

				/*
				 * Treat these errors as if they had been
				 * retried so that all the appropriate stats
				 * and FMA events are generated.
				 */
				if (!handler->zi_record.zi_failfast &&
				    zio != NULL)
					zio->io_flags |= ZIO_FLAG_IO_RETRY;

				/*
				 * EILSEQ means flip a bit after a read
				 */
				if (handler->zi_record.zi_error == EILSEQ) {
					ASSERT(zio != NULL);

					/* locate buffer data and flip a bit */
					(void) abd_iterate_func(zio->io_abd, 0,
					    zio->io_size, zio_inject_bitflip_cb,
					    zio);
					break;
				}

				ret = handler->zi_record.zi_error;
				break;
			}
			if (handler->zi_record.zi_error == ENXIO) {
				ret = SET_ERROR(EIO);
				break;
			}
		}
	}

	rw_exit(&inject_lock);
	return (ret);
}
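
/*
 * Convenience wrappers around zio_handle_device_injection_impl():
 * the first checks a single errno (INT_MAX serves as an impossible
 * second value), the second checks two candidate errnos in one pass.
 */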
int
zio_handle_device_injection(vdev_t *vd, zio_t *zio, int error)
{
	return (zio_handle_device_injection_impl(vd, zio, error, INT_MAX));
}
int
zio_handle_device_injections(vdev_t *vd, zio_t *zio, int err1, int err2)
{
	return (zio_handle_device_injection_impl(vd, zio, err1, err2));
}
/*
 * Simulate hardware that ignores cache flushes. For requested number
 * of seconds nix the actual writing to disk.
 */
void
zio_handle_ignored_writes(zio_t *zio)
{
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		/* Ignore errors not destined for this pool */
		if (zio->io_spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_IGNORED_WRITES)
			continue;

		/*
		 * Positive duration implies # of seconds, negative
		 * a number of txgs
		 */
		if (handler->zi_record.zi_timer == 0) {
			if (handler->zi_record.zi_duration > 0)
				handler->zi_record.zi_timer = ddi_get_lbolt64();
			else
				handler->zi_record.zi_timer = zio->io_txg;
		}

		/* Have a "problem" writing 60% of the time */
		if (random_in_range(100) < 60)
			zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
		break;
	}

	rw_exit(&inject_lock);
}
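
/*
 * Verify that any ignored-writes handlers for this pool have not
 * outlived their requested duration, whether that duration was
 * expressed in seconds or in txgs.
 */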
void
spa_handle_ignored_writes(spa_t *spa)
{
	inject_handler_t *handler;

	if (zio_injection_enabled == 0)
		return;

	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler)) {

		if (spa != handler->zi_spa ||
		    handler->zi_record.zi_cmd != ZINJECT_IGNORED_WRITES)
			continue;

		if (handler->zi_record.zi_duration > 0) {
			VERIFY(handler->zi_record.zi_timer == 0 ||
			    ddi_time_after64(
			    (int64_t)handler->zi_record.zi_timer +
			    handler->zi_record.zi_duration * hz,
			    ddi_get_lbolt64()));
		} else {
			/* duration is negative so the subtraction here adds */
			VERIFY(handler->zi_record.zi_timer == 0 ||
			    handler->zi_record.zi_timer -
			    handler->zi_record.zi_duration >=
			    spa_syncing_txg(spa));
		}
	}

	rw_exit(&inject_lock);
}
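
/*
 * Returns the absolute time (as an hrtime_t) at which this zio should
 * complete if a delay handler applies, or 0 to inform
 * zio_delay_interrupt() that the zio should not be delayed.
 */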
hrtime_t
zio_handle_io_delay(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	inject_handler_t *min_handler = NULL;
	hrtime_t min_target = 0;

	rw_enter(&inject_lock, RW_READER);

	/*
	 * inject_delay_count is a subset of zio_injection_enabled that
	 * is only incremented for delay handlers. These checks are
	 * mainly added to remind the reader why we're not explicitly
	 * checking zio_injection_enabled like the other functions.
	 */
	IMPLY(inject_delay_count > 0, zio_injection_enabled > 0);
	IMPLY(zio_injection_enabled == 0, inject_delay_count == 0);

	/*
	 * If there aren't any inject delay handlers registered, then we
	 * can short circuit and simply return 0 here. A value of zero
	 * informs zio_delay_interrupt() that this request should not be
	 * delayed. This short circuit keeps us from acquiring the
	 * inject_delay_mutex unnecessarily.
	 */
	if (inject_delay_count == 0) {
		rw_exit(&inject_lock);
		return (0);
	}

	/*
	 * Each inject handler has a number of "lanes" associated with
	 * it. Each lane is able to handle requests independently of one
	 * another, and at a latency defined by the inject handler
	 * record's zi_timer field. Thus if a handler is configured with
	 * a single lane with a 10ms latency, it will delay requests
	 * such that only a single request is completed every 10ms. So,
	 * if more than one request is attempted per each 10ms interval,
	 * the average latency of the requests will be greater than
	 * 10ms; but if only a single request is submitted each 10ms
	 * interval the average latency will be 10ms.
	 *
	 * We need to acquire this mutex to prevent multiple concurrent
	 * threads being assigned to the same lane of a given inject
	 * handler. The mutex allows us to perform the following two
	 * operations atomically:
	 *
	 *	1. determine the minimum handler and minimum target
	 *	   value of all the possible handlers
	 *	2. update that minimum handler's lane array
	 *
	 * Without atomicity, two (or more) threads could pick the same
	 * lane in step (1), and then conflict with each other in step
	 * (2). This could allow a single lane handler to process
	 * multiple requests simultaneously, which shouldn't be possible.
	 */
	mutex_enter(&inject_delay_mtx);

	for (inject_handler_t *handler = list_head(&inject_handlers);
	    handler != NULL; handler = list_next(&inject_handlers, handler)) {
		if (handler->zi_record.zi_cmd != ZINJECT_DELAY_IO)
			continue;

		if (!freq_triggered(handler->zi_record.zi_freq))
			continue;

		if (vd->vdev_guid != handler->zi_record.zi_guid)
			continue;

		/*
		 * Defensive; should never happen as the array allocation
		 * occurs prior to inserting this handler on the list.
		 */
		ASSERT3P(handler->zi_lanes, !=, NULL);

		/*
		 * This should never happen, the zinject command should
		 * prevent a user from setting an IO delay with zero lanes.
		 */
		ASSERT3U(handler->zi_record.zi_nlanes, !=, 0);

		ASSERT3U(handler->zi_record.zi_nlanes, >,
		    handler->zi_next_lane);

		/*
		 * We want to issue this IO to the lane that will become
		 * idle the soonest, so we compare the soonest this
		 * specific handler can complete the IO with all other
		 * handlers, to find the lowest value of all possible
		 * lanes. We then use this lane to submit the request.
		 *
		 * Since each handler has a constant value for its
		 * delay, we can just use the "next" lane for that
		 * handler; as it will always be the lane with the
		 * lowest value for that particular handler (i.e. the
		 * lane that will become idle the soonest). This saves a
		 * scan of each handler's lanes array.
		 *
		 * There are two cases to consider when determining when
		 * this specific IO request should complete. If this
		 * lane is idle, we want to "submit" the request now so
		 * it will complete after zi_timer milliseconds. Thus,
		 * we set the target to now + zi_timer.
		 *
		 * If the lane is busy, we want this request to complete
		 * zi_timer milliseconds after the lane becomes idle.
		 * Since the 'zi_lanes' array holds the time at which
		 * each lane will become idle, we use that value to
		 * determine when this request should complete.
		 */
		hrtime_t idle = handler->zi_record.zi_timer + gethrtime();
		hrtime_t busy = handler->zi_record.zi_timer +
		    handler->zi_lanes[handler->zi_next_lane];
		hrtime_t target = MAX(idle, busy);

		if (min_handler == NULL) {
			min_handler = handler;
			min_target = target;
			continue;
		}

		ASSERT3P(min_handler, !=, NULL);
		ASSERT3U(min_target, !=, 0);

		/*
		 * We don't yet increment the "next lane" variable since
		 * we still might find a lower value lane in another
		 * handler during any remaining iterations. Once we're
		 * sure we've selected the absolute minimum, we'll claim
		 * the lane and increment the handler's "next lane"
		 * variable below.
		 */
		if (target < min_target) {
			min_handler = handler;
			min_target = target;
		}
	}

	/*
	 * 'min_handler' will be NULL if no IO delays are registered for
	 * this vdev, otherwise it will point to the handler containing
	 * the lane that will become idle the soonest.
	 */
	if (min_handler != NULL) {
		ASSERT3U(min_target, !=, 0);
		min_handler->zi_lanes[min_handler->zi_next_lane] = min_target;

		/*
		 * If we've used all possible lanes for this handler,
		 * loop back and start using the first lane again;
		 * otherwise, just increment the lane index.
		 */
		min_handler->zi_next_lane = (min_handler->zi_next_lane + 1) %
		    min_handler->zi_record.zi_nlanes;
	}

	mutex_exit(&inject_delay_mtx);
	rw_exit(&inject_lock);

	return (min_target);
}
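
/*
 * Translate the byte-oriented range in a zinject record into block IDs
 * for the target object, shifting by the indirect block size for each
 * level above zero. Used by zio_inject_fault() when ZINJECT_CALC_RANGE
 * is set.
 */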
static int
zio_calculate_range(const char *pool, zinject_record_t *record)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	objset_t *os = NULL;
	dnode_t *dn = NULL;
	int error;

	/*
	 * Obtain the dnode for object using pool, objset, and object
	 */
	error = dsl_pool_hold(pool, FTAG, &dp);
	if (error)
		return (error);

	error = dsl_dataset_hold_obj(dp, record->zi_objset, FTAG, &ds);
	dsl_pool_rele(dp, FTAG);
	if (error)
		return (error);

	error = dmu_objset_from_ds(ds, &os);
	dsl_dataset_rele(ds, FTAG);
	if (error)
		return (error);

	error = dnode_hold(os, record->zi_object, FTAG, &dn);
	if (error)
		return (error);

	/*
	 * Translate the range into block IDs
	 */
	if (record->zi_start != 0 || record->zi_end != -1ULL) {
		record->zi_start >>= dn->dn_datablkshift;
		record->zi_end >>= dn->dn_datablkshift;
	}
	if (record->zi_level > 0) {
		if (record->zi_level >= dn->dn_nlevels) {
			dnode_rele(dn, FTAG);
			return (SET_ERROR(EDOM));
		}

		if (record->zi_start != 0 || record->zi_end != 0) {
			int shift = dn->dn_indblkshift - SPA_BLKPTRSHIFT;

			for (int level = record->zi_level; level > 0; level--) {
				record->zi_start >>= shift;
				record->zi_end >>= shift;
			}
		}
	}

	dnode_rele(dn, FTAG);
	return (0);
}
/*
 * Create a new handler for the given record. We add it to the list, adding
 * a reference to the spa_t in the process. We increment zio_injection_enabled,
 * which is the switch to trigger all fault injection.
 */
int
zio_inject_fault(char *name, int flags, int *id, zinject_record_t *record)
{
	inject_handler_t *handler;
	int error;
	spa_t *spa;

	/*
	 * If this is pool-wide metadata, make sure we unload the corresponding
	 * spa_t, so that the next attempt to load it will trigger the fault.
	 * We call spa_reset() to unload the pool appropriately.
	 */
	if (flags & ZINJECT_UNLOAD_SPA)
		if ((error = spa_reset(name)) != 0)
			return (error);

	if (record->zi_cmd == ZINJECT_DELAY_IO) {
		/*
		 * A value of zero for the number of lanes or for the
		 * delay time doesn't make sense.
		 */
		if (record->zi_timer == 0 || record->zi_nlanes == 0)
			return (SET_ERROR(EINVAL));

		/*
		 * The number of lanes is directly mapped to the size of
		 * an array used by the handler. Thus, to ensure the
		 * user doesn't trigger an allocation that's "too large"
		 * we cap the number of lanes here.
		 */
		if (record->zi_nlanes >= UINT16_MAX)
			return (SET_ERROR(EINVAL));
	}

	/*
	 * If the supplied range was in bytes -- calculate the actual blkid
	 */
	if (flags & ZINJECT_CALC_RANGE) {
		error = zio_calculate_range(name, record);
		if (error != 0)
			return (error);
	}

	if (!(flags & ZINJECT_NULL)) {
		/*
		 * spa_inject_ref() will add an injection reference, which will
		 * prevent the pool from being removed from the namespace while
		 * still allowing it to be unloaded.
		 */
		if ((spa = spa_inject_addref(name)) == NULL)
			return (SET_ERROR(ENOENT));

		handler = kmem_alloc(sizeof (inject_handler_t), KM_SLEEP);

		handler->zi_spa = spa;
		handler->zi_record = *record;

		if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
			handler->zi_lanes = kmem_zalloc(
			    sizeof (*handler->zi_lanes) *
			    handler->zi_record.zi_nlanes, KM_SLEEP);
			handler->zi_next_lane = 0;
		} else {
			handler->zi_lanes = NULL;
			handler->zi_next_lane = 0;
		}

		rw_enter(&inject_lock, RW_WRITER);

		/*
		 * We can't move this increment into the conditional
		 * above because we need to hold the RW_WRITER lock of
		 * inject_lock, and we don't want to hold that while
		 * allocating the handler's zi_lanes array.
		 */
		if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
			ASSERT3S(inject_delay_count, >=, 0);
			inject_delay_count++;
			ASSERT3S(inject_delay_count, >, 0);
		}

		*id = handler->zi_id = inject_next_id++;
		list_insert_tail(&inject_handlers, handler);
		atomic_inc_32(&zio_injection_enabled);

		rw_exit(&inject_lock);
	}

	/*
	 * Flush the ARC, so that any attempts to read this data will end up
	 * going to the ZIO layer. Note that this is a little overkill, but
	 * we don't have the necessary ARC interfaces to do anything else, and
	 * fault injection isn't a performance critical path.
	 */
	if (flags & ZINJECT_FLUSH_ARC)
		/*
		 * We must use FALSE to ensure arc_flush returns, since
		 * we're not preventing concurrent ARC insertions.
		 */
		arc_flush(NULL, FALSE);

	return (0);
}
/*
 * Returns the next record with an ID greater than that supplied to the
 * function. Used to iterate over all handlers in the system.
 */
int
zio_inject_list_next(int *id, char *name, size_t buflen,
    zinject_record_t *record)
{
	inject_handler_t *handler;
	int ret;

	mutex_enter(&spa_namespace_lock);
	rw_enter(&inject_lock, RW_READER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler))
		if (handler->zi_id > *id)
			break;

	if (handler) {
		*record = handler->zi_record;
		*id = handler->zi_id;
		(void) strncpy(name, spa_name(handler->zi_spa), buflen);
		ret = 0;
	} else {
		ret = SET_ERROR(ENOENT);
	}

	rw_exit(&inject_lock);
	mutex_exit(&spa_namespace_lock);

	return (ret);
}
/*
 * Clear the fault handler with the given identifier, or return ENOENT if none
 * exists.
 */
int
zio_clear_fault(int id)
{
	inject_handler_t *handler;

	rw_enter(&inject_lock, RW_WRITER);

	for (handler = list_head(&inject_handlers); handler != NULL;
	    handler = list_next(&inject_handlers, handler))
		if (handler->zi_id == id)
			break;

	if (handler == NULL) {
		rw_exit(&inject_lock);
		return (SET_ERROR(ENOENT));
	}

	if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
		ASSERT3S(inject_delay_count, >, 0);
		inject_delay_count--;
		ASSERT3S(inject_delay_count, >=, 0);
	}

	list_remove(&inject_handlers, handler);
	rw_exit(&inject_lock);

	if (handler->zi_record.zi_cmd == ZINJECT_DELAY_IO) {
		ASSERT3P(handler->zi_lanes, !=, NULL);
		kmem_free(handler->zi_lanes, sizeof (*handler->zi_lanes) *
		    handler->zi_record.zi_nlanes);
	} else {
		ASSERT3P(handler->zi_lanes, ==, NULL);
	}

	spa_inject_delref(handler->zi_spa);
	kmem_free(handler, sizeof (inject_handler_t));
	atomic_dec_32(&zio_injection_enabled);

	return (0);
}
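
/*
 * Set up and tear down the global handler list and the locks that
 * protect it.
 */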
void
zio_inject_init(void)
{
	rw_init(&inject_lock, NULL, RW_DEFAULT, NULL);
	mutex_init(&inject_delay_mtx, NULL, MUTEX_DEFAULT, NULL);
	list_create(&inject_handlers, sizeof (inject_handler_t),
	    offsetof(inject_handler_t, zi_link));
}
void
zio_inject_fini(void)
{
	list_destroy(&inject_handlers);
	mutex_destroy(&inject_delay_mtx);
	rw_destroy(&inject_lock);
}
#if defined(_KERNEL)
EXPORT_SYMBOL(zio_injection_enabled);
EXPORT_SYMBOL(zio_inject_fault);
EXPORT_SYMBOL(zio_inject_list_next);
EXPORT_SYMBOL(zio_clear_fault);
EXPORT_SYMBOL(zio_handle_fault_injection);
EXPORT_SYMBOL(zio_handle_device_injection);
EXPORT_SYMBOL(zio_handle_label_injection);
#endif