/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>

/*
 * ==========================================================================
 * I/O priority table
 * ==========================================================================
 */
uint8_t zio_priority_table[ZIO_PRIORITY_TABLE_SIZE] = {
	0,	/* ZIO_PRIORITY_NOW		*/
	0,	/* ZIO_PRIORITY_SYNC_READ	*/
	0,	/* ZIO_PRIORITY_SYNC_WRITE	*/
	6,	/* ZIO_PRIORITY_ASYNC_READ	*/
	4,	/* ZIO_PRIORITY_ASYNC_WRITE	*/
	4,	/* ZIO_PRIORITY_FREE		*/
	0,	/* ZIO_PRIORITY_CACHE_FILL	*/
	0,	/* ZIO_PRIORITY_LOG_WRITE	*/
	10,	/* ZIO_PRIORITY_RESILVER	*/
	20,	/* ZIO_PRIORITY_SCRUB		*/
};

/*
 * ==========================================================================
 * I/O type descriptions
 * ==========================================================================
 */
char *zio_type_name[ZIO_TYPES] = {
	"null", "read", "write", "free", "claim", "ioctl" };

/* At or above this size, force gang blocking - for testing */
uint64_t zio_gang_bang = SPA_MAXBLOCKSIZE + 1;

/* Force an allocation failure when non-zero */
uint16_t zio_zil_fail_shift = 0;

typedef struct zio_sync_pass {
	int	zp_defer_free;		/* defer frees after this pass */
	int	zp_dontcompress;	/* don't compress after this pass */
	int	zp_rewrite;		/* rewrite new bps after this pass */
} zio_sync_pass_t;

zio_sync_pass_t zio_sync_pass = {
	1,	/* zp_defer_free */
	4,	/* zp_dontcompress */
	1,	/* zp_rewrite */
};

/*
 * ==========================================================================
 * I/O kmem caches
 * ==========================================================================
 */
kmem_cache_t *zio_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
#ifdef _KERNEL
extern vmem_t *zio_alloc_arena;
#endif

void
zio_init(void)
{
	size_t c;
	vmem_t *data_alloc_arena = NULL;

#ifdef _KERNEL
	data_alloc_arena = zio_alloc_arena;
#endif

	zio_cache = kmem_cache_create("zio_cache", sizeof (zio_t), 0,
	    NULL, NULL, NULL, NULL, NULL, 0);

	/*
	 * For small buffers, we want a cache for each multiple of
	 * SPA_MINBLOCKSIZE.  For medium-size buffers, we want a cache
	 * for each quarter-power of 2.  For large buffers, we want
	 * a cache for each multiple of PAGESIZE.
	 */
	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
		size_t p2 = size;
		size_t align = 0;

		while (p2 & (p2 - 1))
			p2 &= p2 - 1;

		if (size <= 4 * SPA_MINBLOCKSIZE) {
			align = SPA_MINBLOCKSIZE;
		} else if (P2PHASE(size, PAGESIZE) == 0) {
			align = PAGESIZE;
		} else if (P2PHASE(size, p2 >> 2) == 0) {
			align = p2 >> 2;
		}

		if (align != 0) {
			char name[36];

			(void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
			zio_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, NULL, KMC_NODEBUG);

			(void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size);
			zio_data_buf_cache[c] = kmem_cache_create(name, size,
			    align, NULL, NULL, NULL, NULL, data_alloc_arena,
			    KMC_NODEBUG);

			dprintf("creating cache for size %5lx align %5lx\n",
			    size, align);
		}
	}

	while (--c != 0) {
		ASSERT(zio_buf_cache[c] != NULL);
		if (zio_buf_cache[c - 1] == NULL)
			zio_buf_cache[c - 1] = zio_buf_cache[c];

		ASSERT(zio_data_buf_cache[c] != NULL);
		if (zio_data_buf_cache[c - 1] == NULL)
			zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
	}
}
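/*
 * Worked example of the sizing rules in zio_init() above, assuming 4K
 * pages: a 10K buffer has p2 = 8K, and since 10K is a multiple of
 * p2 >> 2 = 2K, it gets its own cache with 2K alignment.  A 10.5K
 * buffer matches none of the rules, so no cache is created for it;
 * the fixup loop above points its slot at the next larger cache that
 * does exist (12K, which is PAGESIZE-aligned).
 */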
void
zio_fini(void)
{
	size_t c;
	kmem_cache_t *last_cache = NULL;
	kmem_cache_t *last_data_cache = NULL;

	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
		if (zio_buf_cache[c] != last_cache) {
			last_cache = zio_buf_cache[c];
			kmem_cache_destroy(zio_buf_cache[c]);
		}
		zio_buf_cache[c] = NULL;

		if (zio_data_buf_cache[c] != last_data_cache) {
			last_data_cache = zio_data_buf_cache[c];
			kmem_cache_destroy(zio_data_buf_cache[c]);
		}
		zio_data_buf_cache[c] = NULL;
	}

	kmem_cache_destroy(zio_cache);
}

/*
 * ==========================================================================
 * Allocate and free I/O buffers
 * ==========================================================================
 */

/*
 * Use zio_buf_alloc to allocate ZFS metadata.  This data will appear in a
 * crashdump if the kernel panics, so use it judiciously.  Obviously, it's
 * useful to inspect ZFS metadata, but if possible, we should avoid keeping
 * excess / transient data in-core during a crashdump.
 */
void *
zio_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	if (zio_buf_cache[c] != NULL)
		return (kmem_cache_alloc(zio_buf_cache[c], KM_SLEEP));

	return (kmem_alloc(size, KM_SLEEP));
}

/*
 * Use zio_data_buf_alloc to allocate data.  The data will not appear in a
 * crashdump if the kernel panics.  This exists so that we will limit the
 * amount of ZFS data that shows up in a kernel crashdump.  (Thus reducing
 * the amount of kernel heap dumped to disk when the kernel panics.)
 */
void *
zio_data_buf_alloc(size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	if (zio_data_buf_cache[c] != NULL)
		return (kmem_cache_alloc(zio_data_buf_cache[c], KM_SLEEP));

	return (kmem_alloc(size, KM_SLEEP));
}

void
zio_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	if (zio_buf_cache[c] != NULL)
		kmem_cache_free(zio_buf_cache[c], buf);
	else
		kmem_free(buf, size);
}

void
zio_data_buf_free(void *buf, size_t size)
{
	size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;

	ASSERT(c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);

	if (zio_data_buf_cache[c] != NULL)
		kmem_cache_free(zio_data_buf_cache[c], buf);
	else
		kmem_free(buf, size);
}

/*
 * ==========================================================================
 * Push and pop I/O transform buffers
 * ==========================================================================
 */
static void
zio_push_transform(zio_t *zio, void *data, uint64_t size, uint64_t bufsize)
{
	zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);

	zt->zt_data = data;
	zt->zt_size = size;
	zt->zt_bufsize = bufsize;

	zt->zt_next = zio->io_transform_stack;
	zio->io_transform_stack = zt;

	zio->io_data = data;
	zio->io_size = size;
}

static void
zio_pop_transform(zio_t *zio, void **data, uint64_t *size, uint64_t *bufsize)
{
	zio_transform_t *zt = zio->io_transform_stack;

	*data = zt->zt_data;
	*size = zt->zt_size;
	*bufsize = zt->zt_bufsize;

	zio->io_transform_stack = zt->zt_next;
	kmem_free(zt, sizeof (zio_transform_t));

	if ((zt = zio->io_transform_stack) != NULL) {
		zio->io_data = zt->zt_data;
		zio->io_size = zt->zt_size;
	}
}

static void
zio_clear_transform_stack(zio_t *zio)
{
	void *data;
	uint64_t size, bufsize;

	ASSERT(zio->io_transform_stack != NULL);

	zio_pop_transform(zio, &data, &size, &bufsize);
	while (zio->io_transform_stack != NULL) {
		zio_buf_free(data, bufsize);
		zio_pop_transform(zio, &data, &size, &bufsize);
	}
}
/*
 * ==========================================================================
 * Create the various types of I/O (read, write, free)
 * ==========================================================================
 */
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    void *data, uint64_t size, zio_done_func_t *done, void *private,
    zio_type_t type, int priority, int flags, uint8_t stage, uint32_t pipeline)
{
	zio_t *zio;

	ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
	ASSERT(P2PHASE(size, SPA_MINBLOCKSIZE) == 0);

	zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
	bzero(zio, sizeof (zio_t));
	zio->io_parent = pio;
	zio->io_spa = spa;
	zio->io_txg = txg;
	if (bp != NULL) {
		zio->io_bp = bp;
		zio->io_bp_copy = *bp;
		zio->io_bp_orig = *bp;
	}
	zio->io_done = done;
	zio->io_private = private;
	zio->io_type = type;
	zio->io_priority = priority;
	zio->io_stage = stage;
	zio->io_pipeline = pipeline;
	zio->io_async_stages = ZIO_ASYNC_PIPELINE_STAGES;
	zio->io_timestamp = lbolt64;
	zio->io_flags = flags;
	mutex_init(&zio->io_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);
	zio_push_transform(zio, data, size, size);

	/*
	 * Note on config lock:
	 *
	 * If CONFIG_HELD is set, then the caller already has the config
	 * lock, so we don't need it for this io.
	 *
	 * We set CONFIG_GRABBED to indicate that we have grabbed the
	 * config lock on behalf of this io, so it should be released
	 * in zio_done.
	 *
	 * Unless CONFIG_HELD is set, we will grab the config lock for
	 * any top-level (parent-less) io, *except* NULL top-level ios.
	 * The NULL top-level ios rarely have any children, so we delay
	 * grabbing the lock until the first child is added (but it is
	 * still grabbed on behalf of the top-level i/o, so additional
	 * children don't need to also grab it).  This greatly reduces
	 * contention on the config lock.
	 */
	if (pio == NULL) {
		if (type != ZIO_TYPE_NULL &&
		    !(flags & ZIO_FLAG_CONFIG_HELD)) {
			spa_config_enter(zio->io_spa, RW_READER, zio);
			zio->io_flags |= ZIO_FLAG_CONFIG_GRABBED;
		}
		zio->io_root = zio;
	} else {
		zio->io_root = pio->io_root;
		if (!(flags & ZIO_FLAG_NOBOOKMARK))
			zio->io_logical = pio->io_logical;
		mutex_enter(&pio->io_lock);
		if (pio->io_parent == NULL &&
		    pio->io_type == ZIO_TYPE_NULL &&
		    !(pio->io_flags & ZIO_FLAG_CONFIG_GRABBED) &&
		    !(pio->io_flags & ZIO_FLAG_CONFIG_HELD)) {
			pio->io_flags |= ZIO_FLAG_CONFIG_GRABBED;
			spa_config_enter(zio->io_spa, RW_READER, pio);
		}
		if (stage < ZIO_STAGE_READY)
			pio->io_children_notready++;
		pio->io_children_notdone++;
		zio->io_sibling_next = pio->io_child;
		zio->io_sibling_prev = NULL;
		if (pio->io_child != NULL)
			pio->io_child->io_sibling_prev = zio;
		pio->io_child = zio;
		zio->io_ndvas = pio->io_ndvas;
		mutex_exit(&pio->io_lock);
	}

	return (zio);
}
zio_t *
zio_null(zio_t *pio, spa_t *spa, zio_done_func_t *done, void *private,
    int flags)
{
	zio_t *zio;

	zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private,
	    ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, ZIO_STAGE_OPEN,
	    ZIO_WAIT_FOR_CHILDREN_PIPELINE);

	return (zio);
}

zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, int flags)
{
	return (zio_null(NULL, spa, done, private, flags));
}
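/*
 * Typical usage sketch (identifiers here are placeholders, not taken
 * from this file): a root zio collects the status of many async
 * children.
 *
 *	zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *	zio_nowait(zio_read(rio, spa, bp, buf, size, NULL, NULL,
 *	    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &zb));
 *	error = zio_wait(rio);
 *
 * zio_wait() on the root returns only after every child completes,
 * with the first child error propagated into 'error'.
 */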
zio_t *
zio_read(zio_t *pio, spa_t *spa, blkptr_t *bp, void *data,
    uint64_t size, zio_done_func_t *done, void *private,
    int priority, int flags, zbookmark_t *zb)
{
	zio_t *zio;

	ASSERT3U(size, ==, BP_GET_LSIZE(bp));

	zio = zio_create(pio, spa, bp->blk_birth, bp, data, size, done, private,
	    ZIO_TYPE_READ, priority, flags | ZIO_FLAG_USER,
	    ZIO_STAGE_OPEN, ZIO_READ_PIPELINE);
	zio->io_bookmark = *zb;

	zio->io_logical = zio;

	/*
	 * Work off our copy of the bp so the caller can free it.
	 */
	zio->io_bp = &zio->io_bp_copy;

	if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
		uint64_t csize = BP_GET_PSIZE(bp);
		void *cbuf = zio_buf_alloc(csize);

		zio_push_transform(zio, cbuf, csize, csize);
		zio->io_pipeline |= 1U << ZIO_STAGE_READ_DECOMPRESS;
	}

	if (BP_IS_GANG(bp)) {
		uint64_t gsize = SPA_GANGBLOCKSIZE;
		void *gbuf = zio_buf_alloc(gsize);

		zio_push_transform(zio, gbuf, gsize, gsize);
		zio->io_pipeline |= 1U << ZIO_STAGE_READ_GANG_MEMBERS;
	}

	return (zio);
}
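/*
 * For a compressed gang read, the transform stack built above is, top
 * to bottom: gbuf (gang header), cbuf (compressed data), and the
 * caller's buffer.  The device read fills the top buffer; the
 * READ_GANG_MEMBERS stage pops gbuf and reads the members into cbuf,
 * and the READ_DECOMPRESS stage pops cbuf and decompresses it into
 * io_data.
 */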
zio_t *
zio_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies,
    uint64_t txg, blkptr_t *bp, void *data, uint64_t size,
    zio_done_func_t *ready, zio_done_func_t *done, void *private, int priority,
    int flags, zbookmark_t *zb)
{
	zio_t *zio;

	ASSERT(checksum >= ZIO_CHECKSUM_OFF &&
	    checksum < ZIO_CHECKSUM_FUNCTIONS);

	ASSERT(compress >= ZIO_COMPRESS_OFF &&
	    compress < ZIO_COMPRESS_FUNCTIONS);

	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_USER,
	    ZIO_STAGE_OPEN, ZIO_WRITE_PIPELINE);

	zio->io_ready = ready;

	zio->io_bookmark = *zb;

	zio->io_logical = zio;

	zio->io_checksum = checksum;
	zio->io_compress = compress;
	zio->io_ndvas = ncopies;

	if (compress != ZIO_COMPRESS_OFF)
		zio->io_async_stages |= 1U << ZIO_STAGE_WRITE_COMPRESS;

	if (bp->blk_birth != txg) {
		/* XXX the bp usually (always?) gets re-zeroed later */
		BP_ZERO(bp);
		BP_SET_LSIZE(bp, size);
		BP_SET_PSIZE(bp, size);
	} else {
		/* Make sure someone doesn't change their mind on overwrites */
		ASSERT(MIN(zio->io_ndvas + BP_IS_GANG(bp),
		    spa_max_replication(spa)) == BP_GET_NDVAS(bp));
	}

	return (zio);
}

zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, int checksum,
    uint64_t txg, blkptr_t *bp, void *data, uint64_t size,
    zio_done_func_t *done, void *private, int priority, int flags,
    zbookmark_t *zb)
{
	zio_t *zio;

	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_USER,
	    ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);

	zio->io_bookmark = *zb;
	zio->io_checksum = checksum;
	zio->io_compress = ZIO_COMPRESS_OFF;

	if (pio != NULL)
		ASSERT3U(zio->io_ndvas, <=, BP_GET_NDVAS(bp));

	return (zio);
}

static zio_t *
zio_write_allocate(zio_t *pio, spa_t *spa, int checksum,
    uint64_t txg, blkptr_t *bp, void *data, uint64_t size,
    zio_done_func_t *done, void *private, int priority, int flags)
{
	zio_t *zio;

	BP_ZERO(bp);
	BP_SET_LSIZE(bp, size);
	BP_SET_PSIZE(bp, size);
	BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);

	zio = zio_create(pio, spa, txg, bp, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags,
	    ZIO_STAGE_OPEN, ZIO_WRITE_ALLOCATE_PIPELINE);

	zio->io_checksum = checksum;
	zio->io_compress = ZIO_COMPRESS_OFF;

	return (zio);
}

zio_t *
zio_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    zio_done_func_t *done, void *private)
{
	zio_t *zio;

	ASSERT(!BP_IS_HOLE(bp));

	if (txg == spa->spa_syncing_txg &&
	    spa->spa_sync_pass > zio_sync_pass.zp_defer_free) {
		bplist_enqueue_deferred(&spa->spa_sync_bplist, bp);
		return (zio_null(pio, spa, NULL, NULL, 0));
	}

	zio = zio_create(pio, spa, txg, bp, NULL, 0, done, private,
	    ZIO_TYPE_FREE, ZIO_PRIORITY_FREE, ZIO_FLAG_USER,
	    ZIO_STAGE_OPEN, ZIO_FREE_PIPELINE);

	zio->io_bp = &zio->io_bp_copy;

	return (zio);
}
zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    zio_done_func_t *done, void *private)
{
	zio_t *zio;

	/*
	 * A claim is an allocation of a specific block.  Claims are needed
	 * to support immediate writes in the intent log.  The issue is that
	 * immediate writes contain committed data, but in a txg that was
	 * *not* committed.  Upon opening the pool after an unclean shutdown,
	 * the intent log claims all blocks that contain immediate write data
	 * so that the SPA knows they're in use.
	 *
	 * All claims *must* be resolved in the first txg -- before the SPA
	 * starts allocating blocks -- so that nothing is allocated twice.
	 */
	ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <, spa_first_txg(spa));
	ASSERT3U(spa_first_txg(spa), <=, txg);

	zio = zio_create(pio, spa, txg, bp, NULL, 0, done, private,
	    ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW, 0,
	    ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);

	zio->io_bp = &zio->io_bp_copy;

	return (zio);
}
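/*
 * Usage sketch: intent log replay at pool open claims each log block
 * it walks, roughly
 *
 *	(void) zio_wait(zio_claim(NULL, spa, first_txg, bp, NULL, NULL));
 *
 * (the actual caller lives in zil.c).
 */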
zio_t *
zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
    zio_done_func_t *done, void *private, int priority, int flags)
{
	zio_t *zio;
	int c;

	if (vd->vdev_children == 0) {
		zio = zio_create(pio, spa, 0, NULL, NULL, 0, done, private,
		    ZIO_TYPE_IOCTL, priority, flags,
		    ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);

		zio->io_vd = vd;
		zio->io_cmd = cmd;
	} else {
		zio = zio_null(pio, spa, NULL, NULL, flags);

		for (c = 0; c < vd->vdev_children; c++)
			zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
			    done, private, priority, flags));
	}

	return (zio);
}

static void
zio_phys_bp_init(vdev_t *vd, blkptr_t *bp, uint64_t offset, uint64_t size,
    int checksum)
{
	ASSERT(vd->vdev_children == 0);

	ASSERT(size <= SPA_MAXBLOCKSIZE);
	ASSERT(P2PHASE(size, SPA_MINBLOCKSIZE) == 0);
	ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);

	ASSERT(offset + size <= VDEV_LABEL_START_SIZE ||
	    offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
	ASSERT3U(offset + size, <=, vd->vdev_psize);

	BP_ZERO(bp);

	BP_SET_LSIZE(bp, size);
	BP_SET_PSIZE(bp, size);

	BP_SET_CHECKSUM(bp, checksum);
	BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
	BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);

	if (checksum != ZIO_CHECKSUM_OFF)
		ZIO_SET_CHECKSUM(&bp->blk_cksum, offset, 0, 0, 0);
}

zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    void *data, int checksum, zio_done_func_t *done, void *private,
    int priority, int flags)
{
	zio_t *zio;
	blkptr_t blk;

	zio_phys_bp_init(vd, &blk, offset, size, checksum);

	zio = zio_create(pio, vd->vdev_spa, 0, &blk, data, size, done, private,
	    ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL,
	    ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);

	zio->io_vd = vd;
	zio->io_offset = offset;

	/*
	 * Work off our copy of the bp so the caller can free it.
	 */
	zio->io_bp = &zio->io_bp_copy;

	return (zio);
}
zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
    void *data, int checksum, zio_done_func_t *done, void *private,
    int priority, int flags)
{
	zio_block_tail_t *zbt;
	void *wbuf;
	zio_t *zio;
	blkptr_t blk;

	zio_phys_bp_init(vd, &blk, offset, size, checksum);

	zio = zio_create(pio, vd->vdev_spa, 0, &blk, data, size, done, private,
	    ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL,
	    ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);

	zio->io_vd = vd;
	zio->io_offset = offset;

	zio->io_bp = &zio->io_bp_copy;
	zio->io_checksum = checksum;

	if (zio_checksum_table[checksum].ci_zbt) {
		/*
		 * zbt checksums are necessarily destructive -- they modify
		 * one word of the write buffer to hold the verifier/checksum.
		 * Therefore, we must make a local copy in case the data is
		 * being written to multiple places.
		 */
		wbuf = zio_buf_alloc(size);
		bcopy(data, wbuf, size);
		zio_push_transform(zio, wbuf, size, size);

		zbt = (zio_block_tail_t *)((char *)wbuf + size) - 1;
		zbt->zbt_cksum = blk.blk_cksum;
	}

	return (zio);
}
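/*
 * Buffer layout assumed by the zbt code above: the zio_block_tail_t
 * occupies the last sizeof (zio_block_tail_t) bytes of the write
 * buffer, so the embedded verifier is stamped at
 * wbuf + size - sizeof (zio_block_tail_t).  That in-place stamping is
 * why the data must be copied before it can be checksummed.
 */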
/*
 * Create a child I/O to do some work for us.  It has no associated bp.
 */
zio_t *
zio_vdev_child_io(zio_t *zio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
    void *data, uint64_t size, int type, int priority, int flags,
    zio_done_func_t *done, void *private)
{
	uint32_t pipeline = ZIO_VDEV_CHILD_PIPELINE;
	zio_t *cio;

	if (type == ZIO_TYPE_READ && bp != NULL) {
		/*
		 * If we have the bp, then the child should perform the
		 * checksum and the parent need not.  This pushes error
		 * detection as close to the leaves as possible and
		 * eliminates redundant checksums in the interior nodes.
		 */
		pipeline |= 1U << ZIO_STAGE_CHECKSUM_VERIFY;
		zio->io_pipeline &= ~(1U << ZIO_STAGE_CHECKSUM_VERIFY);
	}

	cio = zio_create(zio, zio->io_spa, zio->io_txg, bp, data, size,
	    done, private, type, priority,
	    (zio->io_flags & ZIO_FLAG_VDEV_INHERIT) | ZIO_FLAG_CANFAIL | flags,
	    ZIO_STAGE_VDEV_IO_START - 1, pipeline);

	cio->io_vd = vd;
	cio->io_offset = offset;

	return (cio);
}

/*
 * ==========================================================================
 * Initiate I/O, either sync or async
 * ==========================================================================
 */
int
zio_wait(zio_t *zio)
{
	int error;

	ASSERT(zio->io_stage == ZIO_STAGE_OPEN);

	zio->io_waiter = curthread;

	zio_next_stage_async(zio);

	mutex_enter(&zio->io_lock);
	while (zio->io_stalled != ZIO_STAGE_DONE)
		cv_wait(&zio->io_cv, &zio->io_lock);
	mutex_exit(&zio->io_lock);

	error = zio->io_error;
	cv_destroy(&zio->io_cv);
	mutex_destroy(&zio->io_lock);
	kmem_cache_free(zio_cache, zio);

	return (error);
}

void
zio_nowait(zio_t *zio)
{
	zio_next_stage_async(zio);
}
/*
 * ==========================================================================
 * I/O pipeline interlocks: parent/child dependency scoreboarding
 * ==========================================================================
 */
static void
zio_wait_for_children(zio_t *zio, uint32_t stage, uint64_t *countp)
{
	mutex_enter(&zio->io_lock);
	if (*countp == 0) {
		ASSERT(zio->io_stalled == 0);
		mutex_exit(&zio->io_lock);
		zio_next_stage(zio);
	} else {
		zio->io_stalled = stage;
		mutex_exit(&zio->io_lock);
	}
}

static void
zio_notify_parent(zio_t *zio, uint32_t stage, uint64_t *countp)
{
	zio_t *pio = zio->io_parent;

	mutex_enter(&pio->io_lock);
	if (pio->io_error == 0 && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
		pio->io_error = zio->io_error;
	if (--*countp == 0 && pio->io_stalled == stage) {
		pio->io_stalled = 0;
		mutex_exit(&pio->io_lock);
		zio_next_stage_async(pio);
	} else {
		mutex_exit(&pio->io_lock);
	}
}

static void
zio_wait_children_ready(zio_t *zio)
{
	zio_wait_for_children(zio, ZIO_STAGE_WAIT_CHILDREN_READY,
	    &zio->io_children_notready);
}

static void
zio_wait_children_done(zio_t *zio)
{
	zio_wait_for_children(zio, ZIO_STAGE_WAIT_CHILDREN_DONE,
	    &zio->io_children_notdone);
}
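/*
 * Scoreboarding example: a parent that issues three children with
 * zio_nowait() and then enters zio_wait_children_done() stalls with
 * io_stalled == ZIO_STAGE_WAIT_CHILDREN_DONE.  Each child's completion
 * calls zio_notify_parent(); the child that drops io_children_notdone
 * to zero reschedules the parent via zio_next_stage_async().
 */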
static void
zio_ready(zio_t *zio)
{
	zio_t *pio = zio->io_parent;

	if (zio->io_ready)
		zio->io_ready(zio);

	if (pio != NULL)
		zio_notify_parent(zio, ZIO_STAGE_WAIT_CHILDREN_READY,
		    &pio->io_children_notready);

	if (zio->io_bp)
		zio->io_bp_copy = *zio->io_bp;

	zio_next_stage(zio);
}

static void
zio_done(zio_t *zio)
{
	zio_t *pio = zio->io_parent;
	spa_t *spa = zio->io_spa;
	blkptr_t *bp = zio->io_bp;
	vdev_t *vd = zio->io_vd;

	ASSERT(zio->io_children_notready == 0);
	ASSERT(zio->io_children_notdone == 0);

	if (bp != NULL) {
		ASSERT(bp->blk_pad[0] == 0);
		ASSERT(bp->blk_pad[1] == 0);
		ASSERT(bp->blk_pad[2] == 0);
		ASSERT(bcmp(bp, &zio->io_bp_copy, sizeof (blkptr_t)) == 0);
		if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(bp) &&
		    !(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
			ASSERT(!BP_SHOULD_BYTESWAP(bp));
			if (zio->io_ndvas != 0)
				ASSERT3U(zio->io_ndvas, <=, BP_GET_NDVAS(bp));
			ASSERT(BP_COUNT_GANG(bp) == 0 ||
			    (BP_COUNT_GANG(bp) == BP_GET_NDVAS(bp)));
		}
	}

	if (vd != NULL)
		vdev_stat_update(zio);

	if (zio->io_error) {
		/*
		 * If this I/O is attached to a particular vdev,
		 * generate an error message describing the I/O failure
		 * at the block level.  We ignore these errors if the
		 * device is currently unavailable.
		 */
		if (zio->io_error != ECKSUM && vd != NULL && !vdev_is_dead(vd))
			zfs_ereport_post(FM_EREPORT_ZFS_IO,
			    zio->io_spa, vd, zio, 0, 0);

		if ((zio->io_error == EIO ||
		    !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) &&
		    zio->io_logical == zio) {
			/*
			 * For root I/O requests, tell the SPA to log the error
			 * appropriately.  Also, generate a logical data
			 * ereport.
			 */
			spa_log_error(zio->io_spa, zio);

			zfs_ereport_post(FM_EREPORT_ZFS_DATA,
			    zio->io_spa, NULL, zio, 0, 0);
		}

		/*
		 * For I/O requests that cannot fail, panic appropriately.
		 */
		if (!(zio->io_flags & ZIO_FLAG_CANFAIL)) {
			char *blkbuf;

			blkbuf = kmem_alloc(BP_SPRINTF_LEN, KM_NOSLEEP);
			if (blkbuf) {
				sprintf_blkptr(blkbuf, BP_SPRINTF_LEN,
				    bp ? bp : &zio->io_bp_copy);
			}
			panic("ZFS: %s (%s on %s off %llx: zio %p %s): error "
			    "%d", zio->io_error == ECKSUM ?
			    "bad checksum" : "I/O failure",
			    zio_type_name[zio->io_type],
			    vdev_description(vd),
			    (u_longlong_t)zio->io_offset,
			    zio, blkbuf ? blkbuf : "", zio->io_error);
		}
	}

	zio_clear_transform_stack(zio);

	if (zio->io_done)
		zio->io_done(zio);

	ASSERT(zio->io_delegate_list == NULL);
	ASSERT(zio->io_delegate_next == NULL);

	if (pio != NULL) {
		zio_t *next, *prev;

		mutex_enter(&pio->io_lock);
		next = zio->io_sibling_next;
		prev = zio->io_sibling_prev;
		if (next != NULL)
			next->io_sibling_prev = prev;
		if (prev != NULL)
			prev->io_sibling_next = next;
		if (pio->io_child == zio)
			pio->io_child = next;
		mutex_exit(&pio->io_lock);

		zio_notify_parent(zio, ZIO_STAGE_WAIT_CHILDREN_DONE,
		    &pio->io_children_notdone);
	}

	/*
	 * Note: this I/O is now done, and will shortly be freed, so there is
	 * no need to clear this (or any other) flag.
	 */
	if (zio->io_flags & ZIO_FLAG_CONFIG_GRABBED)
		spa_config_exit(spa, zio);

	if (zio->io_waiter != NULL) {
		mutex_enter(&zio->io_lock);
		ASSERT(zio->io_stage == ZIO_STAGE_DONE);
		zio->io_stalled = zio->io_stage;
		cv_broadcast(&zio->io_cv);
		mutex_exit(&zio->io_lock);
	} else {
		cv_destroy(&zio->io_cv);
		mutex_destroy(&zio->io_lock);
		kmem_cache_free(zio_cache, zio);
	}
}
/*
 * ==========================================================================
 * Compression support
 * ==========================================================================
 */
static void
zio_write_compress(zio_t *zio)
{
	int compress = zio->io_compress;
	blkptr_t *bp = zio->io_bp;
	void *cbuf;
	uint64_t lsize = zio->io_size;
	uint64_t csize = lsize;
	uint64_t cbufsize = 0;
	int pass;

	if (bp->blk_birth == zio->io_txg) {
		/*
		 * We're rewriting an existing block, which means we're
		 * working on behalf of spa_sync().  For spa_sync() to
		 * converge, it must eventually be the case that we don't
		 * have to allocate new blocks.  But compression changes
		 * the blocksize, which forces a reallocate, and makes
		 * convergence take longer.  Therefore, after the first
		 * few passes, stop compressing to ensure convergence.
		 */
		pass = spa_sync_pass(zio->io_spa);
		if (pass > zio_sync_pass.zp_dontcompress)
			compress = ZIO_COMPRESS_OFF;
	} else {
		ASSERT(BP_IS_HOLE(bp));
		pass = 1;
	}

	if (compress != ZIO_COMPRESS_OFF)
		if (!zio_compress_data(compress, zio->io_data, zio->io_size,
		    &cbuf, &csize, &cbufsize))
			compress = ZIO_COMPRESS_OFF;

	if (compress != ZIO_COMPRESS_OFF && csize != 0)
		zio_push_transform(zio, cbuf, csize, cbufsize);

	/*
	 * The final pass of spa_sync() must be all rewrites, but the first
	 * few passes offer a trade-off: allocating blocks defers convergence,
	 * but newly allocated blocks are sequential, so they can be written
	 * to disk faster.  Therefore, we allow the first few passes of
	 * spa_sync() to reallocate new blocks, but force rewrites after that.
	 * There should only be a handful of blocks after pass 1 in any case.
	 */
	if (bp->blk_birth == zio->io_txg && BP_GET_PSIZE(bp) == csize &&
	    pass > zio_sync_pass.zp_rewrite) {
		ASSERT(csize != 0);
		BP_SET_LSIZE(bp, lsize);
		BP_SET_COMPRESS(bp, compress);
		zio->io_pipeline = ZIO_REWRITE_PIPELINE;
	} else {
		if (bp->blk_birth == zio->io_txg)
			BP_ZERO(bp);
		if (csize == 0) {
			BP_ZERO(bp);
			zio->io_pipeline = ZIO_WAIT_FOR_CHILDREN_PIPELINE;
		} else {
			ASSERT3U(BP_GET_NDVAS(bp), ==, 0);
			BP_SET_LSIZE(bp, lsize);
			BP_SET_PSIZE(bp, csize);
			BP_SET_COMPRESS(bp, compress);
			zio->io_pipeline = ZIO_WRITE_ALLOCATE_PIPELINE;
		}
	}

	zio_next_stage(zio);
}
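/*
 * With the default zio_sync_pass settings (zp_dontcompress = 4,
 * zp_rewrite = 1): a block written again in sync pass 2 of the same
 * txg is still compressed, but if its psize is unchanged it must be
 * rewritten in place; from pass 5 on, compression is disabled so block
 * sizes, and therefore allocations, stop changing and spa_sync() can
 * converge.
 */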
static void
zio_read_decompress(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	void *data;
	uint64_t size, bufsize;
	int compress = BP_GET_COMPRESS(bp);

	ASSERT(compress != ZIO_COMPRESS_OFF);

	zio_pop_transform(zio, &data, &size, &bufsize);

	if (zio_decompress_data(compress, data, size,
	    zio->io_data, zio->io_size))
		zio->io_error = EIO;

	zio_buf_free(data, bufsize);

	zio_next_stage(zio);
}

/*
 * ==========================================================================
 * Gang block support
 * ==========================================================================
 */
static void
zio_gang_pipeline(zio_t *zio)
{
	/*
	 * By default, the pipeline assumes that we're dealing with a gang
	 * block.  If we're not, strip out any gang-specific stages.
	 */
	if (!BP_IS_GANG(zio->io_bp))
		zio->io_pipeline &= ~ZIO_GANG_STAGES;

	zio_next_stage(zio);
}

static void
zio_gang_byteswap(zio_t *zio)
{
	ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);

	if (BP_SHOULD_BYTESWAP(zio->io_bp))
		byteswap_uint64_array(zio->io_data, zio->io_size);
}

static void
zio_get_gang_header(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	uint64_t gsize = SPA_GANGBLOCKSIZE;
	void *gbuf = zio_buf_alloc(gsize);

	ASSERT(BP_IS_GANG(bp));

	zio_push_transform(zio, gbuf, gsize, gsize);

	zio_nowait(zio_create(zio, zio->io_spa, bp->blk_birth, bp, gbuf, gsize,
	    NULL, NULL, ZIO_TYPE_READ, zio->io_priority,
	    zio->io_flags & ZIO_FLAG_GANG_INHERIT,
	    ZIO_STAGE_OPEN, ZIO_READ_PIPELINE));

	zio_wait_children_done(zio);
}

static void
zio_read_gang_members(zio_t *zio)
{
	zio_gbh_phys_t *gbh;
	uint64_t gsize, gbufsize, loff, lsize;
	int i;

	ASSERT(BP_IS_GANG(zio->io_bp));

	zio_gang_byteswap(zio);
	zio_pop_transform(zio, (void **)&gbh, &gsize, &gbufsize);

	for (loff = 0, i = 0; loff != zio->io_size; loff += lsize, i++) {
		blkptr_t *gbp = &gbh->zg_blkptr[i];
		lsize = BP_GET_PSIZE(gbp);

		ASSERT(BP_GET_COMPRESS(gbp) == ZIO_COMPRESS_OFF);
		ASSERT3U(lsize, ==, BP_GET_LSIZE(gbp));
		ASSERT3U(loff + lsize, <=, zio->io_size);
		ASSERT(i < SPA_GBH_NBLKPTRS);
		ASSERT(!BP_IS_HOLE(gbp));

		zio_nowait(zio_read(zio, zio->io_spa, gbp,
		    (char *)zio->io_data + loff, lsize, NULL, NULL,
		    zio->io_priority, zio->io_flags & ZIO_FLAG_GANG_INHERIT,
		    &zio->io_bookmark));
	}

	zio_buf_free(gbh, gbufsize);
	zio_wait_children_done(zio);
}
static void
zio_rewrite_gang_members(zio_t *zio)
{
	zio_gbh_phys_t *gbh;
	uint64_t gsize, gbufsize, loff, lsize;
	int i;

	ASSERT(BP_IS_GANG(zio->io_bp));
	ASSERT3U(zio->io_size, ==, SPA_GANGBLOCKSIZE);

	zio_gang_byteswap(zio);
	zio_pop_transform(zio, (void **)&gbh, &gsize, &gbufsize);

	ASSERT(gsize == gbufsize);

	for (loff = 0, i = 0; loff != zio->io_size; loff += lsize, i++) {
		blkptr_t *gbp = &gbh->zg_blkptr[i];
		lsize = BP_GET_PSIZE(gbp);

		ASSERT(BP_GET_COMPRESS(gbp) == ZIO_COMPRESS_OFF);
		ASSERT3U(lsize, ==, BP_GET_LSIZE(gbp));
		ASSERT3U(loff + lsize, <=, zio->io_size);
		ASSERT(i < SPA_GBH_NBLKPTRS);
		ASSERT(!BP_IS_HOLE(gbp));

		zio_nowait(zio_rewrite(zio, zio->io_spa, zio->io_checksum,
		    zio->io_txg, gbp, (char *)zio->io_data + loff, lsize,
		    NULL, NULL, zio->io_priority, zio->io_flags,
		    &zio->io_bookmark));
	}

	zio_push_transform(zio, gbh, gsize, gbufsize);
	zio_wait_children_ready(zio);
}

static void
zio_free_gang_members(zio_t *zio)
{
	zio_gbh_phys_t *gbh;
	uint64_t gsize, gbufsize;
	int i;

	ASSERT(BP_IS_GANG(zio->io_bp));

	zio_gang_byteswap(zio);
	zio_pop_transform(zio, (void **)&gbh, &gsize, &gbufsize);

	for (i = 0; i < SPA_GBH_NBLKPTRS; i++) {
		blkptr_t *gbp = &gbh->zg_blkptr[i];

		if (BP_IS_HOLE(gbp))
			continue;
		zio_nowait(zio_free(zio, zio->io_spa, zio->io_txg,
		    gbp, NULL, NULL));
	}

	zio_buf_free(gbh, gbufsize);
	zio_next_stage(zio);
}

static void
zio_claim_gang_members(zio_t *zio)
{
	zio_gbh_phys_t *gbh;
	uint64_t gsize, gbufsize;
	int i;

	ASSERT(BP_IS_GANG(zio->io_bp));

	zio_gang_byteswap(zio);
	zio_pop_transform(zio, (void **)&gbh, &gsize, &gbufsize);

	for (i = 0; i < SPA_GBH_NBLKPTRS; i++) {
		blkptr_t *gbp = &gbh->zg_blkptr[i];
		if (BP_IS_HOLE(gbp))
			continue;
		zio_nowait(zio_claim(zio, zio->io_spa, zio->io_txg,
		    gbp, NULL, NULL));
	}

	zio_buf_free(gbh, gbufsize);
	zio_next_stage(zio);
}

static void
zio_write_allocate_gang_member_done(zio_t *zio)
{
	zio_t *pio = zio->io_parent;
	dva_t *cdva = zio->io_bp->blk_dva;
	dva_t *pdva = pio->io_bp->blk_dva;
	uint64_t asize;
	int d;

	ASSERT3U(pio->io_ndvas, ==, zio->io_ndvas);
	ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));
	ASSERT3U(zio->io_ndvas, <=, BP_GET_NDVAS(zio->io_bp));
	ASSERT3U(pio->io_ndvas, <=, BP_GET_NDVAS(pio->io_bp));

	mutex_enter(&pio->io_lock);
	for (d = 0; d < BP_GET_NDVAS(pio->io_bp); d++) {
		ASSERT(DVA_GET_GANG(&pdva[d]));
		asize = DVA_GET_ASIZE(&pdva[d]);
		asize += DVA_GET_ASIZE(&cdva[d]);
		DVA_SET_ASIZE(&pdva[d], asize);
	}
	mutex_exit(&pio->io_lock);
}
static void
zio_write_allocate_gang_members(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	dva_t *dva = bp->blk_dva;
	spa_t *spa = zio->io_spa;
	zio_gbh_phys_t *gbh;
	uint64_t txg = zio->io_txg;
	uint64_t resid = zio->io_size;
	uint64_t maxalloc = P2ROUNDUP(zio->io_size >> 1, SPA_MINBLOCKSIZE);
	uint64_t gsize, loff, lsize;
	int gbps_left;
	int ndvas = zio->io_ndvas;
	int gbh_ndvas = MIN(ndvas + 1, spa_max_replication(spa));
	int error;
	int i, d;

	gsize = SPA_GANGBLOCKSIZE;
	gbps_left = SPA_GBH_NBLKPTRS;

	error = metaslab_alloc(spa, gsize, bp, gbh_ndvas, txg, NULL, B_FALSE);
	if (error == ENOSPC)
		panic("can't allocate gang block header");
	ASSERT(error == 0);

	for (d = 0; d < gbh_ndvas; d++)
		DVA_SET_GANG(&dva[d], 1);

	bp->blk_birth = txg;

	gbh = zio_buf_alloc(gsize);
	bzero(gbh, gsize);

	/* We need to test multi-level gang blocks */
	if (maxalloc >= zio_gang_bang && (LBOLT & 0x1) == 0)
		maxalloc = MAX(maxalloc >> 2, SPA_MINBLOCKSIZE);

	for (loff = 0, i = 0; loff != zio->io_size;
	    loff += lsize, resid -= lsize, gbps_left--, i++) {
		blkptr_t *gbp = &gbh->zg_blkptr[i];

		ASSERT(gbps_left != 0);
		maxalloc = MIN(maxalloc, resid);

		while (resid <= maxalloc * gbps_left) {
			error = metaslab_alloc(spa, maxalloc, gbp, ndvas,
			    txg, bp, B_FALSE);
			if (error == 0)
				break;
			ASSERT3U(error, ==, ENOSPC);
			if (maxalloc == SPA_MINBLOCKSIZE)
				panic("really out of space");
			maxalloc = P2ROUNDUP(maxalloc >> 1, SPA_MINBLOCKSIZE);
		}

		if (resid <= maxalloc * gbps_left) {
			lsize = maxalloc;
			BP_SET_LSIZE(gbp, lsize);
			BP_SET_PSIZE(gbp, lsize);
			BP_SET_COMPRESS(gbp, ZIO_COMPRESS_OFF);
			gbp->blk_birth = txg;
			zio_nowait(zio_rewrite(zio, spa,
			    zio->io_checksum, txg, gbp,
			    (char *)zio->io_data + loff, lsize,
			    zio_write_allocate_gang_member_done, NULL,
			    zio->io_priority, zio->io_flags,
			    &zio->io_bookmark));
		} else {
			lsize = P2ROUNDUP(resid / gbps_left, SPA_MINBLOCKSIZE);
			ASSERT(lsize != SPA_MINBLOCKSIZE);
			zio_nowait(zio_write_allocate(zio, spa,
			    zio->io_checksum, txg, gbp,
			    (char *)zio->io_data + loff, lsize,
			    zio_write_allocate_gang_member_done, NULL,
			    zio->io_priority, zio->io_flags));
		}
	}

	ASSERT(resid == 0 && loff == zio->io_size);

	zio->io_pipeline |= 1U << ZIO_STAGE_GANG_CHECKSUM_GENERATE;

	zio_push_transform(zio, gbh, gsize, gsize);

	/*
	 * As much as we'd like this to be zio_wait_children_ready(),
	 * updating our ASIZE doesn't happen until the io_done callback,
	 * so we have to wait for that to finish in order for our BP
	 * to be stable.
	 */
	zio_wait_children_done(zio);
}
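/*
 * Worked example: a 128K write ganged across SPA_GBH_NBLKPTRS == 3
 * slots starts with maxalloc = 64K.  If 64K allocations succeed, the
 * data becomes two 64K members.  If 64K fails and maxalloc halves to
 * 32K, resid (128K) exceeds 32K * gbps_left, so each slot instead gets
 * a recursive zio_write_allocate() of roughly resid / gbps_left bytes
 * (rounded up to SPA_MINBLOCKSIZE), which may gang again -- the
 * multi-level case that the zio_gang_bang tunable exercises.
 */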
/*
 * ==========================================================================
 * Allocate and free blocks
 * ==========================================================================
 */
static void
zio_dva_allocate(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;
	int error;

	ASSERT(BP_IS_HOLE(bp));
	ASSERT3U(BP_GET_NDVAS(bp), ==, 0);
	ASSERT3U(zio->io_ndvas, >, 0);
	ASSERT3U(zio->io_ndvas, <=, spa_max_replication(zio->io_spa));

	/* For testing, make some blocks above a certain size be gang blocks */
	if (zio->io_size >= zio_gang_bang && (LBOLT & 0x3) == 0) {
		zio_write_allocate_gang_members(zio);
		return;
	}

	ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));

	error = metaslab_alloc(zio->io_spa, zio->io_size, bp, zio->io_ndvas,
	    zio->io_txg, NULL, B_FALSE);

	if (error == 0) {
		bp->blk_birth = zio->io_txg;
	} else if (error == ENOSPC) {
		if (zio->io_size == SPA_MINBLOCKSIZE)
			panic("really, truly out of space");
		zio_write_allocate_gang_members(zio);
		return;
	} else {
		zio->io_error = error;
	}

	zio_next_stage(zio);
}

static void
zio_dva_free(zio_t *zio)
{
	blkptr_t *bp = zio->io_bp;

	metaslab_free(zio->io_spa, bp, zio->io_txg, B_FALSE);

	BP_ZERO(bp);

	zio_next_stage(zio);
}

static void
zio_dva_claim(zio_t *zio)
{
	zio->io_error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg);

	zio_next_stage(zio);
}

/*
 * ==========================================================================
 * Read and write to physical devices
 * ==========================================================================
 */
static void
zio_vdev_io_start(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_t *tvd = vd ? vd->vdev_top : NULL;
	blkptr_t *bp = zio->io_bp;
	uint64_t align;

	if (vd == NULL) {
		/* The mirror_ops handle multiple DVAs in a single BP */
		vdev_mirror_ops.vdev_op_io_start(zio);
		return;
	}

	align = 1ULL << tvd->vdev_ashift;

	if (zio->io_retries == 0 && vd == tvd)
		zio->io_flags |= ZIO_FLAG_FAILFAST;

	if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) &&
	    vd->vdev_children == 0) {
		zio->io_flags |= ZIO_FLAG_PHYSICAL;
		zio->io_offset += VDEV_LABEL_START_SIZE;
	}

	if (P2PHASE(zio->io_size, align) != 0) {
		uint64_t asize = P2ROUNDUP(zio->io_size, align);
		char *abuf = zio_buf_alloc(asize);

		if (zio->io_type == ZIO_TYPE_WRITE) {
			bcopy(zio->io_data, abuf, zio->io_size);
			bzero(abuf + zio->io_size, asize - zio->io_size);
		}
		zio_push_transform(zio, abuf, asize, asize);
		ASSERT(!(zio->io_flags & ZIO_FLAG_SUBBLOCK));
		zio->io_flags |= ZIO_FLAG_SUBBLOCK;
	}

	ASSERT(P2PHASE(zio->io_offset, align) == 0);
	ASSERT(P2PHASE(zio->io_size, align) == 0);
	ASSERT(bp == NULL ||
	    P2ROUNDUP(ZIO_GET_IOSIZE(zio), align) == zio->io_size);
	ASSERT(zio->io_type != ZIO_TYPE_WRITE || (spa_mode & FWRITE));

	vdev_io_start(zio);

	/* zio_next_stage_async() gets called from io completion interrupt */
}

static void
zio_vdev_io_done(zio_t *zio)
{
	if (zio->io_vd == NULL)
		/* The mirror_ops handle multiple DVAs in a single BP */
		vdev_mirror_ops.vdev_op_io_done(zio);
	else
		vdev_io_done(zio);
}

/* XXPOLICY */
static boolean_t
zio_should_retry(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;

	if (zio->io_error == 0)
		return (B_FALSE);
	if (zio->io_delegate_list != NULL)
		return (B_FALSE);
	if (vd && vd != vd->vdev_top)
		return (B_FALSE);
	if (zio->io_flags & ZIO_FLAG_DONT_RETRY)
		return (B_FALSE);
	if (zio->io_retries > 0)
		return (B_FALSE);

	return (B_TRUE);
}
static void
zio_vdev_io_assess(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_t *tvd = vd ? vd->vdev_top : NULL;

	ASSERT(zio->io_vsd == NULL);

	if (zio->io_flags & ZIO_FLAG_SUBBLOCK) {
		void *abuf;
		uint64_t asize;

		zio_pop_transform(zio, &abuf, &asize, &asize);
		if (zio->io_type == ZIO_TYPE_READ)
			bcopy(abuf, zio->io_data, zio->io_size);
		zio_buf_free(abuf, asize);
		zio->io_flags &= ~ZIO_FLAG_SUBBLOCK;
	}

	if (zio_injection_enabled && !zio->io_error)
		zio->io_error = zio_handle_fault_injection(zio, EIO);

	/*
	 * If the I/O failed, determine whether we should attempt to retry it.
	 */
	/* XXPOLICY */
	if (zio_should_retry(zio)) {
		ASSERT(tvd == vd);

		zio->io_retries++;
		zio->io_error = 0;
		zio->io_flags &= ZIO_FLAG_VDEV_INHERIT |
		    ZIO_FLAG_CONFIG_GRABBED;
		/* XXPOLICY */
		zio->io_flags &= ~ZIO_FLAG_FAILFAST;
		zio->io_flags |= ZIO_FLAG_DONT_CACHE;
		zio->io_stage = ZIO_STAGE_VDEV_IO_START - 1;

		dprintf("retry #%d for %s to %s offset %llx\n",
		    zio->io_retries, zio_type_name[zio->io_type],
		    vdev_description(vd), zio->io_offset);

		zio_next_stage_async(zio);
		return;
	}

	if (zio->io_error != 0 && zio->io_error != ECKSUM &&
	    !(zio->io_flags & ZIO_FLAG_SPECULATIVE) && vd) {
		/*
		 * Poor man's hotplug support.  Even if we're done retrying
		 * this I/O, try to reopen the vdev to see if it's still
		 * attached.  To avoid excessive thrashing, we only try it
		 * once a minute.  This also has the effect of detecting when
		 * missing devices have come back, by polling the device once
		 * a minute.
		 *
		 * We need to do this asynchronously because we can't grab
		 * all the necessary locks way down here.
		 */
		if (gethrtime() - vd->vdev_last_try > 60ULL * NANOSEC) {
			vd->vdev_last_try = gethrtime();
			tvd->vdev_reopen_wanted = 1;
			spa_async_request(vd->vdev_spa, SPA_ASYNC_REOPEN);
		}
	}

	zio_next_stage(zio);
}

void
zio_vdev_io_reissue(zio_t *zio)
{
	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
	ASSERT(zio->io_error == 0);

	zio->io_stage--;
}

void
zio_vdev_io_redone(zio_t *zio)
{
	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE);

	zio->io_stage--;
}

void
zio_vdev_io_bypass(zio_t *zio)
{
	ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
	ASSERT(zio->io_error == 0);

	zio->io_flags |= ZIO_FLAG_IO_BYPASS;
	zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS - 1;
}
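/*
 * These hooks let vdev-layer callers rewind or skip pipeline stages;
 * for example, a read satisfied from the vdev read cache can call
 * zio_vdev_io_bypass() to jump straight to the assess stage without
 * ever touching the device.
 */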
/*
 * ==========================================================================
 * Generate and verify checksums
 * ==========================================================================
 */
static void
zio_checksum_generate(zio_t *zio)
{
	int checksum = zio->io_checksum;
	blkptr_t *bp = zio->io_bp;

	ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));

	BP_SET_CHECKSUM(bp, checksum);
	BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);

	zio_checksum(checksum, &bp->blk_cksum, zio->io_data, zio->io_size);

	zio_next_stage(zio);
}

static void
zio_gang_checksum_generate(zio_t *zio)
{
	zio_cksum_t zc;
	zio_gbh_phys_t *gbh = zio->io_data;

	ASSERT(BP_IS_GANG(zio->io_bp));
	ASSERT3U(zio->io_size, ==, SPA_GANGBLOCKSIZE);

	zio_set_gang_verifier(zio, &gbh->zg_tail.zbt_cksum);

	zio_checksum(ZIO_CHECKSUM_GANG_HEADER, &zc, zio->io_data, zio->io_size);

	zio_next_stage(zio);
}

static void
zio_checksum_verify(zio_t *zio)
{
	if (zio->io_bp != NULL) {
		zio->io_error = zio_checksum_error(zio);
		if (zio->io_error && !(zio->io_flags & ZIO_FLAG_SPECULATIVE))
			zfs_ereport_post(FM_EREPORT_ZFS_CHECKSUM,
			    zio->io_spa, zio->io_vd, zio, 0, 0);
	}

	zio_next_stage(zio);
}

/*
 * Called by RAID-Z to ensure we don't compute the checksum twice.
 */
void
zio_checksum_verified(zio_t *zio)
{
	zio->io_pipeline &= ~(1U << ZIO_STAGE_CHECKSUM_VERIFY);
}

/*
 * Set the external verifier for a gang block based on stuff in the bp
 */
void
zio_set_gang_verifier(zio_t *zio, zio_cksum_t *zcp)
{
	blkptr_t *bp = zio->io_bp;

	zcp->zc_word[0] = DVA_GET_VDEV(BP_IDENTITY(bp));
	zcp->zc_word[1] = DVA_GET_OFFSET(BP_IDENTITY(bp));
	zcp->zc_word[2] = bp->blk_birth;
	zcp->zc_word[3] = 0;
}
/*
 * ==========================================================================
 * Define the pipeline
 * ==========================================================================
 */
typedef void zio_pipe_stage_t(zio_t *zio);

static void
zio_badop(zio_t *zio)
{
	panic("Invalid I/O pipeline stage %u for zio %p", zio->io_stage, zio);
}

zio_pipe_stage_t *zio_pipeline[ZIO_STAGE_DONE + 2] = {
	zio_badop,
	zio_wait_children_ready,
	zio_write_compress,
	zio_checksum_generate,
	zio_gang_pipeline,
	zio_get_gang_header,
	zio_rewrite_gang_members,
	zio_free_gang_members,
	zio_claim_gang_members,
	zio_dva_allocate,
	zio_dva_free,
	zio_dva_claim,
	zio_gang_checksum_generate,
	zio_ready,
	zio_vdev_io_start,
	zio_vdev_io_done,
	zio_vdev_io_assess,
	zio_wait_children_done,
	zio_checksum_verify,
	zio_read_gang_members,
	zio_read_decompress,
	zio_done,
	zio_badop
};
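/*
 * The table above is indexed by ZIO_STAGE_* value.  Entry 0 and the
 * final entry are zio_badop because ZIO_STAGE_OPEN is never executed
 * as a stage and nothing may run past ZIO_STAGE_DONE; each zio's
 * io_pipeline bitmask selects which of the stages in between apply.
 */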
/*
 * Move an I/O to the next stage of the pipeline and execute that stage.
 * There's no locking on io_stage because there's no legitimate way for
 * multiple threads to be attempting to process the same I/O.
 */
void
zio_next_stage(zio_t *zio)
{
	uint32_t pipeline = zio->io_pipeline;

	ASSERT(!MUTEX_HELD(&zio->io_lock));

	if (zio->io_error) {
		dprintf("zio %p vdev %s offset %llx stage %d error %d\n",
		    zio, vdev_description(zio->io_vd),
		    zio->io_offset, zio->io_stage, zio->io_error);
		if (((1U << zio->io_stage) & ZIO_VDEV_IO_PIPELINE) == 0)
			pipeline &= ZIO_ERROR_PIPELINE_MASK;
	}

	while (((1U << ++zio->io_stage) & pipeline) == 0)
		continue;

	ASSERT(zio->io_stage <= ZIO_STAGE_DONE);
	ASSERT(zio->io_stalled == 0);

	/*
	 * See the comment in zio_next_stage_async() about per-CPU taskqs.
	 */
	if (((1U << zio->io_stage) & zio->io_async_stages) &&
	    (zio->io_stage == ZIO_STAGE_WRITE_COMPRESS) &&
	    !(zio->io_flags & ZIO_FLAG_METADATA)) {
		taskq_t *tq = zio->io_spa->spa_zio_issue_taskq[zio->io_type];
		(void) taskq_dispatch(tq,
		    (task_func_t *)zio_pipeline[zio->io_stage], zio, TQ_SLEEP);
	} else {
		zio_pipeline[zio->io_stage](zio);
	}
}

void
zio_next_stage_async(zio_t *zio)
{
	taskq_t *tq;
	uint32_t pipeline = zio->io_pipeline;

	ASSERT(!MUTEX_HELD(&zio->io_lock));

	if (zio->io_error) {
		dprintf("zio %p vdev %s offset %llx stage %d error %d\n",
		    zio, vdev_description(zio->io_vd),
		    zio->io_offset, zio->io_stage, zio->io_error);
		if (((1U << zio->io_stage) & ZIO_VDEV_IO_PIPELINE) == 0)
			pipeline &= ZIO_ERROR_PIPELINE_MASK;
	}

	while (((1U << ++zio->io_stage) & pipeline) == 0)
		continue;

	ASSERT(zio->io_stage <= ZIO_STAGE_DONE);
	ASSERT(zio->io_stalled == 0);

	/*
	 * For performance, we'll probably want two sets of task queues:
	 * per-CPU issue taskqs and per-CPU completion taskqs.  The per-CPU
	 * part is for read performance: since we have to make a pass over
	 * the data to checksum it anyway, we want to do this on the same CPU
	 * that issued the read, because (assuming CPU scheduling affinity)
	 * that thread is probably still there.  Getting this optimization
	 * right avoids performance-hostile cache-to-cache transfers.
	 *
	 * Note that having two sets of task queues is also necessary for
	 * correctness: if all of the issue threads get bogged down waiting
	 * for dependent reads (e.g. metaslab freelist) to complete, then
	 * there won't be any threads available to service I/O completion
	 * interrupts.
	 */
	if ((1U << zio->io_stage) & zio->io_async_stages) {
		if (zio->io_stage < ZIO_STAGE_VDEV_IO_DONE)
			tq = zio->io_spa->spa_zio_issue_taskq[zio->io_type];
		else
			tq = zio->io_spa->spa_zio_intr_taskq[zio->io_type];
		(void) taskq_dispatch(tq,
		    (task_func_t *)zio_pipeline[zio->io_stage], zio, TQ_SLEEP);
	} else {
		zio_pipeline[zio->io_stage](zio);
	}
}
static boolean_t
zio_alloc_should_fail(void)
{
	static uint16_t allocs = 0;

	return (P2PHASE(allocs++, 1U<<zio_zil_fail_shift) == 0);
}
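/*
 * For example, with zio_zil_fail_shift = 3, P2PHASE(allocs, 1 << 3)
 * is zero on every eighth call (allocs 0, 8, 16, ...), so one in
 * eight intent log allocations is forced to fail.
 */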
/*
 * Try to allocate an intent log block.  Return 0 on success, errno on failure.
 */
int
zio_alloc_blk(spa_t *spa, uint64_t size, blkptr_t *new_bp, blkptr_t *old_bp,
    uint64_t txg)
{
	int error;

	spa_config_enter(spa, RW_READER, FTAG);

	if (zio_zil_fail_shift && zio_alloc_should_fail()) {
		spa_config_exit(spa, FTAG);
		return (ENOSPC);
	}

	/*
	 * We were passed the previous log block's dva_t in bp->blk_dva[0].
	 */
	error = metaslab_alloc(spa, size, new_bp, 1, txg, old_bp, B_TRUE);

	if (error == 0) {
		BP_SET_LSIZE(new_bp, size);
		BP_SET_PSIZE(new_bp, size);
		BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
		BP_SET_CHECKSUM(new_bp, ZIO_CHECKSUM_ZILOG);
		BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
		BP_SET_LEVEL(new_bp, 0);
		BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER);
		new_bp->blk_birth = txg;
	}

	spa_config_exit(spa, FTAG);

	return (error);
}

/*
 * Free an intent log block.  We know it can't be a gang block, so there's
 * nothing to do except metaslab_free() it.
 */
void
zio_free_blk(spa_t *spa, blkptr_t *bp, uint64_t txg)
{
	ASSERT(!BP_IS_GANG(bp));

	spa_config_enter(spa, RW_READER, FTAG);

	metaslab_free(spa, bp, txg, B_FALSE);

	spa_config_exit(spa, FTAG);
}