/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Portions Copyright (c) 2012 Martin Matuska <mm@FreeBSD.org>
 */

#include <sys/zfs_context.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/disk.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <geom/geom.h>
#include <geom/geom_int.h>

/*
 * Virtual device vector for GEOM.
 */

static g_attrchanged_t vdev_geom_attrchanged;
struct g_class zfs_vdev_class = {
	.name = "ZFS::VDEV",
	.version = G_VERSION,
	.attrchanged = vdev_geom_attrchanged,
};

DECLARE_GEOM_CLASS(zfs_vdev_class, zfs_vdev);

SYSCTL_DECL(_vfs_zfs_vdev);
/* Don't send BIO_FLUSH. */
static int vdev_geom_bio_flush_disable = 0;
TUNABLE_INT("vfs.zfs.vdev.bio_flush_disable", &vdev_geom_bio_flush_disable);
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_flush_disable, CTLFLAG_RW,
    &vdev_geom_bio_flush_disable, 0, "Disable BIO_FLUSH");
/* Don't send BIO_DELETE. */
static int vdev_geom_bio_delete_disable = 0;
TUNABLE_INT("vfs.zfs.vdev.bio_delete_disable", &vdev_geom_bio_delete_disable);
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_delete_disable, CTLFLAG_RW,
    &vdev_geom_bio_delete_disable, 0, "Disable BIO_DELETE");

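/*
 * Note: both knobs are loader tunables as well as read/write sysctls, so
 * they can be preset in /boot/loader.conf or toggled at runtime, e.g.
 * "sysctl vfs.zfs.vdev.bio_flush_disable=1".
 */
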
/*
 * Thread local storage used to indicate when a thread is probing geoms
 * for their guids.  If NULL, this thread is not tasting geoms.  If non NULL,
 * it is looking for a replacement for the vdev_t* that is its value.
 */
uint_t zfs_geom_probe_vdev_key;

static void
vdev_geom_set_rotation_rate(vdev_t *vd, struct g_consumer *cp)
{
	int error;
	uint16_t rate;

	error = g_getattr("GEOM::rotation_rate", cp, &rate);
	if (error == 0)
		vd->vdev_rotation_rate = rate;
	else
		vd->vdev_rotation_rate = VDEV_RATE_UNKNOWN;
}

static void
vdev_geom_attrchanged(struct g_consumer *cp, const char *attr)
{
	vdev_t *vd;
	spa_t *spa;
	char *physpath;
	char *old_physpath;
	int error, physpath_len;

	vd = cp->private;
	if (vd == NULL)
		return;

	if (strcmp(attr, "GEOM::rotation_rate") == 0) {
		vdev_geom_set_rotation_rate(vd, cp);
		return;
	}

	if (strcmp(attr, "GEOM::physpath") != 0)
		return;

	if (g_access(cp, 1, 0, 0) != 0)
		return;

	/*
	 * Record/Update physical path information for this device.
	 */
	spa = vd->vdev_spa;
	physpath_len = MAXPATHLEN;
	physpath = g_malloc(physpath_len, M_WAITOK|M_ZERO);
	error = g_io_getattr("GEOM::physpath", cp, &physpath_len, physpath);
	g_access(cp, -1, 0, 0);
	if (error == 0) {
		/* g_topology lock ensures that vdev has not been closed */
		old_physpath = vd->vdev_physpath;
		vd->vdev_physpath = spa_strdup(physpath);
		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);

		if (old_physpath != NULL)
			spa_strfree(old_physpath);
	}
	g_free(physpath);
}

static void
vdev_geom_orphan(struct g_consumer *cp)
{
	vdev_t *vd;

	g_topology_assert();

	vd = cp->private;
	if (vd == NULL) {
		/* Vdev close in progress.  Ignore the event. */
		return;
	}

	/*
	 * Orphan callbacks occur from the GEOM event thread.
	 * Concurrent with this call, new I/O requests may be
	 * working their way through GEOM about to find out
	 * (only once executed by the g_down thread) that we've
	 * been orphaned from our disk provider.  These I/Os
	 * must be retired before we can detach our consumer.
	 * This is most easily achieved by acquiring the
	 * SPA ZIO configuration lock as a writer, but doing
	 * so with the GEOM topology lock held would cause
	 * a lock order reversal.  Instead, rely on the SPA's
	 * async removal support to invoke a close on this
	 * vdev once it is safe to do so.
	 */
	vd->vdev_remove_wanted = B_TRUE;
	spa_async_request(vd->vdev_spa, SPA_ASYNC_REMOVE);
}

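/*
 * Attach a consumer to the given provider on behalf of vdev vd, creating
 * the shared "zfs::vdev" geom if it does not exist yet and reusing an
 * existing consumer when one is already connected to the provider.
 */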
static struct g_consumer *
vdev_geom_attach(struct g_provider *pp, vdev_t *vd)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	int error;

	g_topology_assert();

	ZFS_LOG(1, "Attaching to %s.", pp->name);
	/* Do we have geom already? No? Create one. */
	LIST_FOREACH(gp, &zfs_vdev_class.geom, geom) {
		if (gp->flags & G_GEOM_WITHER)
			continue;
		if (strcmp(gp->name, "zfs::vdev") != 0)
			continue;
		break;
	}
	if (gp == NULL) {
		gp = g_new_geomf(&zfs_vdev_class, "zfs::vdev");
		gp->orphan = vdev_geom_orphan;
		gp->attrchanged = vdev_geom_attrchanged;
		cp = g_new_consumer(gp);
		error = g_attach(cp, pp);
		if (error != 0) {
			ZFS_LOG(1, "%s(%d): g_attach failed: %d\n", __func__,
			    __LINE__, error);
			g_wither_geom(gp, ENXIO);
			return (NULL);
		}
		error = g_access(cp, 1, 0, 1);
		if (error != 0) {
			ZFS_LOG(1, "%s(%d): g_access failed: %d\n", __func__,
			    __LINE__, error);
			g_wither_geom(gp, ENXIO);
			return (NULL);
		}
		ZFS_LOG(1, "Created geom and consumer for %s.", pp->name);
	} else {
		/* Check if we are already connected to this provider. */
		LIST_FOREACH(cp, &gp->consumer, consumer) {
			if (cp->provider == pp) {
				ZFS_LOG(1, "Found consumer for %s.", pp->name);
				break;
			}
		}
		if (cp == NULL) {
			cp = g_new_consumer(gp);
			error = g_attach(cp, pp);
			if (error != 0) {
				ZFS_LOG(1, "%s(%d): g_attach failed: %d\n",
				    __func__, __LINE__, error);
				g_destroy_consumer(cp);
				return (NULL);
			}
			error = g_access(cp, 1, 0, 1);
			if (error != 0) {
				ZFS_LOG(1, "%s(%d): g_access failed: %d\n",
				    __func__, __LINE__, error);
				g_detach(cp);
				g_destroy_consumer(cp);
				return (NULL);
			}
			ZFS_LOG(1, "Created consumer for %s.", pp->name);
		} else {
			error = g_access(cp, 1, 0, 1);
			if (error != 0) {
				ZFS_LOG(1, "%s(%d): g_access failed: %d\n",
				    __func__, __LINE__, error);
				return (NULL);
			}
			ZFS_LOG(1, "Used existing consumer for %s.", pp->name);
		}
	}

	/*
	 * BUG: cp may already belong to a vdev.  This could happen if:
	 * 1) That vdev is a shared spare, or
	 * 2) We are trying to reopen a missing vdev and we are scanning by
	 *    guid.  In that case, we'll ultimately fail to open this consumer,
	 *    but not until after setting the private field.
	 * The solution is to:
	 * 1) Don't set the private field until after the open succeeds, and
	 * 2) Set it to a linked list of vdevs, not just a single vdev
	 */
	cp->private = vd;
	vd->vdev_tsd = cp;

	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	return (cp);
}

static void
vdev_geom_close_locked(vdev_t *vd)
{
	struct g_geom *gp;
	struct g_consumer *cp;

	g_topology_assert();

	cp = vd->vdev_tsd;
	if (cp == NULL)
		return;

	ZFS_LOG(1, "Closing access to %s.", cp->provider->name);
	KASSERT(vd->vdev_tsd == cp, ("%s: vdev_tsd is not cp", __func__));
	vd->vdev_tsd = NULL;
	vd->vdev_delayed_close = B_FALSE;
	cp->private = NULL;

	gp = cp->geom;
	g_access(cp, -1, 0, -1);
	/* Destroy consumer on last close. */
	if (cp->acr == 0 && cp->ace == 0) {
		if (cp->acw > 0)
			g_access(cp, 0, -cp->acw, 0);
		if (cp->provider != NULL) {
			ZFS_LOG(1, "Destroyed consumer to %s.",
			    cp->provider->name);
			g_detach(cp);
		}
		g_destroy_consumer(cp);
	}
	/* Destroy geom if there are no consumers left. */
	if (LIST_EMPTY(&gp->consumer)) {
		ZFS_LOG(1, "Destroyed geom %s.", gp->name);
		g_wither_geom(gp, ENXIO);
	}
}

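/*
 * Extract the pool and vdev guids from a label config nvlist.
 */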
static void
nvlist_get_guids(nvlist_t *list, uint64_t *pguid, uint64_t *vguid)
{

	(void) nvlist_lookup_uint64(list, ZPOOL_CONFIG_GUID, vguid);
	(void) nvlist_lookup_uint64(list, ZPOOL_CONFIG_POOL_GUID, pguid);
}

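/*
 * Issue one or more synchronous bios to read or write the given buffer,
 * splitting the transfer into chunks no larger than MAXPHYS.
 */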
static int
vdev_geom_io(struct g_consumer *cp, int cmd, void *data, off_t offset, off_t size)
{
	struct bio *bp;
	u_char *p;
	off_t off, maxio;
	int error;

	ASSERT((offset % cp->provider->sectorsize) == 0);
	ASSERT((size % cp->provider->sectorsize) == 0);

	bp = g_alloc_bio();
	off = offset;
	offset += size;
	p = data;
	maxio = MAXPHYS - (MAXPHYS % cp->provider->sectorsize);
	error = 0;

	for (; off < offset; off += maxio, p += maxio, size -= maxio) {
		bzero(bp, sizeof(*bp));
		bp->bio_cmd = cmd;
		bp->bio_done = NULL;
		bp->bio_offset = off;
		bp->bio_length = MIN(size, maxio);
		bp->bio_data = p;
		g_io_request(bp, cp);
		error = biowait(bp, "vdev_geom_io");
		if (error != 0)
			break;
	}

	g_destroy_bio(bp);
	return (error);
}

static void
vdev_geom_taste_orphan(struct g_consumer *cp)
{

	ZFS_LOG(0, "WARNING: Orphan %s while tasting its VDev GUID.",
	    cp->provider->name);
}

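/*
 * Read a config nvlist from one of the provider's vdev labels.
 * Returns ENOENT if no valid label is found.
 */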
static int
vdev_geom_read_config(struct g_consumer *cp, nvlist_t **config)
{
	struct g_provider *pp;
	vdev_label_t *label;
	char *buf;
	size_t buflen;
	uint64_t psize;
	off_t offset, size;
	uint64_t state, txg;
	int l;

	g_topology_assert_not();

	pp = cp->provider;
	ZFS_LOG(1, "Reading config from %s...", pp->name);

	psize = pp->mediasize;
	psize = P2ALIGN(psize, (uint64_t)sizeof(vdev_label_t));

	size = sizeof(*label) + pp->sectorsize -
	    ((sizeof(*label) - 1) % pp->sectorsize) - 1;

	label = kmem_alloc(size, KM_SLEEP);
	buflen = sizeof(label->vl_vdev_phys.vp_nvlist);

	*config = NULL;
	for (l = 0; l < VDEV_LABELS; l++) {

		offset = vdev_label_offset(psize, l, 0);
		if ((offset % pp->sectorsize) != 0)
			continue;

		if (vdev_geom_io(cp, BIO_READ, label, offset, size) != 0)
			continue;
		buf = label->vl_vdev_phys.vp_nvlist;

		if (nvlist_unpack(buf, buflen, config, 0) != 0)
			continue;

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_L2CACHE) {
			nvlist_free(*config);
			*config = NULL;
			continue;
		}

		if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
		    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0)) {
			nvlist_free(*config);
			*config = NULL;
			continue;
		}

		break;
	}

	kmem_free(label, size);
	return (*config == NULL ? ENOENT : 0);
}

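/*
 * Grow the configs array, if necessary, so that it can hold the config
 * for the top-level vdev with index id.
 */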
static void
resize_configs(nvlist_t ***configs, uint64_t *count, uint64_t id)
{
	nvlist_t **new_configs;
	uint64_t i;

	if (id < *count)
		return;
	new_configs = kmem_zalloc((id + 1) * sizeof(nvlist_t *),
	    KM_SLEEP);
	for (i = 0; i < *count; i++)
		new_configs[i] = (*configs)[i];
	if (*configs != NULL)
		kmem_free(*configs, *count * sizeof(void *));
	*configs = new_configs;
	*count = id + 1;
}

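/*
 * Record cfg in the configs array if it belongs to the named pool and is
 * newer (higher txg) than any config already recorded for the same
 * top-level vdev; otherwise free it.
 */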
static void
process_vdev_config(nvlist_t ***configs, uint64_t *count, nvlist_t *cfg,
    const char *name, uint64_t *known_pool_guid)
{
	nvlist_t *vdev_tree;
	uint64_t pool_guid;
	uint64_t vdev_guid, known_guid;
	uint64_t id, txg, known_txg;
	char *pname;

	if (nvlist_lookup_string(cfg, ZPOOL_CONFIG_POOL_NAME, &pname) != 0 ||
	    strcmp(pname, name) != 0)
		goto ignore;

	if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_GUID, &pool_guid) != 0)
		goto ignore;

	if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_TOP_GUID, &vdev_guid) != 0)
		goto ignore;

	if (nvlist_lookup_nvlist(cfg, ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) != 0)
		goto ignore;

	if (nvlist_lookup_uint64(vdev_tree, ZPOOL_CONFIG_ID, &id) != 0)
		goto ignore;

	VERIFY(nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);

	if (*known_pool_guid != 0) {
		if (pool_guid != *known_pool_guid)
			goto ignore;
	} else
		*known_pool_guid = pool_guid;

	resize_configs(configs, count, id);

	if ((*configs)[id] != NULL) {
		VERIFY(nvlist_lookup_uint64((*configs)[id],
		    ZPOOL_CONFIG_POOL_TXG, &known_txg) == 0);
		if (txg <= known_txg)
			goto ignore;
		nvlist_free((*configs)[id]);
	}

	(*configs)[id] = cfg;
	return;

ignore:
	nvlist_free(cfg);
}

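/*
 * Temporarily attach the tasting consumer to a candidate provider, checking
 * that the provider looks usable as a vdev (sane sector size, large enough).
 */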
static int
vdev_geom_attach_taster(struct g_consumer *cp, struct g_provider *pp)
{
	int error;

	if (pp->flags & G_PF_WITHER)
		return (EINVAL);
	g_attach(cp, pp);
	error = g_access(cp, 1, 0, 0);
	if (error == 0) {
		if (pp->sectorsize > VDEV_PAD_SIZE || !ISP2(pp->sectorsize))
			error = EINVAL;
		else if (pp->mediasize < SPA_MINDEVSIZE)
			error = EINVAL;
		if (error != 0)
			g_access(cp, -1, 0, 0);
	}
	if (error != 0)
		g_detach(cp);
	return (error);
}

static void
vdev_geom_detach_taster(struct g_consumer *cp)
{

	g_access(cp, -1, 0, 0);
	g_detach(cp);
}

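/*
 * Taste every provider in the system and collect the newest label configs
 * belonging to the named pool, indexed by top-level vdev id.
 */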
int
vdev_geom_read_pool_label(const char *name,
    nvlist_t ***configs, uint64_t *count)
{
	struct g_class *mp;
	struct g_geom *gp, *zgp;
	struct g_provider *pp;
	struct g_consumer *zcp;
	nvlist_t *vdev_cfg;
	uint64_t pool_guid;
	int error;

	DROP_GIANT();
	g_topology_lock();

	zgp = g_new_geomf(&zfs_vdev_class, "zfs::vdev::taste");
	/* This orphan function should never be called. */
	zgp->orphan = vdev_geom_taste_orphan;
	zcp = g_new_consumer(zgp);

	*configs = NULL;
	*count = 0;
	pool_guid = 0;
	LIST_FOREACH(mp, &g_classes, class) {
		if (mp == &zfs_vdev_class)
			continue;
		LIST_FOREACH(gp, &mp->geom, geom) {
			if (gp->flags & G_GEOM_WITHER)
				continue;
			LIST_FOREACH(pp, &gp->provider, provider) {
				if (pp->flags & G_PF_WITHER)
					continue;
				if (vdev_geom_attach_taster(zcp, pp) != 0)
					continue;
				g_topology_unlock();
				error = vdev_geom_read_config(zcp, &vdev_cfg);
				g_topology_lock();
				vdev_geom_detach_taster(zcp);
				if (error)
					continue;
				ZFS_LOG(1, "successfully read vdev config");

				process_vdev_config(configs, count,
				    vdev_cfg, name, &pool_guid);
			}
		}
	}

	g_destroy_consumer(zcp);
	g_destroy_geom(zgp);
	g_topology_unlock();
	PICKUP_GIANT();

	return (*count > 0 ? 0 : ENOENT);
}

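/*
 * Read the pool and vdev guids from the label, if any, on the consumer's
 * provider; both default to 0 when no valid label is present.
 */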
static void
vdev_geom_read_guids(struct g_consumer *cp, uint64_t *pguid, uint64_t *vguid)
{
	nvlist_t *config;

	g_topology_assert_not();

	*pguid = 0;
	*vguid = 0;
	if (vdev_geom_read_config(cp, &config) == 0) {
		nvlist_get_guids(config, pguid, vguid);
		nvlist_free(config);
	}
}

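/*
 * Find a provider whose label carries this vdev's guid (and pool guid, when
 * the label has one) and attach to it.  Used when the device may have moved
 * and the recorded path is no longer valid.
 */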
static struct g_consumer *
vdev_geom_attach_by_guids(vdev_t *vd)
{
	struct g_class *mp;
	struct g_geom *gp, *zgp;
	struct g_provider *pp;
	struct g_consumer *cp, *zcp;
	uint64_t pguid, vguid;

	g_topology_assert();

	zgp = g_new_geomf(&zfs_vdev_class, "zfs::vdev::taste");
	zgp->orphan = vdev_geom_taste_orphan;
	zcp = g_new_consumer(zgp);

	cp = NULL;
	LIST_FOREACH(mp, &g_classes, class) {
		if (mp == &zfs_vdev_class)
			continue;
		LIST_FOREACH(gp, &mp->geom, geom) {
			if (gp->flags & G_GEOM_WITHER)
				continue;
			LIST_FOREACH(pp, &gp->provider, provider) {
				if (vdev_geom_attach_taster(zcp, pp) != 0)
					continue;
				g_topology_unlock();
				vdev_geom_read_guids(zcp, &pguid, &vguid);
				g_topology_lock();
				vdev_geom_detach_taster(zcp);
				/*
				 * Check that the label's vdev guid matches the
				 * desired guid.  If the label has a pool guid,
				 * check that it matches too. (Inactive spares
				 * and L2ARCs do not have any pool guid in the
				 * label.)
				 */
				if ((pguid != 0 &&
				    pguid != spa_guid(vd->vdev_spa)) ||
				    vguid != vd->vdev_guid)
					continue;
				cp = vdev_geom_attach(pp, vd);
				if (cp == NULL) {
					printf("ZFS WARNING: Unable to "
					    "attach to %s.\n", pp->name);
					continue;
				}
				break;
			}
			if (cp != NULL)
				break;
		}
		if (cp != NULL)
			break;
	}

	g_destroy_consumer(zcp);
	g_destroy_geom(zgp);
	return (cp);
}

static struct g_consumer *
vdev_geom_open_by_guids(vdev_t *vd)
{
	struct g_consumer *cp;
	char *buf;
	size_t len;

	g_topology_assert();

	ZFS_LOG(1, "Searching by guids [%ju:%ju].",
	    (uintmax_t)spa_guid(vd->vdev_spa), (uintmax_t)vd->vdev_guid);
	cp = vdev_geom_attach_by_guids(vd);
	if (cp != NULL) {
		len = strlen(cp->provider->name) + strlen("/dev/") + 1;
		buf = kmem_alloc(len, KM_SLEEP);

		snprintf(buf, len, "/dev/%s", cp->provider->name);
		spa_strfree(vd->vdev_path);
		vd->vdev_path = buf;

		ZFS_LOG(1, "Attach by guid [%ju:%ju] succeeded, provider %s.",
		    (uintmax_t)spa_guid(vd->vdev_spa),
		    (uintmax_t)vd->vdev_guid, vd->vdev_path);
	} else {
		ZFS_LOG(1, "Search by guid [%ju:%ju] failed.",
		    (uintmax_t)spa_guid(vd->vdev_spa),
		    (uintmax_t)vd->vdev_guid);
	}

	return (cp);
}

static struct g_consumer *
vdev_geom_open_by_path(vdev_t *vd, int check_guid)
{
	struct g_provider *pp;
	struct g_consumer *cp;
	uint64_t pguid, vguid;

	g_topology_assert();

	cp = NULL;
	pp = g_provider_by_name(vd->vdev_path + sizeof("/dev/") - 1);
	if (pp != NULL) {
		ZFS_LOG(1, "Found provider by name %s.", vd->vdev_path);
		cp = vdev_geom_attach(pp, vd);
		if (cp != NULL && check_guid && ISP2(pp->sectorsize) &&
		    pp->sectorsize <= VDEV_PAD_SIZE) {
			g_topology_unlock();
			vdev_geom_read_guids(cp, &pguid, &vguid);
			g_topology_lock();
			/*
			 * Check that the label's vdev guid matches the
			 * desired guid.  If the label has a pool guid,
			 * check that it matches too. (Inactive spares
			 * and L2ARCs do not have any pool guid in the
			 * label.)
			 */
			if ((pguid != 0 &&
			    pguid != spa_guid(vd->vdev_spa)) ||
			    vguid != vd->vdev_guid) {
				vdev_geom_close_locked(vd);
				cp = NULL;
				ZFS_LOG(1, "guid mismatch for provider %s: "
				    "%ju:%ju != %ju:%ju.", vd->vdev_path,
				    (uintmax_t)spa_guid(vd->vdev_spa),
				    (uintmax_t)vd->vdev_guid,
				    (uintmax_t)pguid, (uintmax_t)vguid);
			} else {
				ZFS_LOG(1, "guid match for provider %s.",
				    vd->vdev_path);
			}
		}
	}

	return (cp);
}

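/*
 * Open the GEOM provider backing this vdev.  For a brand new vdev (pool
 * create, add, or split) the provider is found by name alone; otherwise the
 * recorded path is tried first and, failing a guid match, all providers are
 * searched for one carrying the expected guids.
 */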
static int
vdev_geom_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
	struct g_provider *pp;
	struct g_consumer *cp;
	int error;

	/* Set the TLS to indicate downstack that we should not access zvols*/
	VERIFY(tsd_set(zfs_geom_probe_vdev_key, vd) == 0);

	/*
	 * We must have a pathname, and it must be absolute.
	 */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (EINVAL);
	}

	DROP_GIANT();
	g_topology_lock();
	error = 0;

	if (vd->vdev_spa->spa_splitting_newspa ||
	    (vd->vdev_prevstate == VDEV_STATE_UNKNOWN &&
	    vd->vdev_spa->spa_load_state == SPA_LOAD_NONE ||
	    vd->vdev_spa->spa_load_state == SPA_LOAD_CREATE)) {
		/*
		 * We are dealing with a vdev that hasn't been previously
		 * opened (since boot), and we are not loading an
		 * existing pool configuration.  This looks like a
		 * vdev add operation to a new or existing pool.
		 * Assume the user knows what he/she is doing and find
		 * GEOM provider by its name, ignoring GUID mismatches.
		 *
		 * XXPOLICY: It would be safer to only allow a device
		 *           that is unlabeled or labeled but missing
		 *           GUID information to be opened in this fashion,
		 *           unless we are doing a split, in which case we
		 *           should allow any guid.
		 */
		cp = vdev_geom_open_by_path(vd, 0);
	} else {
		/*
		 * Try using the recorded path for this device, but only
		 * accept it if its label data contains the expected GUIDs.
		 */
		cp = vdev_geom_open_by_path(vd, 1);
		if (cp == NULL) {
			/*
			 * The device at vd->vdev_path doesn't have the
			 * expected GUIDs. The disks might have merely
			 * moved around so try all other GEOM providers
			 * to find one with the right GUIDs.
			 */
			cp = vdev_geom_open_by_guids(vd);
		}
	}

	/* Clear the TLS now that tasting is done */
	VERIFY(tsd_set(zfs_geom_probe_vdev_key, NULL) == 0);

	if (cp == NULL) {
		ZFS_LOG(1, "Provider %s not found.", vd->vdev_path);
		error = ENOENT;
	} else if (cp->provider->sectorsize > VDEV_PAD_SIZE ||
	    !ISP2(cp->provider->sectorsize)) {
		ZFS_LOG(1, "Provider %s has unsupported sectorsize.",
		    vd->vdev_path);

		vdev_geom_close_locked(vd);
		error = EINVAL;
		cp = NULL;
	} else if (cp->acw == 0 && (spa_mode(vd->vdev_spa) & FWRITE) != 0) {
		int i;

		for (i = 0; i < 5; i++) {
			error = g_access(cp, 0, 1, 0);
			if (error == 0)
				break;
			g_topology_unlock();
			tsleep(vd, 0, "vdev", hz / 2);
			g_topology_lock();
		}
		if (error != 0) {
			printf("ZFS WARNING: Unable to open %s for writing (error=%d).\n",
			    vd->vdev_path, error);
			vdev_geom_close_locked(vd);
			cp = NULL;
		}
	}

	/* Fetch initial physical path information for this device. */
	if (cp != NULL)
		vdev_geom_attrchanged(cp, "GEOM::physpath");

	g_topology_unlock();
	PICKUP_GIANT();
	if (cp == NULL) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		return (error);
	}
	pp = cp->provider;

	/*
	 * Determine the actual size of the device.
	 */
	*max_psize = *psize = pp->mediasize;

	/*
	 * Determine the device's minimum transfer size and preferred
	 * alignment.
	 */
	*logical_ashift = highbit(MAX(pp->sectorsize, SPA_MINBLOCKSIZE)) - 1;
	*physical_ashift = 0;
	if (pp->stripesize > (1 << *logical_ashift) && ISP2(pp->stripesize) &&
	    pp->stripesize <= (1 << SPA_MAXASHIFT) && pp->stripeoffset == 0)
		*physical_ashift = highbit(pp->stripesize) - 1;

	/*
	 * Clear the nowritecache settings, so that on a vdev_reopen()
	 * we will try again.
	 */
	vd->vdev_nowritecache = B_FALSE;

	/*
	 * Determine the device's rotation rate.
	 */
	vdev_geom_set_rotation_rate(vd, cp);

	return (0);
}

static void
vdev_geom_close(vdev_t *vd)
{

	DROP_GIANT();
	g_topology_lock();
	vdev_geom_close_locked(vd);
	g_topology_unlock();
	PICKUP_GIANT();
}

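/*
 * GEOM bio completion handler: propagate the bio status into the zio and
 * note persistent conditions (no cache flush / no TRIM support, device
 * removal) before handing the zio back to the ZFS I/O pipeline.
 */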
static void
vdev_geom_io_intr(struct bio *bp)
{
	vdev_t *vd;
	zio_t *zio;

	zio = bp->bio_caller1;
	vd = zio->io_vd;
	zio->io_error = bp->bio_error;
	if (zio->io_error == 0 && bp->bio_resid != 0)
		zio->io_error = SET_ERROR(EIO);

	switch(zio->io_error) {
	case ENOTSUP:
		/*
		 * If we get ENOTSUP for BIO_FLUSH or BIO_DELETE we know
		 * that future attempts will never succeed.  In this case
		 * we set a persistent flag so that we don't bother with
		 * requests in the future.
		 */
		switch(bp->bio_cmd) {
		case BIO_FLUSH:
			vd->vdev_nowritecache = B_TRUE;
			break;
		case BIO_DELETE:
			vd->vdev_notrim = B_TRUE;
			break;
		}
		break;
	case ENXIO:
		if (!vd->vdev_remove_wanted) {
			/*
			 * If provider's error is set we assume it is being
			 * removed.
			 */
			if (bp->bio_to->error != 0) {
				vd->vdev_remove_wanted = B_TRUE;
				spa_async_request(zio->io_spa,
				    SPA_ASYNC_REMOVE);
			} else if (!vd->vdev_delayed_close) {
				vd->vdev_delayed_close = B_TRUE;
			}
		}
		break;
	}

	g_destroy_bio(bp);
	zio_delay_interrupt(zio);
}

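/*
 * Translate a zio into a GEOM bio and issue it.  Cache flush and TRIM
 * requests are filtered here according to the tunables above and to what
 * the device has previously claimed to support.
 */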
static void
vdev_geom_io_start(zio_t *zio)
{
	vdev_t *vd;
	struct g_consumer *cp;
	struct bio *bp;

	vd = zio->io_vd;

	switch (zio->io_type) {
	case ZIO_TYPE_IOCTL:
		/* XXPOLICY */
		if (!vdev_readable(vd)) {
			zio->io_error = SET_ERROR(ENXIO);
			zio_interrupt(zio);
			return;
		} else {
			switch (zio->io_cmd) {
			case DKIOCFLUSHWRITECACHE:
				if (zfs_nocacheflush || vdev_geom_bio_flush_disable)
					break;
				if (vd->vdev_nowritecache) {
					zio->io_error = SET_ERROR(ENOTSUP);
					break;
				}
				goto sendreq;
			default:
				zio->io_error = SET_ERROR(ENOTSUP);
			}
		}

		zio_execute(zio);
		return;
	case ZIO_TYPE_FREE:
		if (vd->vdev_notrim) {
			zio->io_error = SET_ERROR(ENOTSUP);
		} else if (!vdev_geom_bio_delete_disable) {
			goto sendreq;
		}
		zio_execute(zio);
		return;
	}
sendreq:
	ASSERT(zio->io_type == ZIO_TYPE_READ ||
	    zio->io_type == ZIO_TYPE_WRITE ||
	    zio->io_type == ZIO_TYPE_FREE ||
	    zio->io_type == ZIO_TYPE_IOCTL);

	cp = vd->vdev_tsd;
	if (cp == NULL) {
		zio->io_error = SET_ERROR(ENXIO);
		zio_interrupt(zio);
		return;
	}
	bp = g_alloc_bio();
	bp->bio_caller1 = zio;
	switch (zio->io_type) {
	case ZIO_TYPE_READ:
	case ZIO_TYPE_WRITE:
		zio->io_target_timestamp = zio_handle_io_delay(zio);
		bp->bio_cmd = zio->io_type == ZIO_TYPE_READ ? BIO_READ : BIO_WRITE;
		bp->bio_data = zio->io_data;
		bp->bio_offset = zio->io_offset;
		bp->bio_length = zio->io_size;
		break;
	case ZIO_TYPE_FREE:
		bp->bio_cmd = BIO_DELETE;
		bp->bio_data = NULL;
		bp->bio_offset = zio->io_offset;
		bp->bio_length = zio->io_size;
		break;
	case ZIO_TYPE_IOCTL:
		bp->bio_cmd = BIO_FLUSH;
		bp->bio_flags |= BIO_ORDERED;
		bp->bio_data = NULL;
		bp->bio_offset = cp->provider->mediasize;
		bp->bio_length = 0;
		break;
	}
	bp->bio_done = vdev_geom_io_intr;

	g_io_request(bp, cp);
}

static void
vdev_geom_io_done(zio_t *zio)
{
}

static void
vdev_geom_hold(vdev_t *vd)
{
}

static void
vdev_geom_rele(vdev_t *vd)
{
}

vdev_ops_t vdev_geom_ops = {
	vdev_geom_open,
	vdev_geom_close,
	vdev_default_asize,
	vdev_geom_io_start,
	vdev_geom_io_done,
	NULL,
	vdev_geom_hold,
	vdev_geom_rele,
	VDEV_TYPE_DISK,		/* name of this vdev type */
	B_TRUE			/* leaf vdev */
};