/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Portions Copyright (c) 2012 Martin Matuska <mm@FreeBSD.org>
 */
#include <sys/zfs_context.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/disk.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
/*
 * Virtual device vector for GEOM.
 */
static g_attrchanged_t vdev_geom_attrchanged;
struct g_class zfs_vdev_class = {
	.name = "ZFS::VDEV",
	.version = G_VERSION,
	.attrchanged = vdev_geom_attrchanged,
};

DECLARE_GEOM_CLASS(zfs_vdev_class, zfs_vdev);
SYSCTL_DECL(_vfs_zfs_vdev);
/* Don't send BIO_FLUSH. */
static int vdev_geom_bio_flush_disable = 0;
TUNABLE_INT("vfs.zfs.vdev.bio_flush_disable", &vdev_geom_bio_flush_disable);
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_flush_disable, CTLFLAG_RW,
    &vdev_geom_bio_flush_disable, 0, "Disable BIO_FLUSH");
/* Don't send BIO_DELETE. */
static int vdev_geom_bio_delete_disable = 0;
TUNABLE_INT("vfs.zfs.vdev.bio_delete_disable", &vdev_geom_bio_delete_disable);
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_delete_disable, CTLFLAG_RW,
    &vdev_geom_bio_delete_disable, 0, "Disable BIO_DELETE");
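
/*
 * Cache the provider's reported rotation rate on the vdev, falling back
 * to VDEV_RATE_UNKNOWN when GEOM cannot supply the attribute.
 */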
static void
vdev_geom_set_rotation_rate(vdev_t *vd, struct g_consumer *cp)
{
	int error;
	uint16_t rate;

	error = g_getattr("GEOM::rotation_rate", cp, &rate);
	if (error == 0)
		vd->vdev_rotation_rate = rate;
	else
		vd->vdev_rotation_rate = VDEV_RATE_UNKNOWN;
}
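
/*
 * GEOM attribute-change callback: refresh the vdev's cached rotation
 * rate or physical path when the underlying provider reports a change.
 */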
static void
vdev_geom_attrchanged(struct g_consumer *cp, const char *attr)
{
	vdev_t *vd;
	spa_t *spa;
	char *physpath, *old_physpath;
	int error, physpath_len;

	vd = cp->private;
	if (vd == NULL)
		return;

	if (strcmp(attr, "GEOM::rotation_rate") == 0) {
		vdev_geom_set_rotation_rate(vd, cp);
		return;
	}
	if (strcmp(attr, "GEOM::physpath") != 0)
		return;
	if (g_access(cp, 1, 0, 0) != 0)
		return;

	/*
	 * Record/Update physical path information for this device.
	 */
	spa = vd->vdev_spa;
	physpath_len = MAXPATHLEN;
	physpath = g_malloc(physpath_len, M_WAITOK|M_ZERO);
	error = g_io_getattr("GEOM::physpath", cp, &physpath_len, physpath);
	g_access(cp, -1, 0, 0);
	if (error == 0) {
		/* g_topology lock ensures that vdev has not been closed */
		old_physpath = vd->vdev_physpath;
		vd->vdev_physpath = spa_strdup(physpath);
		spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);

		if (old_physpath != NULL)
			spa_strfree(old_physpath);
	}
	g_free(physpath);
}
static void
vdev_geom_orphan(struct g_consumer *cp)
{
	vdev_t *vd;

	g_topology_assert();

	vd = cp->private;
	if (vd == NULL) {
		/* Vdev close in progress.  Ignore the event. */
		return;
	}

	/*
	 * Orphan callbacks occur from the GEOM event thread.
	 * Concurrent with this call, new I/O requests may be
	 * working their way through GEOM about to find out
	 * (only once executed by the g_down thread) that we've
	 * been orphaned from our disk provider.  These I/Os
	 * must be retired before we can detach our consumer.
	 * This is most easily achieved by acquiring the
	 * SPA ZIO configuration lock as a writer, but doing
	 * so with the GEOM topology lock held would cause
	 * a lock order reversal.  Instead, rely on the SPA's
	 * async removal support to invoke a close on this
	 * vdev once it is safe to do so.
	 */
	vd->vdev_remove_wanted = B_TRUE;
	spa_async_request(vd->vdev_spa, SPA_ASYNC_REMOVE);
}
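
/*
 * Attach a consumer to the given provider, reusing the shared
 * "zfs::vdev" geom and any consumer already connected to that provider.
 */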
static struct g_consumer *
vdev_geom_attach(struct g_provider *pp, vdev_t *vd)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	int error;

	g_topology_assert();

	ZFS_LOG(1, "Attaching to %s.", pp->name);
	/* Do we have geom already? No? Create one. */
	LIST_FOREACH(gp, &zfs_vdev_class.geom, geom) {
		if (gp->flags & G_GEOM_WITHER)
			continue;
		if (strcmp(gp->name, "zfs::vdev") != 0)
			continue;
		break;
	}
	if (gp == NULL) {
		gp = g_new_geomf(&zfs_vdev_class, "zfs::vdev");
		gp->orphan = vdev_geom_orphan;
		gp->attrchanged = vdev_geom_attrchanged;
		cp = g_new_consumer(gp);
		error = g_attach(cp, pp);
		if (error != 0) {
			ZFS_LOG(1, "%s(%d): g_attach failed: %d\n", __func__,
			    __LINE__, error);
			g_wither_geom(gp, ENXIO);
			return (NULL);
		}
		error = g_access(cp, 1, 0, 1);
		if (error != 0) {
			ZFS_LOG(1, "%s(%d): g_access failed: %d\n", __func__,
			    __LINE__, error);
			g_wither_geom(gp, ENXIO);
			return (NULL);
		}
		ZFS_LOG(1, "Created geom and consumer for %s.", pp->name);
	} else {
		/* Check if we are already connected to this provider. */
		LIST_FOREACH(cp, &gp->consumer, consumer) {
			if (cp->provider == pp) {
				ZFS_LOG(1, "Found consumer for %s.", pp->name);
				break;
			}
		}
		if (cp == NULL) {
			cp = g_new_consumer(gp);
			error = g_attach(cp, pp);
			if (error != 0) {
				ZFS_LOG(1, "%s(%d): g_attach failed: %d\n",
				    __func__, __LINE__, error);
				g_destroy_consumer(cp);
				return (NULL);
			}
			error = g_access(cp, 1, 0, 1);
			if (error != 0) {
				ZFS_LOG(1, "%s(%d): g_access failed: %d\n",
				    __func__, __LINE__, error);
				g_detach(cp);
				g_destroy_consumer(cp);
				return (NULL);
			}
			ZFS_LOG(1, "Created consumer for %s.", pp->name);
		} else {
			error = g_access(cp, 1, 0, 1);
			if (error != 0) {
				ZFS_LOG(1, "%s(%d): g_access failed: %d\n",
				    __func__, __LINE__, error);
				return (NULL);
			}
			ZFS_LOG(1, "Used existing consumer for %s.", pp->name);
		}
	}

	/*
	 * BUG: cp may already belong to a vdev.  This could happen if:
	 * 1) That vdev is a shared spare, or
	 * 2) We are trying to reopen a missing vdev and we are scanning by
	 *    guid.  In that case, we'll ultimately fail to open this consumer,
	 *    but not until after setting the private field.
	 * The solution is to:
	 * 1) Don't set the private field until after the open succeeds, and
	 * 2) Set it to a linked list of vdevs, not just a single vdev
	 */
	cp->private = vd;

	/* Fetch initial physical path information for this device. */
	vdev_geom_attrchanged(cp, "GEOM::physpath");

	cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
	return (cp);
}
static void
vdev_geom_close_locked(vdev_t *vd)
{
	struct g_geom *gp;
	struct g_consumer *cp;

	g_topology_assert();

	cp = vd->vdev_tsd;
	if (cp == NULL)
		return;

	ZFS_LOG(1, "Closing access to %s.", cp->provider->name);
	KASSERT(vd->vdev_tsd == cp, ("%s: vdev_tsd is not cp", __func__));
	vd->vdev_tsd = NULL;
	vd->vdev_delayed_close = B_FALSE;
	cp->private = NULL;	/* XXX locking */

	gp = cp->geom;
	g_access(cp, -1, 0, -1);
	/* Destroy consumer on last close. */
	if (cp->acr == 0 && cp->ace == 0) {
		if (cp->acw > 0)
			g_access(cp, 0, -cp->acw, 0);
		if (cp->provider != NULL) {
			ZFS_LOG(1, "Destroyed consumer to %s.",
			    cp->provider->name);
			g_detach(cp);
		}
		g_destroy_consumer(cp);
	}
	/* Destroy geom if there are no consumers left. */
	if (LIST_EMPTY(&gp->consumer)) {
		ZFS_LOG(1, "Destroyed geom %s.", gp->name);
		g_wither_geom(gp, ENXIO);
	}
}
static void
nvlist_get_guids(nvlist_t *list, uint64_t *pguid, uint64_t *vguid)
{
	(void) nvlist_lookup_uint64(list, ZPOOL_CONFIG_GUID, vguid);
	(void) nvlist_lookup_uint64(list, ZPOOL_CONFIG_POOL_GUID, pguid);
}
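
/*
 * Issue a synchronous I/O to the consumer, splitting the request into
 * MAXPHYS-sized chunks aligned to the provider's sector size.
 */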
static int
vdev_geom_io(struct g_consumer *cp, int cmd, void *data, off_t offset, off_t size)
{
	struct bio *bp;
	u_char *p;
	off_t off, maxio;
	int error;

	ASSERT((offset % cp->provider->sectorsize) == 0);
	ASSERT((size % cp->provider->sectorsize) == 0);

	bp = g_alloc_bio();
	off = offset;
	offset += size;
	p = data;
	maxio = MAXPHYS - (MAXPHYS % cp->provider->sectorsize);
	error = 0;

	for (; off < offset; off += maxio, p += maxio, size -= maxio) {
		bzero(bp, sizeof(*bp));
		bp->bio_cmd = cmd;
		bp->bio_done = NULL;
		bp->bio_offset = off;
		bp->bio_length = MIN(size, maxio);
		bp->bio_data = p;
		g_io_request(bp, cp);
		error = biowait(bp, "vdev_geom_io");
		if (error != 0)
			break;
	}

	g_destroy_bio(bp);
	return (error);
}
static void
vdev_geom_taste_orphan(struct g_consumer *cp)
{
	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
	    cp->provider->name));
}
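
/*
 * Read the vdev configuration nvlist from the device's on-disk labels,
 * stopping at the first label that unpacks and carries a usable pool state.
 */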
static int
vdev_geom_read_config(struct g_consumer *cp, nvlist_t **config)
{
	struct g_provider *pp;
	vdev_label_t *label;
	char *buf;
	size_t buflen;
	uint64_t psize;
	off_t offset, size;
	uint64_t state, txg;
	int l;

	g_topology_assert_not();

	pp = cp->provider;
	ZFS_LOG(1, "Reading config from %s...", pp->name);

	psize = pp->mediasize;
	psize = P2ALIGN(psize, (uint64_t)sizeof(vdev_label_t));

	size = sizeof(*label) + pp->sectorsize -
	    ((sizeof(*label) - 1) % pp->sectorsize) - 1;

	label = kmem_alloc(size, KM_SLEEP);
	buflen = sizeof(label->vl_vdev_phys.vp_nvlist);

	*config = NULL;
	for (l = 0; l < VDEV_LABELS; l++) {

		offset = vdev_label_offset(psize, l, 0);
		if ((offset % pp->sectorsize) != 0)
			continue;

		if (vdev_geom_io(cp, BIO_READ, label, offset, size) != 0)
			continue;
		buf = label->vl_vdev_phys.vp_nvlist;

		if (nvlist_unpack(buf, buflen, config, 0) != 0)
			continue;

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state > POOL_STATE_L2CACHE) {
			nvlist_free(*config);
			*config = NULL;
			continue;
		}

		if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
		    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0)) {
			nvlist_free(*config);
			*config = NULL;
			continue;
		}

		break;
	}

	kmem_free(label, size);
	return (*config == NULL ? ENOENT : 0);
}
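
/* Grow the per-top-level-vdev config array so that index 'id' is valid. */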
static void
resize_configs(nvlist_t ***configs, uint64_t *count, uint64_t id)
{
	nvlist_t **new_configs;
	uint64_t i;

	if (id < *count)
		return;
	new_configs = kmem_zalloc((id + 1) * sizeof(nvlist_t *),
	    KM_SLEEP);
	for (i = 0; i < *count; i++)
		new_configs[i] = (*configs)[i];
	if (*configs != NULL)
		kmem_free(*configs, *count * sizeof(void *));
	*configs = new_configs;
	*count = id + 1;
}
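
/*
 * Keep the newest (highest txg) config seen for each top-level vdev of
 * the named pool; configs for other pools or with stale txgs are freed.
 */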
static void
process_vdev_config(nvlist_t ***configs, uint64_t *count, nvlist_t *cfg,
    const char *name, uint64_t* known_pool_guid)
{
	nvlist_t *vdev_tree;
	uint64_t pool_guid;
	uint64_t vdev_guid, known_guid;
	uint64_t id, txg, known_txg;
	char *pname;

	if (nvlist_lookup_string(cfg, ZPOOL_CONFIG_POOL_NAME, &pname) != 0 ||
	    strcmp(pname, name) != 0)
		goto ignore;

	if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_GUID, &pool_guid) != 0)
		goto ignore;

	if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_TOP_GUID, &vdev_guid) != 0)
		goto ignore;

	if (nvlist_lookup_nvlist(cfg, ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) != 0)
		goto ignore;

	if (nvlist_lookup_uint64(vdev_tree, ZPOOL_CONFIG_ID, &id) != 0)
		goto ignore;

	VERIFY(nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);

	if (*known_pool_guid != 0) {
		if (pool_guid != *known_pool_guid)
			goto ignore;
	} else
		*known_pool_guid = pool_guid;

	resize_configs(configs, count, id);

	if ((*configs)[id] != NULL) {
		VERIFY(nvlist_lookup_uint64((*configs)[id],
		    ZPOOL_CONFIG_POOL_TXG, &known_txg) == 0);
		if (txg <= known_txg)
			goto ignore;
		nvlist_free((*configs)[id]);
	}

	(*configs)[id] = cfg;
	return;

ignore:
	nvlist_free(cfg);
}
static int
vdev_geom_attach_taster(struct g_consumer *cp, struct g_provider *pp)
{
	int error;

	if (pp->flags & G_PF_WITHER)
		return (EINVAL);
	g_attach(cp, pp);
	error = g_access(cp, 1, 0, 0);
	if (error == 0) {
		if (pp->sectorsize > VDEV_PAD_SIZE || !ISP2(pp->sectorsize))
			error = EINVAL;
		else if (pp->mediasize < SPA_MINDEVSIZE)
			error = EINVAL;
		if (error != 0) {
			g_access(cp, -1, 0, 0);
			g_detach(cp);
		}
	} else {
		g_detach(cp);
	}
	return (error);
}
static void
vdev_geom_detach_taster(struct g_consumer *cp)
{
	g_access(cp, -1, 0, 0);
	g_detach(cp);
}
int
vdev_geom_read_pool_label(const char *name,
    nvlist_t ***configs, uint64_t *count)
{
	struct g_class *mp;
	struct g_geom *gp, *zgp;
	struct g_provider *pp;
	struct g_consumer *zcp;
	nvlist_t *vdev_cfg;
	uint64_t pool_guid;
	int error;

	DROP_GIANT();
	g_topology_lock();

	zgp = g_new_geomf(&zfs_vdev_class, "zfs::vdev::taste");
	/* This orphan function should never be called. */
	zgp->orphan = vdev_geom_taste_orphan;
	zcp = g_new_consumer(zgp);

	*configs = NULL;
	*count = 0;
	pool_guid = 0;
	LIST_FOREACH(mp, &g_classes, class) {
		if (mp == &zfs_vdev_class)
			continue;
		LIST_FOREACH(gp, &mp->geom, geom) {
			if (gp->flags & G_GEOM_WITHER)
				continue;
			LIST_FOREACH(pp, &gp->provider, provider) {
				if (pp->flags & G_PF_WITHER)
					continue;
				if (vdev_geom_attach_taster(zcp, pp) != 0)
					continue;
				g_topology_unlock();
				error = vdev_geom_read_config(zcp, &vdev_cfg);
				g_topology_lock();
				vdev_geom_detach_taster(zcp);
				if (error)
					continue;
				ZFS_LOG(1, "successfully read vdev config");

				process_vdev_config(configs, count,
				    vdev_cfg, name, &pool_guid);
			}
		}
	}

	g_destroy_consumer(zcp);
	g_destroy_geom(zgp);
	g_topology_unlock();
	PICKUP_GIANT();

	return (*count > 0 ? 0 : ENOENT);
}
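
/* Read the pool and vdev GUIDs from the device's label; 0 if absent. */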
static void
vdev_geom_read_guids(struct g_consumer *cp, uint64_t *pguid, uint64_t *vguid)
{
	nvlist_t *config;

	g_topology_assert_not();

	*pguid = 0;
	*vguid = 0;
	if (vdev_geom_read_config(cp, &config) == 0) {
		nvlist_get_guids(config, pguid, vguid);
		nvlist_free(config);
	}
}
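
/*
 * Taste every provider in the system and attach to the first one whose
 * label carries the vdev's GUID (and the pool's GUID, when present).
 */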
static struct g_consumer *
vdev_geom_attach_by_guids(vdev_t *vd)
{
	struct g_class *mp;
	struct g_geom *gp, *zgp;
	struct g_provider *pp;
	struct g_consumer *cp, *zcp;
	uint64_t pguid, vguid;

	g_topology_assert();

	zgp = g_new_geomf(&zfs_vdev_class, "zfs::vdev::taste");
	/* This orphan function should never be called. */
	zgp->orphan = vdev_geom_taste_orphan;
	zcp = g_new_consumer(zgp);

	cp = NULL;
	LIST_FOREACH(mp, &g_classes, class) {
		if (mp == &zfs_vdev_class)
			continue;
		LIST_FOREACH(gp, &mp->geom, geom) {
			if (gp->flags & G_GEOM_WITHER)
				continue;
			LIST_FOREACH(pp, &gp->provider, provider) {
				if (vdev_geom_attach_taster(zcp, pp) != 0)
					continue;
				g_topology_unlock();
				vdev_geom_read_guids(zcp, &pguid, &vguid);
				g_topology_lock();
				vdev_geom_detach_taster(zcp);
				/*
				 * Check that the label's vdev guid matches the
				 * desired guid.  If the label has a pool guid,
				 * check that it matches too. (Inactive spares
				 * and L2ARCs do not have any pool guid in the
				 * label.)
				 */
				if ((pguid != 0 &&
				    pguid != spa_guid(vd->vdev_spa)) ||
				    vguid != vd->vdev_guid)
					continue;
				cp = vdev_geom_attach(pp, vd);
				if (cp == NULL) {
					printf("ZFS WARNING: Unable to "
					    "attach to %s.\n", pp->name);
					continue;
				}
				break;
			}
			if (cp != NULL)
				break;
		}
		if (cp != NULL)
			break;
	}

	g_destroy_consumer(zcp);
	g_destroy_geom(zgp);
	return (cp);
}
static struct g_consumer *
vdev_geom_open_by_guids(vdev_t *vd)
{
	struct g_consumer *cp;
	char *buf;
	size_t len;

	g_topology_assert();

	ZFS_LOG(1, "Searching by guids [%ju:%ju].",
	    (uintmax_t)spa_guid(vd->vdev_spa), (uintmax_t)vd->vdev_guid);
	cp = vdev_geom_attach_by_guids(vd);
	if (cp != NULL) {
		len = strlen(cp->provider->name) + strlen("/dev/") + 1;
		buf = kmem_alloc(len, KM_SLEEP);

		snprintf(buf, len, "/dev/%s", cp->provider->name);
		spa_strfree(vd->vdev_path);
		vd->vdev_path = buf;

		ZFS_LOG(1, "Attach by guid [%ju:%ju] succeeded, provider %s.",
		    (uintmax_t)spa_guid(vd->vdev_spa),
		    (uintmax_t)vd->vdev_guid, vd->vdev_path);
	} else {
		ZFS_LOG(1, "Search by guid [%ju:%ju] failed.",
		    (uintmax_t)spa_guid(vd->vdev_spa),
		    (uintmax_t)vd->vdev_guid);
	}

	return (cp);
}
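
/*
 * Attach by the recorded /dev path; when check_guid is set, verify the
 * label GUIDs and detach again on a mismatch.
 */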
static struct g_consumer *
vdev_geom_open_by_path(vdev_t *vd, int check_guid)
{
	struct g_provider *pp;
	struct g_consumer *cp;
	uint64_t pguid, vguid;

	g_topology_assert();

	cp = NULL;
	pp = g_provider_by_name(vd->vdev_path + sizeof("/dev/") - 1);
	if (pp != NULL) {
		ZFS_LOG(1, "Found provider by name %s.", vd->vdev_path);
		cp = vdev_geom_attach(pp, vd);
		if (cp != NULL && check_guid && ISP2(pp->sectorsize) &&
		    pp->sectorsize <= VDEV_PAD_SIZE) {
			g_topology_unlock();
			vdev_geom_read_guids(cp, &pguid, &vguid);
			g_topology_lock();
			/*
			 * Check that the label's vdev guid matches the
			 * desired guid.  If the label has a pool guid,
			 * check that it matches too. (Inactive spares
			 * and L2ARCs do not have any pool guid in the
			 * label.)
			 */
			if ((pguid != 0 &&
			    pguid != spa_guid(vd->vdev_spa)) ||
			    vguid != vd->vdev_guid) {
				vdev_geom_close_locked(vd);
				cp = NULL;
				ZFS_LOG(1, "guid mismatch for provider %s: "
				    "%ju:%ju != %ju:%ju.", vd->vdev_path,
				    (uintmax_t)spa_guid(vd->vdev_spa),
				    (uintmax_t)vd->vdev_guid,
				    (uintmax_t)pguid, (uintmax_t)vguid);
			} else {
				ZFS_LOG(1, "guid match for provider %s.",
				    vd->vdev_path);
			}
		}
	}

	return (cp);
}
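
/*
 * Open the vdev's backing provider.  New vdevs (pool create/add, or a
 * split) are located by path alone; existing vdevs are tried first by
 * their recorded path with GUID verification, then by scanning all
 * providers for matching GUIDs.
 */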
static int
vdev_geom_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
	struct g_provider *pp;
	struct g_consumer *cp;
	int error;

	/*
	 * We must have a pathname, and it must be absolute.
	 */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (EINVAL);
	}

	vd->vdev_tsd = NULL;

	DROP_GIANT();
	g_topology_lock();
	error = 0;

	if (vd->vdev_spa->spa_splitting_newspa ||
	    (vd->vdev_prevstate == VDEV_STATE_UNKNOWN &&
	    vd->vdev_spa->spa_load_state == SPA_LOAD_NONE)) {
		/*
		 * We are dealing with a vdev that hasn't been previously
		 * opened (since boot), and we are not loading an
		 * existing pool configuration.  This looks like a
		 * vdev add operation to a new or existing pool.
		 * Assume the user knows what he/she is doing and find
		 * GEOM provider by its name, ignoring GUID mismatches.
		 *
		 * XXPOLICY: It would be safer to only allow a device
		 *           that is unlabeled or labeled but missing
		 *           GUID information to be opened in this fashion,
		 *           unless we are doing a split, in which case we
		 *           should allow any guid.
		 */
		cp = vdev_geom_open_by_path(vd, 0);
	} else {
		/*
		 * Try using the recorded path for this device, but only
		 * accept it if its label data contains the expected GUIDs.
		 */
		cp = vdev_geom_open_by_path(vd, 1);
		if (cp == NULL) {
			/*
			 * The device at vd->vdev_path doesn't have the
			 * expected GUIDs. The disks might have merely
			 * moved around so try all other GEOM providers
			 * to find one with the right GUIDs.
			 */
			cp = vdev_geom_open_by_guids(vd);
		}
	}

	if (cp == NULL) {
		ZFS_LOG(1, "Provider %s not found.", vd->vdev_path);
		error = ENOENT;
	} else if (cp->provider->sectorsize > VDEV_PAD_SIZE ||
	    !ISP2(cp->provider->sectorsize)) {
		ZFS_LOG(1, "Provider %s has unsupported sectorsize.",
		    vd->vdev_path);

		vdev_geom_close_locked(vd);
		error = EINVAL;
		cp = NULL;
	} else if (cp->acw == 0 && (spa_mode(vd->vdev_spa) & FWRITE) != 0) {
		int i;

		for (i = 0; i < 5; i++) {
			error = g_access(cp, 0, 1, 0);
			if (error == 0)
				break;
			g_topology_unlock();
			tsleep(vd, 0, "vdev", hz / 2);
			g_topology_lock();
		}
		if (error != 0) {
			printf("ZFS WARNING: Unable to open %s for writing (error=%d).\n",
			    vd->vdev_path, error);
			vdev_geom_close_locked(vd);
			cp = NULL;
		}
	}

	g_topology_unlock();
	PICKUP_GIANT();
	if (cp == NULL) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		return (error);
	}
	pp = cp->provider;

	/*
	 * Determine the actual size of the device.
	 */
	*max_psize = *psize = pp->mediasize;

	/*
	 * Determine the device's minimum transfer size and preferred
	 * transfer size.
	 */
	*logical_ashift = highbit(MAX(pp->sectorsize, SPA_MINBLOCKSIZE)) - 1;
	*physical_ashift = 0;
	if (pp->stripesize > (1 << *logical_ashift) && ISP2(pp->stripesize) &&
	    pp->stripesize <= (1 << SPA_MAXASHIFT) && pp->stripeoffset == 0)
		*physical_ashift = highbit(pp->stripesize) - 1;

	/*
	 * Clear the nowritecache settings, so that on a vdev_reopen()
	 * we will try again.
	 */
	vd->vdev_nowritecache = B_FALSE;

	/*
	 * Determine the device's rotation rate.
	 */
	vdev_geom_set_rotation_rate(vd, cp);

	return (0);
}
static void
vdev_geom_close(vdev_t *vd)
{

	DROP_GIANT();
	g_topology_lock();
	vdev_geom_close_locked(vd);
	g_topology_unlock();
	PICKUP_GIANT();
}
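
/*
 * BIO completion callback: propagate the GEOM error into the zio, latch
 * ENOTSUP for flush/trim so those requests are not retried, and handle
 * ENXIO as a (possibly pending) device removal.
 */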
static void
vdev_geom_io_intr(struct bio *bp)
{
	vdev_t *vd;
	zio_t *zio;

	zio = bp->bio_caller1;
	vd = zio->io_vd;
	zio->io_error = bp->bio_error;
	if (zio->io_error == 0 && bp->bio_resid != 0)
		zio->io_error = SET_ERROR(EIO);

	switch(zio->io_error) {
	case ENOTSUP:
		/*
		 * If we get ENOTSUP for BIO_FLUSH or BIO_DELETE we know
		 * that future attempts will never succeed. In this case
		 * we set a persistent flag so that we don't bother with
		 * requests in the future.
		 */
		switch(bp->bio_cmd) {
		case BIO_FLUSH:
			vd->vdev_nowritecache = B_TRUE;
			break;
		case BIO_DELETE:
			vd->vdev_notrim = B_TRUE;
			break;
		}
		break;
	case ENXIO:
		if (!vd->vdev_remove_wanted) {
			/*
			 * If provider's error is set we assume it is being
			 * removed.
			 */
			if (bp->bio_to->error != 0) {
				vd->vdev_remove_wanted = B_TRUE;
				spa_async_request(zio->io_spa,
				    SPA_ASYNC_REMOVE);
			} else if (!vd->vdev_delayed_close) {
				vd->vdev_delayed_close = B_TRUE;
			}
		}
		break;
	}

	g_destroy_bio(bp);
	zio_delay_interrupt(zio);
}
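
/*
 * Dispatch a zio to GEOM: cache flushes become BIO_FLUSH, frees become
 * BIO_DELETE, and reads/writes map directly onto BIO_READ/BIO_WRITE.
 */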
static void
vdev_geom_io_start(zio_t *zio)
{
	vdev_t *vd;
	struct g_consumer *cp;
	struct bio *bp;

	vd = zio->io_vd;

	switch (zio->io_type) {
	case ZIO_TYPE_IOCTL:
		/* XXPOLICY */
		if (!vdev_readable(vd)) {
			zio->io_error = SET_ERROR(ENXIO);
			zio_interrupt(zio);
			return;
		} else {
			switch (zio->io_cmd) {
			case DKIOCFLUSHWRITECACHE:
				if (zfs_nocacheflush || vdev_geom_bio_flush_disable)
					break;
				if (vd->vdev_nowritecache) {
					zio->io_error = SET_ERROR(ENOTSUP);
					break;
				}
				goto sendreq;
			default:
				zio->io_error = SET_ERROR(ENOTSUP);
			}
		}

		zio_execute(zio);
		return;
	case ZIO_TYPE_FREE:
		if (vd->vdev_notrim) {
			zio->io_error = SET_ERROR(ENOTSUP);
		} else if (!vdev_geom_bio_delete_disable) {
			goto sendreq;
		}
		zio_execute(zio);
		return;
	}
sendreq:
	ASSERT(zio->io_type == ZIO_TYPE_READ ||
	    zio->io_type == ZIO_TYPE_WRITE ||
	    zio->io_type == ZIO_TYPE_FREE ||
	    zio->io_type == ZIO_TYPE_IOCTL);

	cp = vd->vdev_tsd;
	if (cp == NULL) {
		zio->io_error = SET_ERROR(ENXIO);
		zio_interrupt(zio);
		return;
	}
	bp = g_alloc_bio();
	bp->bio_caller1 = zio;
	switch (zio->io_type) {
	case ZIO_TYPE_READ:
	case ZIO_TYPE_WRITE:
		zio->io_target_timestamp = zio_handle_io_delay(zio);
		bp->bio_cmd = zio->io_type == ZIO_TYPE_READ ? BIO_READ : BIO_WRITE;
		bp->bio_data = zio->io_data;
		bp->bio_offset = zio->io_offset;
		bp->bio_length = zio->io_size;
		break;
	case ZIO_TYPE_FREE:
		bp->bio_cmd = BIO_DELETE;
		bp->bio_data = NULL;
		bp->bio_offset = zio->io_offset;
		bp->bio_length = zio->io_size;
		break;
	case ZIO_TYPE_IOCTL:
		bp->bio_cmd = BIO_FLUSH;
		bp->bio_flags |= BIO_ORDERED;
		bp->bio_data = NULL;
		bp->bio_offset = cp->provider->mediasize;
		bp->bio_length = 0;
		break;
	}
	bp->bio_done = vdev_geom_io_intr;

	g_io_request(bp, cp);
}
static void
vdev_geom_io_done(zio_t *zio)
{
}

static void
vdev_geom_hold(vdev_t *vd)
{
}

static void
vdev_geom_rele(vdev_t *vd)
{
}
vdev_ops_t vdev_geom_ops = {
	vdev_geom_open,
	vdev_geom_close,
	vdev_default_asize,
	vdev_geom_io_start,
	vdev_geom_io_done,
	NULL,
	vdev_geom_hold,
	vdev_geom_rele,
	VDEV_TYPE_DISK,		/* name of this vdev type */
	B_TRUE			/* leaf vdev */
};