/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Portions Copyright (c) 2012 Martin Matuska <mm@FreeBSD.org>
 */
#include <sys/zfs_context.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/disk.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
/*
 * Virtual device vector for GEOM.
 */
struct g_class zfs_vdev_class = {
	.name = "ZFS::VDEV",
	.version = G_VERSION,
};

DECLARE_GEOM_CLASS(zfs_vdev_class, zfs_vdev);
/*
 * Don't send BIO_FLUSH.
 */
static int vdev_geom_bio_flush_disable = 0;
TUNABLE_INT("vfs.zfs.vdev.bio_flush_disable", &vdev_geom_bio_flush_disable);
SYSCTL_DECL(_vfs_zfs_vdev);
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_flush_disable, CTLFLAG_RW,
    &vdev_geom_bio_flush_disable, 0, "Disable BIO_FLUSH");
static void
vdev_geom_orphan(struct g_consumer *cp)
{
	vdev_t *vd;

	g_topology_assert();

	vd = cp->private;
	if (vd == NULL)
		return;

	/*
	 * Orphan callbacks occur from the GEOM event thread.
	 * Concurrent with this call, new I/O requests may be
	 * working their way through GEOM about to find out
	 * (only once executed by the g_down thread) that we've
	 * been orphaned from our disk provider.  These I/Os
	 * must be retired before we can detach our consumer.
	 * This is most easily achieved by acquiring the
	 * SPA ZIO configuration lock as a writer, but doing
	 * so with the GEOM topology lock held would cause
	 * a lock order reversal.  Instead, rely on the SPA's
	 * async removal support to invoke a close on this
	 * vdev once it is safe to do so.
	 */
	zfs_post_remove(vd->vdev_spa, vd);
	vd->vdev_remove_wanted = B_TRUE;
	spa_async_request(vd->vdev_spa, SPA_ASYNC_REMOVE);
}
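/*
 * Attach a consumer to the given provider: reuse the existing "zfs::vdev"
 * geom (creating it on first use), reuse an existing consumer if we are
 * already attached to this provider, and open it with read and exclusive
 * access.  Returns NULL on failure.
 */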
static struct g_consumer *
vdev_geom_attach(struct g_provider *pp)
{
	struct g_geom *gp;
	struct g_consumer *cp;

	g_topology_assert();

	ZFS_LOG(1, "Attaching to %s.", pp->name);
	/* Do we have geom already? No? Create one. */
	LIST_FOREACH(gp, &zfs_vdev_class.geom, geom) {
		if (gp->flags & G_GEOM_WITHER)
			continue;
		if (strcmp(gp->name, "zfs::vdev") != 0)
			continue;
		break;
	}
	if (gp == NULL) {
		gp = g_new_geomf(&zfs_vdev_class, "zfs::vdev");
		gp->orphan = vdev_geom_orphan;
		cp = g_new_consumer(gp);
		if (g_attach(cp, pp) != 0) {
			g_wither_geom(gp, ENXIO);
			return (NULL);
		}
		if (g_access(cp, 1, 0, 1) != 0) {
			g_wither_geom(gp, ENXIO);
			return (NULL);
		}
		ZFS_LOG(1, "Created geom and consumer for %s.", pp->name);
	} else {
		/* Check if we are already connected to this provider. */
		LIST_FOREACH(cp, &gp->consumer, consumer) {
			if (cp->provider == pp) {
				ZFS_LOG(1, "Found consumer for %s.", pp->name);
				break;
			}
		}
		if (cp == NULL) {
			cp = g_new_consumer(gp);
			if (g_attach(cp, pp) != 0) {
				g_destroy_consumer(cp);
				return (NULL);
			}
			if (g_access(cp, 1, 0, 1) != 0) {
				g_detach(cp);
				g_destroy_consumer(cp);
				return (NULL);
			}
			ZFS_LOG(1, "Created consumer for %s.", pp->name);
		} else {
			if (g_access(cp, 1, 0, 1) != 0)
				return (NULL);
			ZFS_LOG(1, "Used existing consumer for %s.", pp->name);
		}
	}
	return (cp);
}
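/*
 * Drop our references on the consumer; on last close destroy the consumer
 * and, once no consumers remain, wither the geom.  Runs as a GEOM event
 * (see vdev_geom_close()), so the topology lock is held by the caller.
 */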
static void
vdev_geom_detach(void *arg, int flag __unused)
{
	struct g_geom *gp;
	struct g_consumer *cp;

	g_topology_assert();
	cp = arg;
	gp = cp->geom;

	ZFS_LOG(1, "Closing access to %s.", cp->provider->name);
	g_access(cp, -1, 0, -1);
	/* Destroy consumer on last close. */
	if (cp->acr == 0 && cp->ace == 0) {
		ZFS_LOG(1, "Destroyed consumer to %s.", cp->provider->name);
		if (cp->acw > 0)
			g_access(cp, 0, -cp->acw, 0);
		g_detach(cp);
		g_destroy_consumer(cp);
	}
	/* Destroy geom if there are no consumers left. */
	if (LIST_EMPTY(&gp->consumer)) {
		ZFS_LOG(1, "Destroyed geom %s.", gp->name);
		g_wither_geom(gp, ENXIO);
	}
}
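/*
 * Return the guid stored in an unpacked label config, or 0 if it is missing.
 */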
static uint64_t
nvlist_get_guid(nvlist_t *list)
{
	uint64_t value;

	value = 0;
	nvlist_lookup_uint64(list, ZPOOL_CONFIG_GUID, &value);
	return (value);
}
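/*
 * Synchronous I/O helper used while reading labels: issue the request in
 * MAXPHYS-sized (sector-aligned) chunks and wait for each one to complete.
 */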
static int
vdev_geom_io(struct g_consumer *cp, int cmd, void *data, off_t offset, off_t size)
{
	struct bio *bp;
	u_char *p;
	off_t off, maxio;
	int error;

	ASSERT((offset % cp->provider->sectorsize) == 0);
	ASSERT((size % cp->provider->sectorsize) == 0);

	bp = g_alloc_bio();
	off = offset;
	offset += size;
	p = data;
	maxio = MAXPHYS - (MAXPHYS % cp->provider->sectorsize);
	error = 0;

	for (; off < offset; off += maxio, p += maxio, size -= maxio) {
		bzero(bp, sizeof(*bp));
		bp->bio_cmd = cmd;
		bp->bio_data = p;
		bp->bio_offset = off;
		bp->bio_length = MIN(size, maxio);
		g_io_request(bp, cp);
		error = biowait(bp, "vdev_geom_io");
		if (error != 0)
			break;
	}

	g_destroy_bio(bp);
	return (error);
}
static void
vdev_geom_taste_orphan(struct g_consumer *cp)
{

	KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
	    cp->provider->name));
}
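/*
 * Read the vdev labels from the provider and unpack the first one that
 * contains a valid, non-destroyed pool configuration.  Returns 0 and the
 * unpacked nvlist in *config on success, ENOENT otherwise.
 */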
static int
vdev_geom_read_config(struct g_consumer *cp, nvlist_t **config)
{
	struct g_provider *pp;
	vdev_label_t *label;
	char *buf;
	size_t buflen;
	uint64_t psize;
	off_t offset, size;
	uint64_t guid, state, txg;
	int l;

	g_topology_assert_not();

	pp = cp->provider;
	ZFS_LOG(1, "Reading config from %s...", pp->name);

	psize = pp->mediasize;
	psize = P2ALIGN(psize, (uint64_t)sizeof(vdev_label_t));

	size = sizeof(*label) + pp->sectorsize -
	    ((sizeof(*label) - 1) % pp->sectorsize) - 1;

	label = kmem_alloc(size, KM_SLEEP);
	buflen = sizeof(label->vl_vdev_phys.vp_nvlist);

	*config = NULL;
	for (l = 0; l < VDEV_LABELS; l++) {

		offset = vdev_label_offset(psize, l, 0);
		if ((offset % pp->sectorsize) != 0)
			continue;

		if (vdev_geom_io(cp, BIO_READ, label, offset, size) != 0)
			continue;
		buf = label->vl_vdev_phys.vp_nvlist;

		if (nvlist_unpack(buf, buflen, config, 0) != 0)
			continue;

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state == POOL_STATE_DESTROYED ||
		    state > POOL_STATE_L2CACHE) {
			nvlist_free(*config);
			*config = NULL;
			continue;
		}

		if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
		    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0)) {
			nvlist_free(*config);
			*config = NULL;
			continue;
		}

		break;
	}

	kmem_free(label, size);
	return (*config == NULL ? ENOENT : 0);
}
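/*
 * Check whether an unpacked config belongs to the named pool and is newer
 * than the best txg seen so far; if so, update *best_txg.
 */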
static int
vdev_geom_check_config(nvlist_t *config, const char *name, uint64_t *best_txg)
{
	uint64_t txg;
	char *pname;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME, &pname) != 0 ||
	    strcmp(pname, name) != 0)
		return (ENOENT);

	ZFS_LOG(1, "found pool: %s", pname);

	txg = 0;
	nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG, &txg);
	if (txg <= *best_txg)
		return (ENOENT);

	*best_txg = txg;
	ZFS_LOG(1, "txg: %ju", (uintmax_t)*best_txg);

	return (0);
}
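/*
 * Helpers for tasting arbitrary providers: attach the shared taster consumer
 * to a candidate provider with read-only access, and detach it again when
 * the taste is done.  Providers that are withering or have an unsupported
 * sector size are rejected up front.
 */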
static int
vdev_geom_attach_taster(struct g_consumer *cp, struct g_provider *pp)
{
	int error;

	if (pp->flags & G_PF_WITHER)
		return (EINVAL);
	if (pp->sectorsize > VDEV_PAD_SIZE || !ISP2(pp->sectorsize))
		return (EINVAL);
	error = g_attach(cp, pp);
	if (error != 0)
		return (error);
	error = g_access(cp, 1, 0, 0);
	if (error != 0) {
		g_detach(cp);
		return (error);
	}
	return (0);
}

static void
vdev_geom_dettach_taster(struct g_consumer *cp)
{

	g_access(cp, -1, 0, 0);
	g_detach(cp);
}
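/*
 * Walk every provider known to GEOM (skipping our own class), taste each one
 * for a vdev label, and return the configuration with the highest txg that
 * matches the requested pool name.
 */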
int
vdev_geom_read_pool_label(const char *name, nvlist_t **config)
{
	struct g_class *mp;
	struct g_geom *gp, *zgp;
	struct g_provider *pp;
	struct g_consumer *zcp;
	nvlist_t *vdev_cfg;
	uint64_t best_txg;
	int error;

	DROP_GIANT();
	g_topology_lock();

	zgp = g_new_geomf(&zfs_vdev_class, "zfs::vdev::taste");
	/* This orphan function should be never called. */
	zgp->orphan = vdev_geom_taste_orphan;
	zcp = g_new_consumer(zgp);

	best_txg = 0;
	*config = NULL;
	LIST_FOREACH(mp, &g_classes, class) {
		if (mp == &zfs_vdev_class)
			continue;
		LIST_FOREACH(gp, &mp->geom, geom) {
			if (gp->flags & G_GEOM_WITHER)
				continue;
			LIST_FOREACH(pp, &gp->provider, provider) {
				if (pp->flags & G_PF_WITHER)
					continue;
				if (vdev_geom_attach_taster(zcp, pp) != 0)
					continue;
				g_topology_unlock();
				error = vdev_geom_read_config(zcp, &vdev_cfg);
				g_topology_lock();
				vdev_geom_dettach_taster(zcp);
				if (error)
					continue;
				ZFS_LOG(1, "successfully read vdev config");

				error = vdev_geom_check_config(vdev_cfg, name,
				    &best_txg);
				if (error) {
					nvlist_free(vdev_cfg);
					continue;
				}

				if (*config != NULL)
					nvlist_free(*config);
				*config = vdev_cfg;
			}
		}
	}

	g_destroy_consumer(zcp);
	g_destroy_geom(zgp);

	g_topology_unlock();
	PICKUP_GIANT();

	return (*config == NULL ? ENOENT : 0);
}
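/*
 * Taste the attached provider and return the vdev guid from its label,
 * or 0 if no valid label could be read.
 */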
static uint64_t
vdev_geom_read_guid(struct g_consumer *cp)
{
	nvlist_t *config;
	uint64_t guid;

	g_topology_assert_not();

	guid = 0;
	if (vdev_geom_read_config(cp, &config) == 0) {
		guid = nvlist_get_guid(config);
		nvlist_free(config);
	}
	return (guid);
}
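/*
 * Search all providers for one whose label carries the given vdev guid and
 * attach to it.  Used when the device no longer lives at the configured path.
 */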
static struct g_consumer *
vdev_geom_attach_by_guid(uint64_t guid)
{
	struct g_class *mp;
	struct g_geom *gp, *zgp;
	struct g_provider *pp;
	struct g_consumer *cp, *zcp;
	uint64_t pguid;

	g_topology_assert();

	zgp = g_new_geomf(&zfs_vdev_class, "zfs::vdev::taste");
	/* This orphan function should be never called. */
	zgp->orphan = vdev_geom_taste_orphan;
	zcp = g_new_consumer(zgp);

	cp = NULL;
	LIST_FOREACH(mp, &g_classes, class) {
		if (mp == &zfs_vdev_class)
			continue;
		LIST_FOREACH(gp, &mp->geom, geom) {
			if (gp->flags & G_GEOM_WITHER)
				continue;
			LIST_FOREACH(pp, &gp->provider, provider) {
				if (vdev_geom_attach_taster(zcp, pp) != 0)
					continue;
				g_topology_unlock();
				pguid = vdev_geom_read_guid(zcp);
				g_topology_lock();
				vdev_geom_dettach_taster(zcp);
				if (pguid != guid)
					continue;
				cp = vdev_geom_attach(pp);
				if (cp == NULL) {
					printf("ZFS WARNING: Unable to attach to %s.\n",
					    pp->name);
					continue;
				}
				break;
			}
			if (cp != NULL)
				break;
		}
		if (cp != NULL)
			break;
	}

	g_destroy_consumer(zcp);
	g_destroy_geom(zgp);
	return (cp);
}
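/*
 * Open the vdev by searching all providers for its guid.  On success the
 * vdev path is rewritten to point at the provider we actually attached to.
 */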
static struct g_consumer *
vdev_geom_open_by_guid(vdev_t *vd)
{
	struct g_consumer *cp;
	char *buf;
	size_t len;

	g_topology_assert();

	ZFS_LOG(1, "Searching by guid [%ju].", (uintmax_t)vd->vdev_guid);
	cp = vdev_geom_attach_by_guid(vd->vdev_guid);
	if (cp != NULL) {
		len = strlen(cp->provider->name) + strlen("/dev/") + 1;
		buf = kmem_alloc(len, KM_SLEEP);

		snprintf(buf, len, "/dev/%s", cp->provider->name);
		spa_strfree(vd->vdev_path);
		vd->vdev_path = buf;

		ZFS_LOG(1, "Attach by guid [%ju] succeeded, provider %s.",
		    (uintmax_t)vd->vdev_guid, vd->vdev_path);
	} else {
		ZFS_LOG(1, "Search by guid [%ju] failed.",
		    (uintmax_t)vd->vdev_guid);
	}

	return (cp);
}
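/*
 * Open the vdev by its configured /dev path.  When check_guid is set, read
 * the label and verify that the guid on the provider matches the vdev;
 * detach again on a mismatch.
 */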
static struct g_consumer *
vdev_geom_open_by_path(vdev_t *vd, int check_guid)
{
	struct g_provider *pp;
	struct g_consumer *cp;
	uint64_t guid;

	g_topology_assert();

	cp = NULL;
	pp = g_provider_by_name(vd->vdev_path + sizeof("/dev/") - 1);
	if (pp != NULL) {
		ZFS_LOG(1, "Found provider by name %s.", vd->vdev_path);
		cp = vdev_geom_attach(pp);
		if (cp != NULL && check_guid && ISP2(pp->sectorsize) &&
		    pp->sectorsize <= VDEV_PAD_SIZE) {
			g_topology_unlock();
			guid = vdev_geom_read_guid(cp);
			g_topology_lock();
			if (guid != vd->vdev_guid) {
				vdev_geom_detach(cp, 0);
				cp = NULL;
				ZFS_LOG(1, "guid mismatch for provider %s: "
				    "%ju != %ju.", vd->vdev_path,
				    (uintmax_t)vd->vdev_guid, (uintmax_t)guid);
			} else {
				ZFS_LOG(1, "guid match for provider %s.",
				    vd->vdev_path);
			}
		}
	}

	return (cp);
}
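/*
 * vdev open entry point: locate and attach the GEOM provider (by path, and
 * by guid as a fallback), acquire write access when the pool is opened
 * read-write, and report the device size and minimum transfer size back to
 * the common vdev code.
 */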
static int
vdev_geom_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
    uint64_t *ashift)
{
	struct g_provider *pp;
	struct g_consumer *cp;
	size_t bufsize;
	int error;

	/*
	 * We must have a pathname, and it must be absolute.
	 */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (EINVAL);
	}

	vd->vdev_tsd = NULL;

	DROP_GIANT();
	g_topology_lock();
	error = 0;

	/*
	 * If we're creating or splitting a pool, just find the GEOM provider
	 * by its name and ignore GUID mismatches.
	 */
	if (vd->vdev_spa->spa_load_state == SPA_LOAD_NONE ||
	    vd->vdev_spa->spa_splitting_newspa == B_TRUE)
		cp = vdev_geom_open_by_path(vd, 0);
	else {
		cp = vdev_geom_open_by_path(vd, 1);
		if (cp == NULL) {
			/*
			 * The device at vd->vdev_path doesn't have the
			 * expected guid. The disks might have merely
			 * moved around so try all other GEOM providers
			 * to find one with the right guid.
			 */
			cp = vdev_geom_open_by_guid(vd);
		}
	}

	if (cp == NULL) {
		ZFS_LOG(1, "Provider %s not found.", vd->vdev_path);
		error = ENOENT;
	} else if (cp->provider->sectorsize > VDEV_PAD_SIZE ||
	    !ISP2(cp->provider->sectorsize)) {
		ZFS_LOG(1, "Provider %s has unsupported sectorsize.",
		    vd->vdev_path);
		vdev_geom_detach(cp, 0);
		error = EINVAL;
		cp = NULL;
	} else if (cp->acw == 0 && (spa_mode(vd->vdev_spa) & FWRITE) != 0) {
		int i;

		for (i = 0; i < 5; i++) {
			error = g_access(cp, 0, 1, 0);
			if (error == 0)
				break;
			g_topology_unlock();
			tsleep(vd, 0, "vdev", hz / 2);
			g_topology_lock();
		}
		if (error != 0) {
			printf("ZFS WARNING: Unable to open %s for writing (error=%d).\n",
			    vd->vdev_path, error);
			vdev_geom_detach(cp, 0);
			cp = NULL;
		}
	}

	g_topology_unlock();
	PICKUP_GIANT();

	if (cp == NULL) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		return (error);
	}

	cp->private = vd;
	vd->vdev_tsd = cp;
	pp = cp->provider;

	/*
	 * Determine the actual size of the device.
	 */
	*max_psize = *psize = pp->mediasize;

	/*
	 * Determine the device's minimum transfer size.
	 */
	*ashift = highbit(MAX(pp->sectorsize, SPA_MINBLOCKSIZE)) - 1;

	/*
	 * Clear the nowritecache bit, so that on a vdev_reopen() we will
	 * attempt to flush the write cache again.
	 */
	vd->vdev_nowritecache = B_FALSE;

	if (vd->vdev_physpath != NULL)
		spa_strfree(vd->vdev_physpath);
	bufsize = sizeof("/dev/") + strlen(pp->name);
	vd->vdev_physpath = kmem_alloc(bufsize, KM_SLEEP);
	snprintf(vd->vdev_physpath, bufsize, "/dev/%s", pp->name);

	return (0);
}
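/*
 * vdev close entry point: hand the consumer back to the GEOM event thread,
 * which performs the actual detach (we may not hold the topology lock here).
 */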
static void
vdev_geom_close(vdev_t *vd)
{
	struct g_consumer *cp;

	cp = vd->vdev_tsd;
	if (cp == NULL)
		return;
	vd->vdev_tsd = NULL;
	vd->vdev_delayed_close = B_FALSE;
	g_post_event(vdev_geom_detach, cp, M_WAITOK, NULL);
}
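/*
 * bio completion callback: copy the bio status into the zio, note a
 * permanent lack of cache-flush support, request asynchronous removal of a
 * vanished provider, and hand the zio back to the ZIO pipeline.
 */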
static void
vdev_geom_io_intr(struct bio *bp)
{
	vdev_t *vd;
	zio_t *zio;

	zio = bp->bio_caller1;
	vd = zio->io_vd;
	zio->io_error = bp->bio_error;
	if (zio->io_error == 0 && bp->bio_resid != 0)
		zio->io_error = EIO;
	if (bp->bio_cmd == BIO_FLUSH && bp->bio_error == ENOTSUP) {
		/*
		 * If we get ENOTSUP, we know that no future
		 * attempts will ever succeed. In this case we
		 * set a persistent bit so that we don't bother
		 * with the ioctl in the future.
		 */
		vd->vdev_nowritecache = B_TRUE;
	}
	if (zio->io_error == EIO && !vd->vdev_remove_wanted) {
		/*
		 * If provider's error is set we assume it is being
		 * removed.
		 */
		if (bp->bio_to->error != 0) {
			/*
			 * We post the resource as soon as possible, instead of
			 * when the async removal actually happens, because the
			 * DE is using this information to discard previous I/O
			 * errors.
			 */
			/* XXX: zfs_post_remove() can sleep. */
			zfs_post_remove(zio->io_spa, vd);
			vd->vdev_remove_wanted = B_TRUE;
			spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
		} else if (!vd->vdev_delayed_close) {
			vd->vdev_delayed_close = B_TRUE;
		}
	}
	g_destroy_bio(bp);
	zio_interrupt(zio);
}
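/*
 * vdev I/O entry point: translate a zio into a GEOM bio (BIO_READ,
 * BIO_WRITE, or BIO_FLUSH for the cache-flush ioctl) and queue it to the
 * consumer; completion is reported via vdev_geom_io_intr().
 */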
static int
vdev_geom_io_start(zio_t *zio)
{
	vdev_t *vd;
	struct g_consumer *cp;
	struct bio *bp;

	vd = zio->io_vd;

	if (zio->io_type == ZIO_TYPE_IOCTL) {
		/* XXPOLICY */
		if (!vdev_readable(vd)) {
			zio->io_error = ENXIO;
			return (ZIO_PIPELINE_CONTINUE);
		}

		switch (zio->io_cmd) {
		case DKIOCFLUSHWRITECACHE:
			if (zfs_nocacheflush || vdev_geom_bio_flush_disable)
				break;
			if (vd->vdev_nowritecache) {
				zio->io_error = ENOTSUP;
				break;
			}
			goto sendreq;
		default:
			zio->io_error = ENOTSUP;
		}

		return (ZIO_PIPELINE_CONTINUE);
	}
sendreq:
	cp = vd->vdev_tsd;
	if (cp == NULL) {
		zio->io_error = ENXIO;
		return (ZIO_PIPELINE_CONTINUE);
	}
	bp = g_alloc_bio();
	bp->bio_caller1 = zio;
	switch (zio->io_type) {
	case ZIO_TYPE_READ:
	case ZIO_TYPE_WRITE:
		bp->bio_cmd = zio->io_type == ZIO_TYPE_READ ? BIO_READ : BIO_WRITE;
		bp->bio_data = zio->io_data;
		bp->bio_offset = zio->io_offset;
		bp->bio_length = zio->io_size;
		break;
	case ZIO_TYPE_IOCTL:
		bp->bio_cmd = BIO_FLUSH;
		bp->bio_flags |= BIO_ORDERED;
		bp->bio_data = NULL;
		bp->bio_offset = cp->provider->mediasize;
		bp->bio_length = 0;
		break;
	}
	bp->bio_done = vdev_geom_io_intr;

	g_io_request(bp, cp);

	return (ZIO_PIPELINE_STOP);
}
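/*
 * The remaining vdev methods need no GEOM-specific work; they exist only to
 * fill out the vdev operations vector below.
 */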
static void
vdev_geom_io_done(zio_t *zio)
{
}

static void
vdev_geom_hold(vdev_t *vd)
{
}

static void
vdev_geom_rele(vdev_t *vd)
{
}
vdev_ops_t vdev_geom_ops = {
	vdev_geom_open,
	vdev_geom_close,
	vdev_default_asize,
	vdev_geom_io_start,
	vdev_geom_io_done,
	NULL,			/* vdev_op_state_change */
	vdev_geom_hold,
	vdev_geom_rele,
	VDEV_TYPE_DISK,		/* name of this vdev type */
	B_TRUE			/* leaf vdev */
};