/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Portions Copyright (c) 2012 Martin Matuska <mm@FreeBSD.org>
 */
#include <sys/zfs_context.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/disk.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
/*
 * Virtual device vector for GEOM.
 */

static g_attrchanged_t vdev_geom_attrchanged;
struct g_class zfs_vdev_class = {
        .name = "ZFS::VDEV",
        .version = G_VERSION,
        .attrchanged = vdev_geom_attrchanged,
};

DECLARE_GEOM_CLASS(zfs_vdev_class, zfs_vdev);
SYSCTL_DECL(_vfs_zfs_vdev);
/* Don't send BIO_FLUSH. */
static int vdev_geom_bio_flush_disable = 0;
TUNABLE_INT("vfs.zfs.vdev.bio_flush_disable", &vdev_geom_bio_flush_disable);
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_flush_disable, CTLFLAG_RW,
    &vdev_geom_bio_flush_disable, 0, "Disable BIO_FLUSH");
/* Don't send BIO_DELETE. */
static int vdev_geom_bio_delete_disable = 0;
TUNABLE_INT("vfs.zfs.vdev.bio_delete_disable", &vdev_geom_bio_delete_disable);
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_delete_disable, CTLFLAG_RW,
    &vdev_geom_bio_delete_disable, 0, "Disable BIO_DELETE");
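/*
 * Usage sketch (added, not in the original file): because these knobs
 * are declared with both TUNABLE_INT and CTLFLAG_RW, they can be set
 * as loader tunables or changed at runtime, e.g.:
 *
 *      sysctl vfs.zfs.vdev.bio_flush_disable=1
 *      echo 'vfs.zfs.vdev.bio_delete_disable=1' >> /boot/loader.conf
 */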
static void
vdev_geom_set_rotation_rate(vdev_t *vd, struct g_consumer *cp)
{
        int error;
        uint16_t rate;

        error = g_getattr("GEOM::rotation_rate", cp, &rate);
        if (error == 0)
                vd->vdev_rotation_rate = rate;
        else
                vd->vdev_rotation_rate = VDEV_RATE_UNKNOWN;
}
static void
vdev_geom_attrchanged(struct g_consumer *cp, const char *attr)
{
        vdev_t *vd;

        vd = cp->private;
        if (vd == NULL)
                return;

        if (strcmp(attr, "GEOM::rotation_rate") == 0) {
                vdev_geom_set_rotation_rate(vd, cp);
                return;
        }
}
static void
vdev_geom_orphan(struct g_consumer *cp)
{
        vdev_t *vd;

        g_topology_assert();

        vd = cp->private;
        if (vd == NULL)
                return;

        /*
         * Orphan callbacks occur from the GEOM event thread.
         * Concurrent with this call, new I/O requests may be
         * working their way through GEOM about to find out
         * (only once executed by the g_down thread) that we've
         * been orphaned from our disk provider.  These I/Os
         * must be retired before we can detach our consumer.
         * This is most easily achieved by acquiring the
         * SPA ZIO configuration lock as a writer, but doing
         * so with the GEOM topology lock held would cause
         * a lock order reversal.  Instead, rely on the SPA's
         * async removal support to invoke a close on this
         * vdev once it is safe to do so.
         */
        vd->vdev_remove_wanted = B_TRUE;
        spa_async_request(vd->vdev_spa, SPA_ASYNC_REMOVE);
}
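/*
 * Illustrative flow (added summary): the GEOM event thread only marks
 * the vdev and pokes the SPA async task here.  The g_access()/g_detach()
 * teardown happens later from the SPA async thread, which can take the
 * SPA configuration lock without the topology lock held, after all
 * in-flight I/O against the orphaned provider has been retired.
 */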
static struct g_consumer *
vdev_geom_attach(struct g_provider *pp)
{
        struct g_geom *gp;
        struct g_consumer *cp;

        g_topology_assert();

        ZFS_LOG(1, "Attaching to %s.", pp->name);
        /* Do we have geom already? No? Create one. */
        LIST_FOREACH(gp, &zfs_vdev_class.geom, geom) {
                if (gp->flags & G_GEOM_WITHER)
                        continue;
                if (strcmp(gp->name, "zfs::vdev") != 0)
                        continue;
                break;
        }
        if (gp == NULL) {
                gp = g_new_geomf(&zfs_vdev_class, "zfs::vdev");
                gp->orphan = vdev_geom_orphan;
                cp = g_new_consumer(gp);
                if (g_attach(cp, pp) != 0) {
                        g_wither_geom(gp, ENXIO);
                        return (NULL);
                }
                if (g_access(cp, 1, 0, 1) != 0) {
                        g_wither_geom(gp, ENXIO);
                        return (NULL);
                }
                ZFS_LOG(1, "Created geom and consumer for %s.", pp->name);
        } else {
                /* Check if we are already connected to this provider. */
                LIST_FOREACH(cp, &gp->consumer, consumer) {
                        if (cp->provider == pp) {
                                ZFS_LOG(1, "Found consumer for %s.", pp->name);
                                break;
                        }
                }
                if (cp == NULL) {
                        cp = g_new_consumer(gp);
                        if (g_attach(cp, pp) != 0) {
                                g_destroy_consumer(cp);
                                return (NULL);
                        }
                        if (g_access(cp, 1, 0, 1) != 0) {
                                g_detach(cp);
                                g_destroy_consumer(cp);
                                return (NULL);
                        }
                        ZFS_LOG(1, "Created consumer for %s.", pp->name);
                } else {
                        if (g_access(cp, 1, 0, 1) != 0)
                                return (NULL);
                        ZFS_LOG(1, "Used existing consumer for %s.", pp->name);
                }
        }
        /* Allow direct dispatch, bypassing the g_up/g_down threads. */
        cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
        return (cp);
}
static void
vdev_geom_detach(void *arg, int flag __unused)
{
        struct g_geom *gp;
        struct g_consumer *cp;

        g_topology_assert();
        cp = arg;
        gp = cp->geom;

        ZFS_LOG(1, "Closing access to %s.", cp->provider->name);
        g_access(cp, -1, 0, -1);
        /* Destroy consumer on last close. */
        if (cp->acr == 0 && cp->ace == 0) {
                ZFS_LOG(1, "Destroyed consumer to %s.", cp->provider->name);
                if (cp->acw > 0)
                        g_access(cp, 0, -cp->acw, 0);
                g_detach(cp);
                g_destroy_consumer(cp);
        }
        /* Destroy geom if there are no consumers left. */
        if (LIST_EMPTY(&gp->consumer)) {
                ZFS_LOG(1, "Destroyed geom %s.", gp->name);
                g_wither_geom(gp, ENXIO);
        }
}
static void
nvlist_get_guids(nvlist_t *list, uint64_t *pguid, uint64_t *vguid)
{

        (void) nvlist_lookup_uint64(list, ZPOOL_CONFIG_GUID, vguid);
        (void) nvlist_lookup_uint64(list, ZPOOL_CONFIG_POOL_GUID, pguid);
}
static int
vdev_geom_io(struct g_consumer *cp, int cmd, void *data, off_t offset,
    off_t size)
{
        struct bio *bp;
        u_char *p;
        off_t off, maxio;
        int error;

        ASSERT((offset % cp->provider->sectorsize) == 0);
        ASSERT((size % cp->provider->sectorsize) == 0);

        bp = g_alloc_bio();
        off = offset;
        offset += size;
        p = data;
        maxio = MAXPHYS - (MAXPHYS % cp->provider->sectorsize);
        error = 0;

        for (; off < offset; off += maxio, p += maxio, size -= maxio) {
                bzero(bp, sizeof(*bp));
                bp->bio_cmd = cmd;
                bp->bio_done = NULL;
                bp->bio_offset = off;
                bp->bio_length = MIN(size, maxio);
                bp->bio_data = p;
                g_io_request(bp, cp);
                error = biowait(bp, "vdev_geom_io");
                if (error != 0)
                        break;
        }

        g_destroy_bio(bp);
        return (error);
}
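/*
 * Worked example (added): with MAXPHYS = 128 KiB (the historical
 * FreeBSD default) and a 512-byte sector, maxio is 131072, so a
 * 300 KiB request is issued as 128 KiB + 128 KiB + 44 KiB chunks.
 * The modulo term only matters for sector sizes that do not divide
 * MAXPHYS evenly.
 */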
static void
vdev_geom_taste_orphan(struct g_consumer *cp)
{

        KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
            cp->provider->name));
}
static int
vdev_geom_read_config(struct g_consumer *cp, nvlist_t **config)
{
        struct g_provider *pp;
        vdev_label_t *label;
        char *buf;
        size_t buflen;
        uint64_t psize;
        off_t offset, size;
        uint64_t state, txg;
        int l;

        g_topology_assert_not();

        pp = cp->provider;
        ZFS_LOG(1, "Reading config from %s...", pp->name);

        psize = pp->mediasize;
        psize = P2ALIGN(psize, (uint64_t)sizeof(vdev_label_t));

        size = sizeof(*label) + pp->sectorsize -
            ((sizeof(*label) - 1) % pp->sectorsize) - 1;

        label = kmem_alloc(size, KM_SLEEP);
        buflen = sizeof(label->vl_vdev_phys.vp_nvlist);
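
        /*
         * Note (added): the "size" computation above just rounds
         * sizeof(vdev_label_t) up to a whole number of sectors; e.g. a
         * 256 KiB label on a 4 KiB-sector provider stays 256 KiB, while
         * any remainder would be padded to the next sector boundary so
         * the BIO_READ below stays sector-aligned.
         */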
        *config = NULL;
        for (l = 0; l < VDEV_LABELS; l++) {

                offset = vdev_label_offset(psize, l, 0);
                if ((offset % pp->sectorsize) != 0)
                        continue;

                if (vdev_geom_io(cp, BIO_READ, label, offset, size) != 0)
                        continue;
                buf = label->vl_vdev_phys.vp_nvlist;

                if (nvlist_unpack(buf, buflen, config, 0) != 0)
                        continue;

                if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
                    &state) != 0 || state > POOL_STATE_L2CACHE) {
                        nvlist_free(*config);
                        *config = NULL;
                        continue;
                }

                if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
                    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
                    &txg) != 0 || txg == 0)) {
                        nvlist_free(*config);
                        *config = NULL;
                        continue;
                }

                break;
        }

        kmem_free(label, size);
        return (*config == NULL ? ENOENT : 0);
}
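/*
 * Background note (added): ZFS keeps VDEV_LABELS (four) redundant label
 * copies per device, two at the front and two at the end;
 * vdev_label_offset() maps label index l to its byte offset.  That is
 * why the loop above can stop at the first label that unpacks cleanly
 * and passes the state/txg sanity checks.
 */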
static void
resize_configs(nvlist_t ***configs, uint64_t *count, uint64_t id)
{
        nvlist_t **new_configs;
        uint64_t i;

        if (id < *count)
                return;
        new_configs = kmem_zalloc((id + 1) * sizeof(nvlist_t *),
            KM_SLEEP);
        for (i = 0; i < *count; i++)
                new_configs[i] = (*configs)[i];
        if (*configs != NULL)
                kmem_free(*configs, *count * sizeof(void *));
        *configs = new_configs;
        *count = id + 1;
}
static void
process_vdev_config(nvlist_t ***configs, uint64_t *count, nvlist_t *cfg,
    const char *name, uint64_t *known_pool_guid)
{
        nvlist_t *vdev_tree;
        uint64_t pool_guid;
        uint64_t vdev_guid, known_guid;
        uint64_t id, txg, known_txg;
        char *pname;

        if (nvlist_lookup_string(cfg, ZPOOL_CONFIG_POOL_NAME, &pname) != 0 ||
            strcmp(pname, name) != 0)
                goto ignore;

        if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_GUID, &pool_guid) != 0)
                goto ignore;

        if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_TOP_GUID, &vdev_guid) != 0)
                goto ignore;

        if (nvlist_lookup_nvlist(cfg, ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) != 0)
                goto ignore;

        if (nvlist_lookup_uint64(vdev_tree, ZPOOL_CONFIG_ID, &id) != 0)
                goto ignore;

        VERIFY(nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);

        if (*known_pool_guid != 0) {
                if (pool_guid != *known_pool_guid)
                        goto ignore;
        } else
                *known_pool_guid = pool_guid;

        resize_configs(configs, count, id);

        if ((*configs)[id] != NULL) {
                VERIFY(nvlist_lookup_uint64((*configs)[id],
                    ZPOOL_CONFIG_POOL_TXG, &known_txg) == 0);
                if (txg <= known_txg)
                        goto ignore;
                nvlist_free((*configs)[id]);
        }

        (*configs)[id] = cfg;
        return;

ignore:
        nvlist_free(cfg);
}
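/*
 * Example (added, illustrative): if da0's label for top-level vdev 0
 * carries txg 120 and a stale replica of the same vdev on da1 carries
 * txg 95, the txg-120 config replaces the older one in configs[0];
 * configs with an equal or older txg are discarded via the "ignore"
 * path above.
 */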
static int
vdev_geom_attach_taster(struct g_consumer *cp, struct g_provider *pp)
{
        int error;

        if (pp->flags & G_PF_WITHER)
                return (EINVAL);
        g_attach(cp, pp);
        error = g_access(cp, 1, 0, 0);
        if (error == 0) {
                if (pp->sectorsize > VDEV_PAD_SIZE || !ISP2(pp->sectorsize))
                        error = EINVAL;
                else if (pp->mediasize < SPA_MINDEVSIZE)
                        error = EINVAL;
                if (error != 0)
                        g_access(cp, -1, 0, 0);
        }
        if (error != 0)
                g_detach(cp);
        return (error);
}
static void
vdev_geom_detach_taster(struct g_consumer *cp)
{

        g_access(cp, -1, 0, 0);
        g_detach(cp);
}
int
vdev_geom_read_pool_label(const char *name,
    nvlist_t ***configs, uint64_t *count)
{
        struct g_class *mp;
        struct g_geom *gp, *zgp;
        struct g_provider *pp;
        struct g_consumer *zcp;
        nvlist_t *vdev_cfg;
        uint64_t pool_guid;
        int error;

        DROP_GIANT();
        g_topology_lock();

        zgp = g_new_geomf(&zfs_vdev_class, "zfs::vdev::taste");
        /* This orphan function should be never called. */
        zgp->orphan = vdev_geom_taste_orphan;
        zcp = g_new_consumer(zgp);

        *configs = NULL;
        *count = 0;
        pool_guid = 0;
        LIST_FOREACH(mp, &g_classes, class) {
                if (mp == &zfs_vdev_class)
                        continue;
                LIST_FOREACH(gp, &mp->geom, geom) {
                        if (gp->flags & G_GEOM_WITHER)
                                continue;
                        LIST_FOREACH(pp, &gp->provider, provider) {
                                if (pp->flags & G_PF_WITHER)
                                        continue;
                                if (vdev_geom_attach_taster(zcp, pp) != 0)
                                        continue;
                                g_topology_unlock();
                                error = vdev_geom_read_config(zcp, &vdev_cfg);
                                g_topology_lock();
                                vdev_geom_detach_taster(zcp);
                                if (error)
                                        continue;
                                ZFS_LOG(1, "successfully read vdev config");

                                process_vdev_config(configs, count,
                                    vdev_cfg, name, &pool_guid);
                        }
                }
        }

        g_destroy_consumer(zcp);
        g_destroy_geom(zgp);
        g_topology_unlock();
        PICKUP_GIANT();

        return (*count > 0 ? 0 : ENOENT);
}
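/*
 * Caller's-eye view (added): on success, *configs is an array indexed
 * by top-level vdev id, each slot holding the newest label nvlist seen
 * for that vdev (NULL for ids never observed), and *count is the array
 * length.  On FreeBSD this is used to assemble a pool config by name,
 * e.g. for root pool discovery at boot.
 */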
static void
vdev_geom_read_guids(struct g_consumer *cp, uint64_t *pguid, uint64_t *vguid)
{
        nvlist_t *config;

        g_topology_assert_not();

        *pguid = 0;
        *vguid = 0;
        if (vdev_geom_read_config(cp, &config) == 0) {
                nvlist_get_guids(config, pguid, vguid);
                nvlist_free(config);
        }
}
static struct g_consumer *
vdev_geom_attach_by_guids(uint64_t pool_guid, uint64_t vdev_guid)
{
        struct g_class *mp;
        struct g_geom *gp, *zgp;
        struct g_provider *pp;
        struct g_consumer *cp, *zcp;
        uint64_t pguid, vguid;

        g_topology_assert();

        zgp = g_new_geomf(&zfs_vdev_class, "zfs::vdev::taste");
        /* This orphan function should be never called. */
        zgp->orphan = vdev_geom_taste_orphan;
        zcp = g_new_consumer(zgp);

        cp = NULL;
        LIST_FOREACH(mp, &g_classes, class) {
                if (mp == &zfs_vdev_class)
                        continue;
                LIST_FOREACH(gp, &mp->geom, geom) {
                        if (gp->flags & G_GEOM_WITHER)
                                continue;
                        LIST_FOREACH(pp, &gp->provider, provider) {
                                if (vdev_geom_attach_taster(zcp, pp) != 0)
                                        continue;
                                g_topology_unlock();
                                vdev_geom_read_guids(zcp, &pguid, &vguid);
                                g_topology_lock();
                                vdev_geom_detach_taster(zcp);
                                /*
                                 * Check that the label's vdev guid matches the
                                 * desired guid.  If the label has a pool guid,
                                 * check that it matches too.  (Inactive spares
                                 * and L2ARCs do not have any pool guid in the
                                 * label.)
                                 */
                                if ((pguid != 0 && pguid != pool_guid) ||
                                    vguid != vdev_guid)
                                        continue;
                                cp = vdev_geom_attach(pp);
                                if (cp == NULL) {
                                        printf("ZFS WARNING: Unable to "
                                            "attach to %s.\n", pp->name);
                                        continue;
                                }
                                break;
                        }
                        if (cp != NULL)
                                break;
                }
                if (cp != NULL)
                        break;
        }

        g_destroy_consumer(zcp);
        g_destroy_geom(zgp);
        return (cp);
}
static struct g_consumer *
vdev_geom_open_by_guids(vdev_t *vd)
{
        struct g_consumer *cp;
        char *buf;
        size_t len;

        g_topology_assert();

        ZFS_LOG(1, "Searching by guid [%ju].", (uintmax_t)vd->vdev_guid);
        cp = vdev_geom_attach_by_guids(spa_guid(vd->vdev_spa), vd->vdev_guid);
        if (cp != NULL) {
                len = strlen(cp->provider->name) + strlen("/dev/") + 1;
                buf = kmem_alloc(len, KM_SLEEP);

                snprintf(buf, len, "/dev/%s", cp->provider->name);
                spa_strfree(vd->vdev_path);
                vd->vdev_path = buf;

                ZFS_LOG(1, "Attach by guid [%ju:%ju] succeeded, provider %s.",
                    (uintmax_t)spa_guid(vd->vdev_spa),
                    (uintmax_t)vd->vdev_guid, vd->vdev_path);
        } else {
                ZFS_LOG(1, "Search by guid [%ju:%ju] failed.",
                    (uintmax_t)spa_guid(vd->vdev_spa),
                    (uintmax_t)vd->vdev_guid);
        }

        return (cp);
}
static struct g_consumer *
vdev_geom_open_by_path(vdev_t *vd, int check_guid)
{
        struct g_provider *pp;
        struct g_consumer *cp;
        uint64_t pguid, vguid;

        g_topology_assert();

        cp = NULL;
        /* Skip the "/dev/" prefix; GEOM provider names do not carry it. */
        pp = g_provider_by_name(vd->vdev_path + sizeof("/dev/") - 1);
        if (pp != NULL) {
                ZFS_LOG(1, "Found provider by name %s.", vd->vdev_path);
                cp = vdev_geom_attach(pp);
                if (cp != NULL && check_guid && ISP2(pp->sectorsize) &&
                    pp->sectorsize <= VDEV_PAD_SIZE) {
                        g_topology_unlock();
                        vdev_geom_read_guids(cp, &pguid, &vguid);
                        g_topology_lock();
                        if (pguid != spa_guid(vd->vdev_spa) ||
                            vguid != vd->vdev_guid) {
                                vdev_geom_detach(cp, 0);
                                cp = NULL;
                                ZFS_LOG(1, "guid mismatch for provider %s: "
                                    "%ju:%ju != %ju:%ju.", vd->vdev_path,
                                    (uintmax_t)spa_guid(vd->vdev_spa),
                                    (uintmax_t)vd->vdev_guid,
                                    (uintmax_t)pguid, (uintmax_t)vguid);
                        } else {
                                ZFS_LOG(1, "guid match for provider %s.",
                                    vd->vdev_path);
                        }
                }
        }

        return (cp);
}
static int
vdev_geom_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
        struct g_provider *pp;
        struct g_consumer *cp;
        size_t bufsize;
        int error;

        /*
         * We must have a pathname, and it must be absolute.
         */
        if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
                vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
                return (EINVAL);
        }

        vd->vdev_tsd = NULL;

        DROP_GIANT();
        g_topology_lock();
        error = 0;

        if (vd->vdev_spa->spa_splitting_newspa ||
            (vd->vdev_prevstate == VDEV_STATE_UNKNOWN &&
            vd->vdev_spa->spa_load_state == SPA_LOAD_NONE)) {
                /*
                 * We are dealing with a vdev that hasn't been previously
                 * opened (since boot), and we are not loading an
                 * existing pool configuration.  This looks like a
                 * vdev add operation to a new or existing pool.
                 * Assume the user knows what he/she is doing and find
                 * GEOM provider by its name, ignoring GUID mismatches.
                 *
                 * XXPOLICY: It would be safer to only allow a device
                 *           that is unlabeled or labeled but missing
                 *           GUID information to be opened in this fashion,
                 *           unless we are doing a split, in which case we
                 *           should allow any guid.
                 */
                cp = vdev_geom_open_by_path(vd, 0);
        } else {
                /*
                 * Try using the recorded path for this device, but only
                 * accept it if its label data contains the expected GUIDs.
                 */
                cp = vdev_geom_open_by_path(vd, 1);
                if (cp == NULL) {
                        /*
                         * The device at vd->vdev_path doesn't have the
                         * expected GUIDs.  The disks might have merely
                         * moved around so try all other GEOM providers
                         * to find one with the right GUIDs.
                         */
                        cp = vdev_geom_open_by_guids(vd);
                }
        }

        if (cp == NULL) {
                ZFS_LOG(1, "Provider %s not found.", vd->vdev_path);
                error = ENOENT;
        } else if (cp->provider->sectorsize > VDEV_PAD_SIZE ||
            !ISP2(cp->provider->sectorsize)) {
                ZFS_LOG(1, "Provider %s has unsupported sectorsize.",
                    vd->vdev_path);

                vdev_geom_detach(cp, 0);
                error = EINVAL;
                cp = NULL;
        } else if (cp->acw == 0 && (spa_mode(vd->vdev_spa) & FWRITE) != 0) {
                int i;

                for (i = 0; i < 5; i++) {
                        error = g_access(cp, 0, 1, 0);
                        if (error == 0)
                                break;
                        g_topology_unlock();
                        tsleep(vd, 0, "vdev", hz / 2);
                        g_topology_lock();
                }
                if (error != 0) {
                        printf("ZFS WARNING: Unable to open %s for writing (error=%d).\n",
                            vd->vdev_path, error);
                        vdev_geom_detach(cp, 0);
                        cp = NULL;
                }
        }

        g_topology_unlock();
        PICKUP_GIANT();
        if (cp == NULL) {
                vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
                return (error);
        }

        cp->private = vd;
        vd->vdev_tsd = cp;
        pp = cp->provider;
        /*
         * Determine the actual size of the device.
         */
        *max_psize = *psize = pp->mediasize;

        /*
         * Determine the device's minimum transfer size and preferred
         * transfer size.
         */
        *logical_ashift = highbit(MAX(pp->sectorsize, SPA_MINBLOCKSIZE)) - 1;
        *physical_ashift = 0;
        if (pp->stripesize)
                *physical_ashift = highbit(pp->stripesize) - 1;
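
        /*
         * Worked example (added): highbit() returns the 1-based index
         * of the most significant set bit, so a 512-byte sector gives
         * highbit(512) - 1 = 9 (ashift 9, i.e. 2^9-byte blocks), and a
         * 4096-byte stripesize gives physical_ashift 12.
         */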
        /*
         * Clear the nowritecache settings, so that on a vdev_reopen()
         * we will try again.
         */
        vd->vdev_nowritecache = B_FALSE;

        if (vd->vdev_physpath != NULL)
                spa_strfree(vd->vdev_physpath);
        /* sizeof("/dev/") includes the terminating NUL. */
        bufsize = sizeof("/dev/") + strlen(pp->name);
        vd->vdev_physpath = kmem_alloc(bufsize, KM_SLEEP);
        snprintf(vd->vdev_physpath, bufsize, "/dev/%s", pp->name);

        /*
         * Determine the device's rotation rate.
         */
        vdev_geom_set_rotation_rate(vd, cp);

        return (0);
}
static void
vdev_geom_close(vdev_t *vd)
{
        struct g_consumer *cp;

        cp = vd->vdev_tsd;
        if (cp == NULL)
                return;
        vd->vdev_tsd = NULL;
        vd->vdev_delayed_close = B_FALSE;
        cp->private = NULL;     /* XXX locking */
        g_post_event(vdev_geom_detach, cp, M_WAITOK, NULL);
}
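/*
 * Design note (added): the detach is posted to the GEOM event thread
 * via g_post_event() rather than done inline because vdev_geom_close()
 * may run without the GEOM topology lock, while vdev_geom_detach()
 * asserts that lock; event-thread callbacks always execute with it
 * held.
 */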
static void
vdev_geom_io_intr(struct bio *bp)
{
        vdev_t *vd;
        zio_t *zio;

        zio = bp->bio_caller1;
        vd = zio->io_vd;
        zio->io_error = bp->bio_error;
        if (zio->io_error == 0 && bp->bio_resid != 0)
                zio->io_error = SET_ERROR(EIO);

        switch (zio->io_error) {
        case ENOTSUP:
                /*
                 * If we get ENOTSUP for BIO_FLUSH or BIO_DELETE we know
                 * that future attempts will never succeed.  In this case
                 * we set a persistent flag so that we don't bother with
                 * requests in the future.
                 */
                switch (bp->bio_cmd) {
                case BIO_FLUSH:
                        vd->vdev_nowritecache = B_TRUE;
                        break;
                case BIO_DELETE:
                        vd->vdev_notrim = B_TRUE;
                        break;
                }
                break;
        case ENXIO:
                if (!vd->vdev_remove_wanted) {
                        /*
                         * If provider's error is set we assume it is being
                         * removed.
                         */
                        if (bp->bio_to->error != 0) {
                                vd->vdev_remove_wanted = B_TRUE;
                                spa_async_request(zio->io_spa,
                                    SPA_ASYNC_REMOVE);
                        } else if (!vd->vdev_delayed_close) {
                                vd->vdev_delayed_close = B_TRUE;
                        }
                }
                break;
        }

        g_destroy_bio(bp);
        zio_interrupt(zio);
}
static int
vdev_geom_io_start(zio_t *zio)
{
        vdev_t *vd;
        struct g_consumer *cp;
        struct bio *bp;

        vd = zio->io_vd;

        switch (zio->io_type) {
        case ZIO_TYPE_IOCTL:
                /* XXPOLICY */
                if (!vdev_readable(vd)) {
                        zio->io_error = SET_ERROR(ENXIO);
                } else {
                        switch (zio->io_cmd) {
                        case DKIOCFLUSHWRITECACHE:
                                if (zfs_nocacheflush ||
                                    vdev_geom_bio_flush_disable)
                                        break;
                                if (vd->vdev_nowritecache) {
                                        zio->io_error = SET_ERROR(ENOTSUP);
                                        break;
                                }
                                goto sendreq;
                        default:
                                zio->io_error = SET_ERROR(ENOTSUP);
                        }
                }

                zio_interrupt(zio);
                return (ZIO_PIPELINE_STOP);
        case ZIO_TYPE_FREE:
                if (vd->vdev_notrim) {
                        zio->io_error = SET_ERROR(ENOTSUP);
                } else if (!vdev_geom_bio_delete_disable) {
                        goto sendreq;
                }
                zio_interrupt(zio);
                return (ZIO_PIPELINE_STOP);
        }
sendreq:
        ASSERT(zio->io_type == ZIO_TYPE_READ ||
            zio->io_type == ZIO_TYPE_WRITE ||
            zio->io_type == ZIO_TYPE_FREE ||
            zio->io_type == ZIO_TYPE_IOCTL);

        cp = vd->vdev_tsd;
        if (cp == NULL) {
                zio->io_error = SET_ERROR(ENXIO);
                zio_interrupt(zio);
                return (ZIO_PIPELINE_STOP);
        }
        bp = g_alloc_bio();
        bp->bio_caller1 = zio;
        switch (zio->io_type) {
        case ZIO_TYPE_READ:
        case ZIO_TYPE_WRITE:
                bp->bio_cmd = zio->io_type == ZIO_TYPE_READ ?
                    BIO_READ : BIO_WRITE;
                bp->bio_data = zio->io_data;
                bp->bio_offset = zio->io_offset;
                bp->bio_length = zio->io_size;
                break;
        case ZIO_TYPE_FREE:
                bp->bio_cmd = BIO_DELETE;
                bp->bio_data = NULL;
                bp->bio_offset = zio->io_offset;
                bp->bio_length = zio->io_size;
                break;
        case ZIO_TYPE_IOCTL:
                bp->bio_cmd = BIO_FLUSH;
                bp->bio_flags |= BIO_ORDERED;
                bp->bio_data = NULL;
                bp->bio_offset = cp->provider->mediasize;
                bp->bio_length = 0;
                break;
        }
        bp->bio_done = vdev_geom_io_intr;

        g_io_request(bp, cp);

        return (ZIO_PIPELINE_STOP);
}
static void
vdev_geom_io_done(zio_t *zio)
{
}

static void
vdev_geom_hold(vdev_t *vd)
{
}

static void
vdev_geom_rele(vdev_t *vd)
{
}

vdev_ops_t vdev_geom_ops = {
        vdev_geom_open,
        vdev_geom_close,
        vdev_default_asize,
        vdev_geom_io_start,
        vdev_geom_io_done,
        NULL,
        vdev_geom_hold,
        vdev_geom_rele,
        VDEV_TYPE_DISK,         /* name of this vdev type */
        B_TRUE                  /* leaf vdev */
};