 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 * Copyright (c) 2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 * Portions Copyright (c) 2012 Martin Matuska <mm@FreeBSD.org>
#include <sys/zfs_context.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
 * Virtual device vector for GEOM.
static g_attrchanged_t vdev_geom_attrchanged;
struct g_class zfs_vdev_class = {
        .attrchanged = vdev_geom_attrchanged,
DECLARE_GEOM_CLASS(zfs_vdev_class, zfs_vdev);
SYSCTL_DECL(_vfs_zfs_vdev);
/* Don't send BIO_FLUSH. */
static int vdev_geom_bio_flush_disable;
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_flush_disable, CTLFLAG_RWTUN,
    &vdev_geom_bio_flush_disable, 0, "Disable BIO_FLUSH");
/* Don't send BIO_DELETE. */
static int vdev_geom_bio_delete_disable;
SYSCTL_INT(_vfs_zfs_vdev, OID_AUTO, bio_delete_disable, CTLFLAG_RWTUN,
    &vdev_geom_bio_delete_disable, 0, "Disable BIO_DELETE");
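/*
 * These knobs surface as vfs.zfs.vdev.bio_flush_disable and
 * vfs.zfs.vdev.bio_delete_disable.  CTLFLAG_RWTUN makes them writable at
 * runtime via sysctl(8) and settable as loader tunables, e.g.:
 *
 *     sysctl vfs.zfs.vdev.bio_delete_disable=1
 */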
vdev_geom_set_rotation_rate(vdev_t *vd, struct g_consumer *cp)
        error = g_getattr("GEOM::rotation_rate", cp, &rate);
                vd->vdev_rotation_rate = rate;
                vd->vdev_rotation_rate = VDEV_RATE_UNKNOWN;
vdev_geom_attrchanged(struct g_consumer *cp, const char *attr)
        if (strcmp(attr, "GEOM::rotation_rate") == 0) {
                vdev_geom_set_rotation_rate(vd, cp);
vdev_geom_orphan(struct g_consumer *cp)
         * Orphan callbacks occur from the GEOM event thread.
         * Concurrent with this call, new I/O requests may be
         * working their way through GEOM about to find out
         * (only once executed by the g_down thread) that we've
         * been orphaned from our disk provider. These I/Os
         * must be retired before we can detach our consumer.
         * This is most easily achieved by acquiring the
         * SPA ZIO configuration lock as a writer, but doing
         * so with the GEOM topology lock held would cause
         * a lock order reversal. Instead, rely on the SPA's
         * async removal support to invoke a close on this
         * vdev once it is safe to do so.
        zfs_post_remove(vd->vdev_spa, vd);
        vd->vdev_remove_wanted = B_TRUE;
        spa_async_request(vd->vdev_spa, SPA_ASYNC_REMOVE);
static struct g_consumer *
vdev_geom_attach(struct g_provider *pp)
        struct g_consumer *cp;
        ZFS_LOG(1, "Attaching to %s.", pp->name);
        /* Do we have geom already? No? Create one. */
        LIST_FOREACH(gp, &zfs_vdev_class.geom, geom) {
                if (gp->flags & G_GEOM_WITHER)
                if (strcmp(gp->name, "zfs::vdev") != 0)
                gp = g_new_geomf(&zfs_vdev_class, "zfs::vdev");
                gp->orphan = vdev_geom_orphan;
                cp = g_new_consumer(gp);
                if (g_attach(cp, pp) != 0) {
                        g_wither_geom(gp, ENXIO);
                if (g_access(cp, 1, 0, 1) != 0) {
                        g_wither_geom(gp, ENXIO);
                ZFS_LOG(1, "Created geom and consumer for %s.", pp->name);
                /* Check if we are already connected to this provider. */
                LIST_FOREACH(cp, &gp->consumer, consumer) {
                        if (cp->provider == pp) {
                                ZFS_LOG(1, "Found consumer for %s.", pp->name);
                        cp = g_new_consumer(gp);
                        if (g_attach(cp, pp) != 0) {
                                g_destroy_consumer(cp);
                        if (g_access(cp, 1, 0, 1) != 0) {
                                g_destroy_consumer(cp);
                        ZFS_LOG(1, "Created consumer for %s.", pp->name);
                        if (g_access(cp, 1, 0, 1) != 0)
                        ZFS_LOG(1, "Used existing consumer for %s.", pp->name);
        cp->flags |= G_CF_DIRECT_SEND | G_CF_DIRECT_RECEIVE;
vdev_geom_detach(void *arg, int flag __unused)
        struct g_consumer *cp;
        ZFS_LOG(1, "Closing access to %s.", cp->provider->name);
        g_access(cp, -1, 0, -1);
        /* Destroy consumer on last close. */
        if (cp->acr == 0 && cp->ace == 0) {
                ZFS_LOG(1, "Destroyed consumer to %s.", cp->provider->name);
                        g_access(cp, 0, -cp->acw, 0);
                g_destroy_consumer(cp);
        /* Destroy geom if there are no consumers left. */
        if (LIST_EMPTY(&gp->consumer)) {
                ZFS_LOG(1, "Destroyed geom %s.", gp->name);
                g_wither_geom(gp, ENXIO);
nvlist_get_guids(nvlist_t *list, uint64_t *pguid, uint64_t *vguid)
        nvlist_lookup_uint64(list, ZPOOL_CONFIG_GUID, vguid);
        nvlist_lookup_uint64(list, ZPOOL_CONFIG_POOL_GUID, pguid);
vdev_geom_io(struct g_consumer *cp, int cmd, void *data, off_t offset, off_t size)
        ASSERT((offset % cp->provider->sectorsize) == 0);
        ASSERT((size % cp->provider->sectorsize) == 0);
        maxio = MAXPHYS - (MAXPHYS % cp->provider->sectorsize);
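        /*
         * Issue the transfer as a chain of bios no larger than maxio
         * (MAXPHYS rounded down to a sector-size multiple), waiting for
         * each one to complete before sending the next.
         */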
        for (; off < offset; off += maxio, p += maxio, size -= maxio) {
                bzero(bp, sizeof(*bp));
                bp->bio_offset = off;
                bp->bio_length = MIN(size, maxio);
                g_io_request(bp, cp);
                error = biowait(bp, "vdev_geom_io");
vdev_geom_taste_orphan(struct g_consumer *cp)
        KASSERT(1 == 0, ("%s called while tasting %s.", __func__,
            cp->provider->name));
vdev_geom_read_config(struct g_consumer *cp, nvlist_t **config)
        struct g_provider *pp;
        g_topology_assert_not();
        ZFS_LOG(1, "Reading config from %s...", pp->name);
        psize = pp->mediasize;
        psize = P2ALIGN(psize, (uint64_t)sizeof(vdev_label_t));
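        /*
         * Round the label buffer up to a whole number of sectors so the
         * reads below stay sector-aligned.
         */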
        size = sizeof(*label) + pp->sectorsize -
            ((sizeof(*label) - 1) % pp->sectorsize) - 1;
        label = kmem_alloc(size, KM_SLEEP);
        buflen = sizeof(label->vl_vdev_phys.vp_nvlist);
        for (l = 0; l < VDEV_LABELS; l++) {
                offset = vdev_label_offset(psize, l, 0);
                if ((offset % pp->sectorsize) != 0)
                if (vdev_geom_io(cp, BIO_READ, label, offset, size) != 0)
                buf = label->vl_vdev_phys.vp_nvlist;
                if (nvlist_unpack(buf, buflen, config, 0) != 0)
                if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
                    &state) != 0 || state > POOL_STATE_L2CACHE) {
                        nvlist_free(*config);
                if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
                    (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
                    &txg) != 0 || txg == 0)) {
                        nvlist_free(*config);
        kmem_free(label, size);
        return (*config == NULL ? ENOENT : 0);
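/*
 * Grow *configs so it can hold at least id + 1 entries, copying over any
 * existing config pointers and freeing the old array.
 */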
resize_configs(nvlist_t ***configs, uint64_t *count, uint64_t id)
        nvlist_t **new_configs;
        new_configs = kmem_zalloc((id + 1) * sizeof(nvlist_t *),
        for (i = 0; i < *count; i++)
                new_configs[i] = (*configs)[i];
        if (*configs != NULL)
                kmem_free(*configs, *count * sizeof(void *));
        *configs = new_configs;
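/*
 * Record cfg in *configs, indexed by top-level vdev id, if it belongs to
 * the named pool and carries a higher TXG than any config already stored
 * for that id.
 */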
process_vdev_config(nvlist_t ***configs, uint64_t *count, nvlist_t *cfg,
    const char *name, uint64_t *known_pool_guid)
        uint64_t vdev_guid, known_guid;
        uint64_t id, txg, known_txg;
        if (nvlist_lookup_string(cfg, ZPOOL_CONFIG_POOL_NAME, &pname) != 0 ||
            strcmp(pname, name) != 0)
        if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_GUID, &pool_guid) != 0)
        if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_TOP_GUID, &vdev_guid) != 0)
        if (nvlist_lookup_nvlist(cfg, ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) != 0)
        if (nvlist_lookup_uint64(vdev_tree, ZPOOL_CONFIG_ID, &id) != 0)
        VERIFY(nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_TXG, &txg) == 0);
        if (*known_pool_guid != 0) {
                if (pool_guid != *known_pool_guid)
                *known_pool_guid = pool_guid;
        resize_configs(configs, count, id);
        if ((*configs)[id] != NULL) {
                VERIFY(nvlist_lookup_uint64((*configs)[id],
                    ZPOOL_CONFIG_POOL_TXG, &known_txg) == 0);
                if (txg <= known_txg)
                nvlist_free((*configs)[id]);
        (*configs)[id] = cfg;
vdev_geom_attach_taster(struct g_consumer *cp, struct g_provider *pp)
        if (pp->flags & G_PF_WITHER)
        error = g_access(cp, 1, 0, 0);
        if (pp->sectorsize > VDEV_PAD_SIZE || !ISP2(pp->sectorsize))
        else if (pp->mediasize < SPA_MINDEVSIZE)
                g_access(cp, -1, 0, 0);
vdev_geom_detach_taster(struct g_consumer *cp)
        g_access(cp, -1, 0, 0);
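/*
 * Taste every provider of every GEOM class (other than our own) and
 * collect the vdev label configs that belong to the named pool into
 * *configs, indexed by top-level vdev id.
 */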
vdev_geom_read_pool_label(const char *name,
    nvlist_t ***configs, uint64_t *count)
        struct g_geom *gp, *zgp;
        struct g_provider *pp;
        struct g_consumer *zcp;
        zgp = g_new_geomf(&zfs_vdev_class, "zfs::vdev::taste");
        /* This orphan function should never be called. */
        zgp->orphan = vdev_geom_taste_orphan;
        zcp = g_new_consumer(zgp);
        LIST_FOREACH(mp, &g_classes, class) {
                if (mp == &zfs_vdev_class)
                LIST_FOREACH(gp, &mp->geom, geom) {
                        if (gp->flags & G_GEOM_WITHER)
                        LIST_FOREACH(pp, &gp->provider, provider) {
                                if (pp->flags & G_PF_WITHER)
                                if (vdev_geom_attach_taster(zcp, pp) != 0)
                                error = vdev_geom_read_config(zcp, &vdev_cfg);
                                vdev_geom_detach_taster(zcp);
                                ZFS_LOG(1, "successfully read vdev config");
                                process_vdev_config(configs, count,
                                    vdev_cfg, name, &pool_guid);
        g_destroy_consumer(zcp);
        return (*count > 0 ? 0 : ENOENT);
vdev_geom_read_guids(struct g_consumer *cp, uint64_t *pguid, uint64_t *vguid)
        g_topology_assert_not();
        if (vdev_geom_read_config(cp, &config) == 0) {
                nvlist_get_guids(config, pguid, vguid);
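/*
 * Taste every provider in the system and attach to the one whose label
 * matches both the requested pool and vdev GUIDs.
 */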
static struct g_consumer *
vdev_geom_attach_by_guids(uint64_t pool_guid, uint64_t vdev_guid)
        struct g_geom *gp, *zgp;
        struct g_provider *pp;
        struct g_consumer *cp, *zcp;
        uint64_t pguid, vguid;
        zgp = g_new_geomf(&zfs_vdev_class, "zfs::vdev::taste");
        /* This orphan function should never be called. */
        zgp->orphan = vdev_geom_taste_orphan;
        zcp = g_new_consumer(zgp);
        LIST_FOREACH(mp, &g_classes, class) {
                if (mp == &zfs_vdev_class)
                LIST_FOREACH(gp, &mp->geom, geom) {
                        if (gp->flags & G_GEOM_WITHER)
                        LIST_FOREACH(pp, &gp->provider, provider) {
                                if (vdev_geom_attach_taster(zcp, pp) != 0)
                                vdev_geom_read_guids(zcp, &pguid, &vguid);
                                vdev_geom_detach_taster(zcp);
                                if (pguid != pool_guid || vguid != vdev_guid)
                                cp = vdev_geom_attach(pp);
                                        printf("ZFS WARNING: Unable to "
                                            "attach to %s.\n", pp->name);
        g_destroy_consumer(zcp);
static struct g_consumer *
vdev_geom_open_by_guids(vdev_t *vd)
        struct g_consumer *cp;
        ZFS_LOG(1, "Searching by guid [%ju].", (uintmax_t)vd->vdev_guid);
        cp = vdev_geom_attach_by_guids(spa_guid(vd->vdev_spa), vd->vdev_guid);
                len = strlen(cp->provider->name) + strlen("/dev/") + 1;
                buf = kmem_alloc(len, KM_SLEEP);
                snprintf(buf, len, "/dev/%s", cp->provider->name);
                spa_strfree(vd->vdev_path);
                ZFS_LOG(1, "Attach by guid [%ju:%ju] succeeded, provider %s.",
                    (uintmax_t)spa_guid(vd->vdev_spa),
                    (uintmax_t)vd->vdev_guid, vd->vdev_path);
                ZFS_LOG(1, "Search by guid [%ju:%ju] failed.",
                    (uintmax_t)spa_guid(vd->vdev_spa),
                    (uintmax_t)vd->vdev_guid);
static struct g_consumer *
vdev_geom_open_by_path(vdev_t *vd, int check_guid)
        struct g_provider *pp;
        struct g_consumer *cp;
        uint64_t pguid, vguid;
        pp = g_provider_by_name(vd->vdev_path + sizeof("/dev/") - 1);
                ZFS_LOG(1, "Found provider by name %s.", vd->vdev_path);
                cp = vdev_geom_attach(pp);
                if (cp != NULL && check_guid && ISP2(pp->sectorsize) &&
                    pp->sectorsize <= VDEV_PAD_SIZE) {
                        vdev_geom_read_guids(cp, &pguid, &vguid);
                        if (pguid != spa_guid(vd->vdev_spa) ||
                            vguid != vd->vdev_guid) {
                                vdev_geom_detach(cp, 0);
                                ZFS_LOG(1, "guid mismatch for provider %s: "
                                    "%ju:%ju != %ju:%ju.", vd->vdev_path,
                                    (uintmax_t)spa_guid(vd->vdev_spa),
                                    (uintmax_t)vd->vdev_guid,
                                    (uintmax_t)pguid, (uintmax_t)vguid);
                                ZFS_LOG(1, "guid match for provider %s.",
vdev_geom_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
        struct g_provider *pp;
        struct g_consumer *cp;
         * We must have a pathname, and it must be absolute.
        if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
                vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
        if (vd->vdev_spa->spa_splitting_newspa ||
            (vd->vdev_prevstate == VDEV_STATE_UNKNOWN &&
            vd->vdev_spa->spa_load_state == SPA_LOAD_NONE)) {
                 * We are dealing with a vdev that hasn't been previously
                 * opened (since boot), and we are not loading an
                 * existing pool configuration. This looks like a
                 * vdev add operation to a new or existing pool.
                 * Assume the user knows what he/she is doing and find
                 * GEOM provider by its name, ignoring GUID mismatches.
                 * XXPOLICY: It would be safer to only allow a device
                 * that is unlabeled or labeled but missing
                 * GUID information to be opened in this fashion,
                 * unless we are doing a split, in which case we
                 * should allow any guid.
                cp = vdev_geom_open_by_path(vd, 0);
                 * Try using the recorded path for this device, but only
                 * accept it if its label data contains the expected GUIDs.
                cp = vdev_geom_open_by_path(vd, 1);
                         * The device at vd->vdev_path doesn't have the
                         * expected GUIDs. The disks might have merely
                         * moved around so try all other GEOM providers
                         * to find one with the right GUIDs.
                        cp = vdev_geom_open_by_guids(vd);
                ZFS_LOG(1, "Provider %s not found.", vd->vdev_path);
        } else if (cp->provider->sectorsize > VDEV_PAD_SIZE ||
            !ISP2(cp->provider->sectorsize)) {
                ZFS_LOG(1, "Provider %s has unsupported sectorsize.",
                vdev_geom_detach(cp, 0);
        } else if (cp->acw == 0 && (spa_mode(vd->vdev_spa) & FWRITE) != 0) {
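                /*
                 * The consumer currently has no write access.  Retry the
                 * write-access request up to five times, half a second
                 * apart, before giving up and detaching.
                 */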
                for (i = 0; i < 5; i++) {
                        error = g_access(cp, 0, 1, 0);
                        tsleep(vd, 0, "vdev", hz / 2);
                        printf("ZFS WARNING: Unable to open %s for writing (error=%d).\n",
                            vd->vdev_path, error);
                        vdev_geom_detach(cp, 0);
                vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
         * Determine the actual size of the device.
        *max_psize = *psize = pp->mediasize;
         * Determine the device's minimum transfer size and preferred
        *logical_ashift = highbit(MAX(pp->sectorsize, SPA_MINBLOCKSIZE)) - 1;
        *physical_ashift = 0;
                *physical_ashift = highbit(pp->stripesize) - 1;
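        /*
         * For example, a 512e disk (512-byte logical, 4096-byte physical
         * sectors) that reports sectorsize 512 and stripesize 4096 yields
         * logical_ashift = highbit(512) - 1 = 9 and physical_ashift =
         * highbit(4096) - 1 = 12.
         */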
         * Clear the nowritecache settings, so that on a vdev_reopen()
        vd->vdev_nowritecache = B_FALSE;
        if (vd->vdev_physpath != NULL)
                spa_strfree(vd->vdev_physpath);
        bufsize = sizeof("/dev/") + strlen(pp->name);
        vd->vdev_physpath = kmem_alloc(bufsize, KM_SLEEP);
        snprintf(vd->vdev_physpath, bufsize, "/dev/%s", pp->name);
         * Determine the device's rotation rate.
        vdev_geom_set_rotation_rate(vd, cp);
vdev_geom_close(vdev_t *vd)
        struct g_consumer *cp;
        vd->vdev_delayed_close = B_FALSE;
        cp->private = NULL;     /* XXX locking */
        g_post_event(vdev_geom_detach, cp, M_WAITOK, NULL);
vdev_geom_io_intr(struct bio *bp)
        zio = bp->bio_caller1;
        zio->io_error = bp->bio_error;
        if (zio->io_error == 0 && bp->bio_resid != 0)
                zio->io_error = SET_ERROR(EIO);
        switch (zio->io_error) {
                 * If we get ENOTSUP for BIO_FLUSH or BIO_DELETE we know
                 * that future attempts will never succeed. In this case
                 * we set a persistent flag so that we don't bother with
                 * requests in the future.
                switch (bp->bio_cmd) {
                        vd->vdev_nowritecache = B_TRUE;
                        vd->vdev_notrim = B_TRUE;
                if (!vd->vdev_remove_wanted) {
                         * If provider's error is set we assume it is being
                        if (bp->bio_to->error != 0) {
                                vd->vdev_remove_wanted = B_TRUE;
                                spa_async_request(zio->io_spa,
                        } else if (!vd->vdev_delayed_close) {
                                vd->vdev_delayed_close = B_TRUE;
vdev_geom_io_start(zio_t *zio)
        struct g_consumer *cp;
        switch (zio->io_type) {
                if (!vdev_readable(vd)) {
                        zio->io_error = SET_ERROR(ENXIO);
                        switch (zio->io_cmd) {
                        case DKIOCFLUSHWRITECACHE:
                                if (zfs_nocacheflush || vdev_geom_bio_flush_disable)
                                if (vd->vdev_nowritecache) {
                                        zio->io_error = SET_ERROR(ENOTSUP);
                                zio->io_error = SET_ERROR(ENOTSUP);
                if (vd->vdev_notrim) {
                        zio->io_error = SET_ERROR(ENOTSUP);
                } else if (!vdev_geom_bio_delete_disable) {
        ASSERT(zio->io_type == ZIO_TYPE_READ ||
            zio->io_type == ZIO_TYPE_WRITE ||
            zio->io_type == ZIO_TYPE_FREE ||
            zio->io_type == ZIO_TYPE_IOCTL);
                zio->io_error = SET_ERROR(ENXIO);
        bp->bio_caller1 = zio;
        switch (zio->io_type) {
                bp->bio_cmd = zio->io_type == ZIO_TYPE_READ ? BIO_READ : BIO_WRITE;
                bp->bio_data = zio->io_data;
                bp->bio_offset = zio->io_offset;
                bp->bio_length = zio->io_size;
                bp->bio_cmd = BIO_DELETE;
                bp->bio_offset = zio->io_offset;
                bp->bio_length = zio->io_size;
                bp->bio_cmd = BIO_FLUSH;
                bp->bio_flags |= BIO_ORDERED;
                bp->bio_offset = cp->provider->mediasize;
        bp->bio_done = vdev_geom_io_intr;
        g_io_request(bp, cp);
vdev_geom_io_done(zio_t *zio)
vdev_geom_hold(vdev_t *vd)
vdev_geom_rele(vdev_t *vd)
vdev_ops_t vdev_geom_ops = {
        VDEV_TYPE_DISK,         /* name of this vdev type */
        B_TRUE                  /* leaf vdev */