/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2006-2010 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 */

/* Portions Copyright 2010 Robert Milkowski */
/* Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org> */
/*
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 *     /dev/zvol/dsk/<pool_name>/<dataset_name>
 *     /dev/zvol/rdsk/<pool_name>/<dataset_name>
 *
 * These links are created by the /dev filesystem (sdev_zvolops.c).
 * Volumes are persistent through reboot.  No user command needs to be
 * run before opening and using a device.
 *
 * On FreeBSD ZVOLs are simply GEOM providers like any other storage device
 * in the system.
 */
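/*
 * For example, after "zfs create -V 10g tank/vol" the volume shows up
 * on FreeBSD as the GEOM provider /dev/zvol/tank/vol and can be used
 * like any other disk (e.g. "newfs /dev/zvol/tank/vol").
 */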
#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/spa_impl.h>
#include <sys/dmu_traverse.h>
#include <sys/dnode.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/byteorder.h>
#include <sys/sunddi.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ioctl.h>
#include <sys/refcount.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_rlock.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_raidz.h>
#include <sys/zil_impl.h>
#include <sys/dmu_tx.h>
#include <sys/zfeature.h>
#include <sys/zio_checksum.h>

#include <geom/geom.h>

#include "zfs_namecheck.h"

struct g_class zfs_zvol_class = {
    .name = "ZFS::ZVOL",
    .version = G_VERSION,
};

DECLARE_GEOM_CLASS(zfs_zvol_class, zfs_zvol);

static char *zvol_tag = "zvol_tag";

#define ZVOL_DUMPSIZE   "dumpsize"

/*
 * The spa_namespace_lock protects the zfsdev_state structure from being
 * modified while it's being used, e.g. an open that comes in before a
 * create finishes.  It also protects temporary opens of the dataset so that,
 * e.g., an open doesn't get a spurious EBUSY.
 */
static uint32_t zvol_minors;

typedef struct zvol_extent {
    dva_t       ze_dva;     /* dva associated with this extent */
    uint64_t    ze_nblks;   /* number of blocks in extent */
} zvol_extent_t;

/*
 * The in-core state of each volume.
 */
typedef struct zvol_state {
    char        zv_name[MAXPATHLEN];    /* pool/dd name */
    uint64_t    zv_volsize;     /* amount of space we advertise */
    uint64_t    zv_volblocksize;    /* volume block size */
    struct g_provider *zv_provider;     /* GEOM provider */
    uint8_t     zv_min_bs;      /* minimum addressable block shift */
    uint8_t     zv_flags;       /* readonly, dumpified, etc. */
    objset_t    *zv_objset;     /* objset handle */
    uint32_t    zv_total_opens; /* total open count */
    zilog_t     *zv_zilog;      /* ZIL handle */
    list_t      zv_extents;     /* List of extents for dump */
    znode_t     zv_znode;       /* for range locking */
    dmu_buf_t   *zv_dbuf;       /* bonus handle */
    int         zv_state;
    struct bio_queue_head zv_queue;
    struct mtx  zv_queue_mtx;   /* zv_queue mutex */
} zvol_state_t;

/*
 * zvol specific flags
 */
#define ZVOL_RDONLY     0x1
#define ZVOL_DUMPIFIED  0x2
#define ZVOL_EXCL       0x4
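/*
 * ZVOL_RDONLY marks a volume that must reject writes (a snapshot, the
 * readonly property, or a non-writeable pool), ZVOL_DUMPIFIED marks a
 * preallocated dump device, and ZVOL_EXCL marks a volume currently held
 * open exclusively (FEXCL), which blocks any other open.
 */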
/*
 * zvol maximum transfer in one DMU tx.
 */
int zvol_maxphys = DMU_MAX_ACCESS/2;

extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
    nvlist_t *, nvlist_t *);
static int zvol_remove_zv(zvol_state_t *);
static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
static int zvol_dumpify(zvol_state_t *zv);
static int zvol_dump_fini(zvol_state_t *zv);
static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);

static zvol_state_t *zvol_geom_create(const char *name);
static void zvol_geom_run(zvol_state_t *zv);
static void zvol_geom_destroy(zvol_state_t *zv);
static int zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace);
static void zvol_geom_start(struct bio *bp);
static void zvol_geom_worker(void *arg);

static void
zvol_size_changed(zvol_state_t *zv)
{
#ifdef sun
    dev_t dev = makedevice(maj, min);

    VERIFY(ddi_prop_update_int64(dev, zfs_dip,
        "Size", volsize) == DDI_SUCCESS);
    VERIFY(ddi_prop_update_int64(dev, zfs_dip,
        "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);

    /* Notify specfs to invalidate the cached size */
    spec_size_invalidate(dev, VBLK);
    spec_size_invalidate(dev, VCHR);
#else
    struct g_provider *pp;

    pp = zv->zv_provider;

    if (zv->zv_volsize == pp->mediasize)
        return;
    /*
     * Changing provider size is not really supported by GEOM, but it
     * should be safe when provider is closed.
     */
    if (zv->zv_total_opens > 0)
        return;
    pp->mediasize = zv->zv_volsize;
#endif
}

static int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
    if (volsize == 0)
        return (SET_ERROR(EINVAL));

    if (volsize % blocksize != 0)
        return (SET_ERROR(EINVAL));

    if (volsize - 1 > SPEC_MAXOFFSET_T)
        return (SET_ERROR(EOVERFLOW));

    return (0);
}

static int
zvol_check_volblocksize(uint64_t volblocksize)
{
    if (volblocksize < SPA_MINBLOCKSIZE ||
        volblocksize > SPA_MAXBLOCKSIZE ||
        !ISP2(volblocksize))
        return (SET_ERROR(EDOM));

    return (0);
}
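/*
 * In practice this means volblocksize must be a power of two between
 * SPA_MINBLOCKSIZE (512 bytes) and SPA_MAXBLOCKSIZE (128K at this point
 * in ZFS history): e.g. the default 8K volblocksize passes, while 3K
 * (not a power of two) or 256K (too large) would be rejected with EDOM.
 */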
int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
    int error;
    dmu_object_info_t doi;
    uint64_t val;

    error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
    if (error)
        return (error);

    dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);

    error = dmu_object_info(os, ZVOL_OBJ, &doi);

    if (error == 0) {
        dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
            doi.doi_data_block_size);
    }

    return (error);
}

static zvol_state_t *
zvol_minor_lookup(const char *name)
{
    struct g_provider *pp;
    struct g_geom *gp;
    zvol_state_t *zv = NULL;

    ASSERT(MUTEX_HELD(&spa_namespace_lock));

    LIST_FOREACH(gp, &zfs_zvol_class.geom, geom) {
        pp = LIST_FIRST(&gp->provider);
        if (pp == NULL)
            continue;
        zv = pp->private;
        if (zv == NULL)
            continue;
        if (strcmp(zv->zv_name, name) == 0)
            break;
    }

    return (gp != NULL ? zv : NULL);
}

/* extent mapping arg */
struct maparg {
    zvol_state_t    *ma_zv;
    uint64_t        ma_blks;
};

/*ARGSUSED*/
static int
zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
    struct maparg *ma = arg;
    zvol_extent_t *ze;
    int bs = ma->ma_zv->zv_volblocksize;

    if (BP_IS_HOLE(bp) ||
        zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
        return (0);

    VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
    ma->ma_blks++;

    /* Abort immediately if we have encountered gang blocks */
    if (BP_IS_GANG(bp))
        return (SET_ERROR(EFRAGS));

    /*
     * See if the block is at the end of the previous extent.
     */
    ze = list_tail(&ma->ma_zv->zv_extents);
    if (ze &&
        DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
        DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
        DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
        ze->ze_nblks++;
        return (0);
    }

    dprintf_bp(bp, "%s", "next blkptr:");

    /* start a new extent */
    ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
    ze->ze_dva = bp->blk_dva[0];    /* structure assignment */
    ze->ze_nblks = 1;
    list_insert_tail(&ma->ma_zv->zv_extents, ze);
    return (0);
}

static void
zvol_free_extents(zvol_state_t *zv)
{
    zvol_extent_t *ze;

    while (ze = list_head(&zv->zv_extents)) {
        list_remove(&zv->zv_extents, ze);
        kmem_free(ze, sizeof (zvol_extent_t));
    }
}

static int
zvol_get_lbas(zvol_state_t *zv)
{
    objset_t *os = zv->zv_objset;
    struct maparg ma;
    int err;

    ma.ma_zv = zv;
    ma.ma_blks = 0;
    zvol_free_extents(zv);

    /* commit any in-flight changes before traversing the dataset */
    txg_wait_synced(dmu_objset_pool(os), 0);
    err = traverse_dataset(dmu_objset_ds(os), 0,
        TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
    if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
        zvol_free_extents(zv);
        return (err ? err : EIO);
    }

    return (0);
}

/* ARGSUSED */
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
    zfs_creat_t *zct = arg;
    nvlist_t *nvprops = zct->zct_props;
    int error;
    uint64_t volblocksize, volsize;

    VERIFY(nvlist_lookup_uint64(nvprops,
        zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
    if (nvlist_lookup_uint64(nvprops,
        zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
        volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

    /*
     * These properties must be removed from the list so the generic
     * property setting step won't apply to them.
     */
    VERIFY(nvlist_remove_all(nvprops,
        zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
    (void) nvlist_remove_all(nvprops,
        zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

    error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
        DMU_OT_NONE, 0, tx);
    ASSERT(error == 0);

    error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
        DMU_OT_NONE, 0, tx);
    ASSERT(error == 0);

    error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
    ASSERT(error == 0);
}
/*
 * Replay a TX_WRITE ZIL transaction that didn't get committed
 * after a system failure.
 */
static int
zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
{
    objset_t *os = zv->zv_objset;
    char *data = (char *)(lr + 1);  /* data follows lr_write_t */
    uint64_t offset, length;
    dmu_tx_t *tx;
    int error;

    if (byteswap)
        byteswap_uint64_array(lr, sizeof (*lr));

    offset = lr->lr_offset;
    length = lr->lr_length;

    /* If it's a dmu_sync() block, write the whole block */
    if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
        uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
        if (length < blocksize) {
            offset -= offset % blocksize;
            length = blocksize;
        }
    }

    tx = dmu_tx_create(os);
    dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
    error = dmu_tx_assign(tx, TXG_WAIT);
    if (error) {
        dmu_tx_abort(tx);
    } else {
        dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
        dmu_tx_commit(tx);
    }

    return (error);
}

/* ARGSUSED */
static int
zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
{
    return (SET_ERROR(ENOTSUP));
}

/*
 * Callback vectors for replaying records.
 * Only TX_WRITE is needed for zvol.
 */
zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
    zvol_replay_err,    /* 0 no such transaction type */
    zvol_replay_err,    /* TX_CREATE */
    zvol_replay_err,    /* TX_MKDIR */
    zvol_replay_err,    /* TX_MKXATTR */
    zvol_replay_err,    /* TX_SYMLINK */
    zvol_replay_err,    /* TX_REMOVE */
    zvol_replay_err,    /* TX_RMDIR */
    zvol_replay_err,    /* TX_LINK */
    zvol_replay_err,    /* TX_RENAME */
    zvol_replay_write,  /* TX_WRITE */
    zvol_replay_err,    /* TX_TRUNCATE */
    zvol_replay_err,    /* TX_SETATTR */
    zvol_replay_err,    /* TX_ACL */
    zvol_replay_err,    /* TX_CREATE_ACL */
    zvol_replay_err,    /* TX_CREATE_ATTR */
    zvol_replay_err,    /* TX_CREATE_ACL_ATTR */
    zvol_replay_err,    /* TX_MKDIR_ACL */
    zvol_replay_err,    /* TX_MKDIR_ATTR */
    zvol_replay_err,    /* TX_MKDIR_ACL_ATTR */
    zvol_replay_err,    /* TX_WRITE2 */
};

int
zvol_name2minor(const char *name, minor_t *minor)
{
    zvol_state_t *zv;

    mutex_enter(&spa_namespace_lock);
    zv = zvol_minor_lookup(name);
    if (minor && zv)
        *minor = zv->zv_minor;
    mutex_exit(&spa_namespace_lock);
    return (zv ? 0 : -1);
}
/*
 * Create a minor node (plus a whole lot more) for the specified volume.
 */
int
zvol_create_minor(const char *name)
{
    zfs_soft_state_t *zs;
    zvol_state_t *zv;
    objset_t *os;
    dmu_object_info_t doi;
    uint64_t volsize;
    int error;

    ZFS_LOG(1, "Creating ZVOL %s...", name);

    mutex_enter(&spa_namespace_lock);

    if (zvol_minor_lookup(name) != NULL) {
        mutex_exit(&spa_namespace_lock);
        return (SET_ERROR(EEXIST));
    }

    /* lie and say we're read-only */
    error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);
    if (error) {
        mutex_exit(&spa_namespace_lock);
        return (error);
    }

    if ((minor = zfsdev_minor_alloc()) == 0) {
        dmu_objset_disown(os, FTAG);
        mutex_exit(&spa_namespace_lock);
        return (SET_ERROR(ENXIO));
    }

    if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
        dmu_objset_disown(os, FTAG);
        mutex_exit(&spa_namespace_lock);
        return (SET_ERROR(EAGAIN));
    }
    (void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
        (char *)name);

    (void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);

    if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
        minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
        ddi_soft_state_free(zfsdev_state, minor);
        dmu_objset_disown(os, FTAG);
        mutex_exit(&spa_namespace_lock);
        return (SET_ERROR(EAGAIN));
    }

    (void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);

    if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
        minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
        ddi_remove_minor_node(zfs_dip, chrbuf);
        ddi_soft_state_free(zfsdev_state, minor);
        dmu_objset_disown(os, FTAG);
        mutex_exit(&spa_namespace_lock);
        return (SET_ERROR(EAGAIN));
    }

    zs = ddi_get_soft_state(zfsdev_state, minor);
    zs->zss_type = ZSST_ZVOL;
    zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);

    error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
    if (error) {
        dmu_objset_disown(os, zvol_tag);
        mutex_exit(&spa_namespace_lock);
        return (error);
    }

    zv = zvol_geom_create(name);
    zv->zv_volsize = volsize;
    zv->zv_provider->mediasize = zv->zv_volsize;

    (void) strlcpy(zv->zv_name, name, MAXPATHLEN);
    zv->zv_min_bs = DEV_BSHIFT;
    zv->zv_objset = os;
    if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
        zv->zv_flags |= ZVOL_RDONLY;
    mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
    avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
        sizeof (rl_t), offsetof(rl_t, r_node));
    list_create(&zv->zv_extents, sizeof (zvol_extent_t),
        offsetof(zvol_extent_t, ze_node));
    /* get and cache the blocksize */
    error = dmu_object_info(os, ZVOL_OBJ, &doi);
    ASSERT(error == 0);
    zv->zv_volblocksize = doi.doi_data_block_size;

    if (spa_writeable(dmu_objset_spa(os))) {
        if (zil_replay_disable)
            zil_destroy(dmu_objset_zil(os), B_FALSE);
        else
            zil_replay(os, zv, zvol_replay_vector);
    }
    dmu_objset_disown(os, FTAG);
    zv->zv_objset = NULL;

    zvol_minors++;

    mutex_exit(&spa_namespace_lock);

    zvol_geom_run(zv);

    ZFS_LOG(1, "ZVOL %s created.", name);

    return (0);
}
/*
 * Remove minor node for the specified volume.
 */
static int
zvol_remove_zv(zvol_state_t *zv)
{
    char nmbuf[20];
    minor_t minor = zv->zv_minor;

    ASSERT(MUTEX_HELD(&spa_namespace_lock));
    if (zv->zv_total_opens != 0)
        return (SET_ERROR(EBUSY));

    ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);

    (void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
    ddi_remove_minor_node(zfs_dip, nmbuf);

    avl_destroy(&zv->zv_znode.z_range_avl);
    mutex_destroy(&zv->zv_znode.z_range_lock);

    zvol_geom_destroy(zv);

    zvol_minors--;
    return (0);
}

int
zvol_remove_minor(const char *name)
{
    zvol_state_t *zv;
    int rc;

    mutex_enter(&spa_namespace_lock);
    if ((zv = zvol_minor_lookup(name)) == NULL) {
        mutex_exit(&spa_namespace_lock);
        return (SET_ERROR(ENXIO));
    }
    rc = zvol_remove_zv(zv);
    mutex_exit(&spa_namespace_lock);
    return (rc);
}

int
zvol_first_open(zvol_state_t *zv)
{
    objset_t *os;
    uint64_t volsize;
    int error;
    boolean_t readonly;

    /* lie and say we're read-only */
    error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
        zvol_tag, &os);
    if (error)
        return (error);

    zv->zv_objset = os;
    error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
    if (error) {
        dmu_objset_disown(os, zvol_tag);
        return (error);
    }

    error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
    if (error) {
        dmu_objset_disown(os, zvol_tag);
        return (error);
    }

    zv->zv_volsize = volsize;
    zv->zv_zilog = zil_open(os, zvol_get_data);
    zvol_size_changed(zv);

    VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
        NULL) == 0);
    if (readonly || dmu_objset_is_snapshot(os) ||
        !spa_writeable(dmu_objset_spa(os)))
        zv->zv_flags |= ZVOL_RDONLY;
    else
        zv->zv_flags &= ~ZVOL_RDONLY;
    return (error);
}

void
zvol_last_close(zvol_state_t *zv)
{
    zil_close(zv->zv_zilog);
    zv->zv_zilog = NULL;

    dmu_buf_rele(zv->zv_dbuf, zvol_tag);
    zv->zv_dbuf = NULL;

    /*
     * Evict cached data.
     */
    if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
        !(zv->zv_flags & ZVOL_RDONLY))
        txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
    dmu_objset_evict_dbufs(zv->zv_objset);

    dmu_objset_disown(zv->zv_objset, zvol_tag);
    zv->zv_objset = NULL;
}

static int
zvol_prealloc(zvol_state_t *zv)
{
    objset_t *os = zv->zv_objset;
    dmu_tx_t *tx;
    uint64_t refd, avail, usedobjs, availobjs;
    uint64_t resid = zv->zv_volsize;
    uint64_t off = 0;

    /* Check the space usage before attempting to allocate the space */
    dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
    if (avail < zv->zv_volsize)
        return (SET_ERROR(ENOSPC));

    /* Free old extents if they exist */
    zvol_free_extents(zv);

    while (resid != 0) {
        int error;
        uint64_t bytes = MIN(resid, SPA_MAXBLOCKSIZE);

        tx = dmu_tx_create(os);
        dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
        error = dmu_tx_assign(tx, TXG_WAIT);
        if (error) {
            dmu_tx_abort(tx);
            (void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
            return (error);
        }
        dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
        dmu_tx_commit(tx);
        off += bytes;
        resid -= bytes;
    }
    txg_wait_synced(dmu_objset_pool(os), 0);

    return (0);
}
static int
zvol_update_volsize(objset_t *os, uint64_t volsize)
{
    dmu_tx_t *tx;
    int error;

    ASSERT(MUTEX_HELD(&spa_namespace_lock));

    tx = dmu_tx_create(os);
    dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
    error = dmu_tx_assign(tx, TXG_WAIT);
    if (error) {
        dmu_tx_abort(tx);
        return (error);
    }

    error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
        &volsize, tx);
    dmu_tx_commit(tx);

    if (error == 0)
        error = dmu_free_long_range(os,
            ZVOL_OBJ, volsize, DMU_OBJECT_END);
    return (error);
}

void
zvol_remove_minors(const char *name)
{
    struct g_geom *gp, *gptmp;
    struct g_provider *pp;
    zvol_state_t *zv;
    size_t namelen;

    namelen = strlen(name);

    mutex_enter(&spa_namespace_lock);
    g_topology_lock();

    LIST_FOREACH_SAFE(gp, &zfs_zvol_class.geom, geom, gptmp) {
        pp = LIST_FIRST(&gp->provider);
        if (pp == NULL)
            continue;
        zv = pp->private;
        if (zv == NULL)
            continue;
        if (strcmp(zv->zv_name, name) == 0 ||
            (strncmp(zv->zv_name, name, namelen) == 0 &&
            zv->zv_name[namelen] == '/')) {
            (void) zvol_remove_zv(zv);
        }
    }

    g_topology_unlock();
    mutex_exit(&spa_namespace_lock);
}

int
zvol_set_volsize(const char *name, major_t maj, uint64_t volsize)
{
    zvol_state_t *zv = NULL;
    objset_t *os;
    int error;
    dmu_object_info_t doi;
    uint64_t old_volsize = 0ULL;
    uint64_t readonly;

    mutex_enter(&spa_namespace_lock);
    zv = zvol_minor_lookup(name);
    if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
        mutex_exit(&spa_namespace_lock);
        return (error);
    }

    if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
        (error = zvol_check_volsize(volsize,
        doi.doi_data_block_size)) != 0)
        goto out;

    VERIFY(dsl_prop_get_integer(name, "readonly", &readonly,
        NULL) == 0);
    if (readonly) {
        error = SET_ERROR(EROFS);
        goto out;
    }

    error = zvol_update_volsize(os, volsize);
    /*
     * Reinitialize the dump area to the new size.  If we
     * failed to resize the dump area then restore it back to
     * its original size.
     */
    if (zv && error == 0) {
#ifdef ZVOL_DUMP
        if (zv->zv_flags & ZVOL_DUMPIFIED) {
            old_volsize = zv->zv_volsize;
            zv->zv_volsize = volsize;
            if ((error = zvol_dumpify(zv)) != 0 ||
                (error = dumpvp_resize()) != 0) {
                (void) zvol_update_volsize(os, old_volsize);
                zv->zv_volsize = old_volsize;
                error = zvol_dumpify(zv);
            }
        }
#endif  /* ZVOL_DUMP */
        zv->zv_volsize = volsize;
        zvol_size_changed(zv);
    }

    /*
     * Generate a LUN expansion event.
     */
    if (zv && error == 0) {
        sysevent_id_t eid;
        nvlist_t *attr;
        char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

        (void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
            zv->zv_minor);

        VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
        VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);

        (void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
            ESC_DEV_DLE, attr, &eid, DDI_SLEEP);

        nvlist_free(attr);
        kmem_free(physpath, MAXPATHLEN);
    }

out:
    dmu_objset_rele(os, FTAG);

    mutex_exit(&spa_namespace_lock);

    return (error);
}
/*ARGSUSED*/
int
zvol_open(struct g_provider *pp, int flag, int count)
{
    zvol_state_t *zv;
    int err = 0;
    boolean_t locked = B_FALSE;

    /*
     * Protect against recursively entering spa_namespace_lock
     * when spa_open() is used for a pool on top of (local) ZVOLs.
     * This is needed since we replaced upstream zfsdev_state_lock
     * with spa_namespace_lock in the ZVOL code.
     * We are using the same trick as spa_open().
     * Note that calls in zvol_first_open which need to resolve
     * pool name to a spa object will enter spa_open()
     * recursively, but that function already has all the
     * necessary protection.
     */
    if (!MUTEX_HELD(&spa_namespace_lock)) {
        mutex_enter(&spa_namespace_lock);
        locked = B_TRUE;
    }

    zv = pp->private;
    if (zv == NULL) {
        if (locked)
            mutex_exit(&spa_namespace_lock);
        return (SET_ERROR(ENXIO));
    }

    if (zv->zv_total_opens == 0) {
        err = zvol_first_open(zv);
        if (err) {
            if (locked)
                mutex_exit(&spa_namespace_lock);
            return (err);
        }
        pp->mediasize = zv->zv_volsize;
        pp->stripeoffset = 0;
        pp->stripesize = zv->zv_volblocksize;
    }
    if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
        err = SET_ERROR(EROFS);
        goto out;
    }
    if (zv->zv_flags & ZVOL_EXCL) {
        err = SET_ERROR(EBUSY);
        goto out;
    }
    if (flag & FEXCL) {
        if (zv->zv_total_opens != 0) {
            err = SET_ERROR(EBUSY);
            goto out;
        }
        zv->zv_flags |= ZVOL_EXCL;
    }

    zv->zv_total_opens += count;
    if (locked)
        mutex_exit(&spa_namespace_lock);

    return (err);
out:
    if (zv->zv_total_opens == 0)
        zvol_last_close(zv);
    if (locked)
        mutex_exit(&spa_namespace_lock);
    return (err);
}
/*ARGSUSED*/
int
zvol_close(struct g_provider *pp, int flag, int count)
{
    zvol_state_t *zv;
    int error = 0;
    boolean_t locked = B_FALSE;

    /* See comment in zvol_open(). */
    if (!MUTEX_HELD(&spa_namespace_lock)) {
        mutex_enter(&spa_namespace_lock);
        locked = B_TRUE;
    }

    zv = pp->private;
    if (zv == NULL) {
        if (locked)
            mutex_exit(&spa_namespace_lock);
        return (SET_ERROR(ENXIO));
    }

    if (zv->zv_flags & ZVOL_EXCL) {
        ASSERT(zv->zv_total_opens == 1);
        zv->zv_flags &= ~ZVOL_EXCL;
    }

    /*
     * If the open count is zero, this is a spurious close.
     * That indicates a bug in the kernel / DDI framework.
     */
    ASSERT(zv->zv_total_opens != 0);

    /*
     * You may get multiple opens, but only one close.
     */
    zv->zv_total_opens -= count;

    if (zv->zv_total_opens == 0)
        zvol_last_close(zv);

    if (locked)
        mutex_exit(&spa_namespace_lock);
    return (error);
}

static void
zvol_get_done(zgd_t *zgd, int error)
{
    if (zgd->zgd_db)
        dmu_buf_rele(zgd->zgd_db, zgd);

    zfs_range_unlock(zgd->zgd_rl);

    if (error == 0 && zgd->zgd_bp)
        zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

    kmem_free(zgd, sizeof (zgd_t));
}

/*
 * Get data to generate a TX_WRITE intent log record.
 */
static int
zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
    zvol_state_t *zv = arg;
    objset_t *os = zv->zv_objset;
    uint64_t object = ZVOL_OBJ;
    uint64_t offset = lr->lr_offset;
    uint64_t size = lr->lr_length;  /* length of user data */
    blkptr_t *bp = &lr->lr_blkptr;
    dmu_buf_t *db;
    zgd_t *zgd;
    int error;

    ASSERT(zio != NULL);
    ASSERT(size != 0);

    zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
    zgd->zgd_zilog = zv->zv_zilog;
    zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

    /*
     * Write records come in two flavors: immediate and indirect.
     * For small writes it's cheaper to store the data with the
     * log record (immediate); for large writes it's cheaper to
     * sync the data and get a pointer to it (indirect) so that
     * we don't have to write the data twice.
     */
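    /*
     * Concretely: buf != NULL is the immediate (WR_NEED_COPY) case,
     * where we copy the volume data into the caller's log buffer;
     * buf == NULL is the indirect (WR_INDIRECT) case, where we hold
     * the whole block and let dmu_sync() write it in place, leaving
     * only its block pointer in the log record.
     */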
    if (buf != NULL) {  /* immediate write */
        error = dmu_read(os, object, offset, size, buf,
            DMU_READ_NO_PREFETCH);
    } else {
        size = zv->zv_volblocksize;
        offset = P2ALIGN(offset, size);
        error = dmu_buf_hold(os, object, offset, zgd, &db,
            DMU_READ_NO_PREFETCH);
        if (error == 0) {
            blkptr_t *obp = dmu_buf_get_blkptr(db);
            if (obp) {
                ASSERT(BP_IS_HOLE(bp));
                *bp = *obp;
            }

            zgd->zgd_db = db;
            zgd->zgd_bp = bp;

            ASSERT(db->db_offset == offset);
            ASSERT(db->db_size == size);

            error = dmu_sync(zio, lr->lr_common.lrc_txg,
                zvol_get_done, zgd);

            if (error == 0)
                return (0);
        }
    }

    zvol_get_done(zgd, error);

    return (error);
}
/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL
 * transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;
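/*
 * For example, with the default 32K threshold and a 128K volblocksize,
 * a small 4K write is copied into its log record (WR_COPIED or
 * WR_NEED_COPY), while a full block-aligned 128K write is logged as
 * WR_INDIRECT and written only once via dmu_sync(), assuming no
 * separate log device is in use.
 */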
static void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
    uint32_t blocksize = zv->zv_volblocksize;
    zilog_t *zilog = zv->zv_zilog;
    boolean_t slogging;
    ssize_t immediate_write_sz;

    if (zil_replaying(zilog, tx))
        return;

    immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
        ? 0 : zvol_immediate_write_sz;

    slogging = spa_has_slogs(zilog->zl_spa) &&
        (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

    while (resid) {
        itx_t *itx;
        lr_write_t *lr;
        ssize_t len;
        itx_wr_state_t write_state;

        /*
         * Unlike zfs_log_write() we can be called with
         * up to DMU_MAX_ACCESS/2 (5MB) writes.
         */
        if (blocksize > immediate_write_sz && !slogging &&
            resid >= blocksize && off % blocksize == 0) {
            write_state = WR_INDIRECT; /* uses dmu_sync */
            len = blocksize;
        } else if (sync) {
            write_state = WR_COPIED;
            len = MIN(ZIL_MAX_LOG_DATA, resid);
        } else {
            write_state = WR_NEED_COPY;
            len = MIN(ZIL_MAX_LOG_DATA, resid);
        }

        itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
            (write_state == WR_COPIED ? len : 0));
        lr = (lr_write_t *)&itx->itx_lr;
        if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
            ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
            zil_itx_destroy(itx);
            itx = zil_itx_create(TX_WRITE, sizeof (*lr));
            lr = (lr_write_t *)&itx->itx_lr;
            write_state = WR_NEED_COPY;
        }

        itx->itx_wr_state = write_state;
        if (write_state == WR_NEED_COPY)
            itx->itx_sod += len;
        lr->lr_foid = ZVOL_OBJ;
        lr->lr_offset = off;
        lr->lr_length = len;
        BP_ZERO(&lr->lr_blkptr);

        itx->itx_private = zv;
        itx->itx_sync = sync;

        zil_itx_assign(zilog, itx, tx);

        off += len;
        resid -= len;
    }
}
static int
zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t origoffset,
    uint64_t size, boolean_t doread, boolean_t isdump)
{
    vdev_disk_t *dvd;
    int c;
    int numerrors = 0;

    if (vd->vdev_ops == &vdev_mirror_ops ||
        vd->vdev_ops == &vdev_replacing_ops ||
        vd->vdev_ops == &vdev_spare_ops) {
        for (c = 0; c < vd->vdev_children; c++) {
            int err = zvol_dumpio_vdev(vd->vdev_child[c],
                addr, offset, origoffset, size, doread, isdump);
            if (err != 0) {
                numerrors++;
            } else if (doread) {
                break;
            }
        }
    }

    if (!vd->vdev_ops->vdev_op_leaf && vd->vdev_ops != &vdev_raidz_ops)
        return (numerrors < vd->vdev_children ? 0 : EIO);

    if (doread && !vdev_readable(vd))
        return (SET_ERROR(EIO));
    else if (!doread && !vdev_writeable(vd))
        return (SET_ERROR(EIO));

    if (vd->vdev_ops == &vdev_raidz_ops) {
        return (vdev_raidz_physio(vd,
            addr, size, offset, origoffset, doread, isdump));
    }

    offset += VDEV_LABEL_START_SIZE;

    if (ddi_in_panic() || isdump) {
        if (doread)
            return (SET_ERROR(EIO));
        dvd = vd->vdev_tsd;
        ASSERT3P(dvd, !=, NULL);
        return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
            lbtodb(size)));
    } else {
        dvd = vd->vdev_tsd;
        ASSERT3P(dvd, !=, NULL);
        return (vdev_disk_ldi_physio(dvd->vd_lh, addr, size,
            offset, doread ? B_READ : B_WRITE));
    }
}

static int
zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
    boolean_t doread, boolean_t isdump)
{
    vdev_t *vd;
    int error;
    zvol_extent_t *ze;
    spa_t *spa = dmu_objset_spa(zv->zv_objset);

    /* Must be sector aligned, and not straddle a block boundary. */
    if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
        P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
        return (SET_ERROR(EINVAL));
    }
    ASSERT(size <= zv->zv_volblocksize);

    /* Locate the extent this belongs to */
    ze = list_head(&zv->zv_extents);
    while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
        offset -= ze->ze_nblks * zv->zv_volblocksize;
        ze = list_next(&zv->zv_extents, ze);
    }

    if (ze == NULL)
        return (SET_ERROR(EINVAL));

    if (!ddi_in_panic())
        spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

    vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
    offset += DVA_GET_OFFSET(&ze->ze_dva);
    error = zvol_dumpio_vdev(vd, addr, offset, DVA_GET_OFFSET(&ze->ze_dva),
        size, doread, isdump);

    if (!ddi_in_panic())
        spa_config_exit(spa, SCL_STATE, FTAG);

    return (error);
}
void
zvol_strategy(struct bio *bp)
{
    zvol_state_t *zv = bp->bio_to->private;
    uint64_t off, volsize;
    size_t resid;
    char *addr;
    objset_t *os;
    rl_t *rl;
    int error = 0;
    boolean_t doread = (bp->bio_cmd == BIO_READ);
    boolean_t is_dumpified;
    boolean_t sync;

    if (zv == NULL) {
        g_io_deliver(bp, ENXIO);
        return;
    }

    if (bp->bio_cmd != BIO_READ && (zv->zv_flags & ZVOL_RDONLY)) {
        g_io_deliver(bp, EROFS);
        return;
    }

    off = bp->bio_offset;
    volsize = zv->zv_volsize;

    os = zv->zv_objset;
    ASSERT(os != NULL);

    addr = bp->bio_data;
    resid = bp->bio_length;

    if (resid > 0 && (off < 0 || off >= volsize)) {
        g_io_deliver(bp, EIO);
        return;
    }

#ifdef ZVOL_DUMP
    is_dumpified = zv->zv_flags & ZVOL_DUMPIFIED;
#else
    is_dumpified = B_FALSE;
#endif
    sync = !doread && !is_dumpified &&
        zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;

    /*
     * There must be no buffer changes when doing a dmu_sync() because
     * we can't change the data whilst calculating the checksum.
     */
    rl = zfs_range_lock(&zv->zv_znode, off, resid,
        doread ? RL_READER : RL_WRITER);

    while (resid != 0 && off < volsize) {
        size_t size = MIN(resid, zvol_maxphys);
        if (is_dumpified) {
            size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
            error = zvol_dumpio(zv, addr, off, size,
                doread, B_FALSE);
        } else if (doread) {
            error = dmu_read(os, ZVOL_OBJ, off, size, addr,
                DMU_READ_PREFETCH);
        } else {
            dmu_tx_t *tx = dmu_tx_create(os);
            dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
            error = dmu_tx_assign(tx, TXG_WAIT);
            if (error) {
                dmu_tx_abort(tx);
            } else {
                dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
                zvol_log_write(zv, tx, off, size, sync);
                dmu_tx_commit(tx);
            }
        }
        if (error) {
            /* convert checksum errors into IO errors */
            if (error == ECKSUM)
                error = SET_ERROR(EIO);
            break;
        }
        off += size;
        addr += size;
        resid -= size;
    }

    zfs_range_unlock(rl);

    bp->bio_completed = bp->bio_length - resid;
    if (bp->bio_completed < bp->bio_length)
        bp->bio_error = (off > volsize ? EINVAL : error);

    if (sync)
        zil_commit(zv->zv_zilog, ZVOL_OBJ);
    g_io_deliver(bp, 0);
}

/*
 * Set the buffer count to the zvol maximum transfer.
 * Using our own routine instead of the default minphys()
 * means that for larger writes we write bigger buffers on X86
 * (128K instead of 56K) and flush the disk write cache less often
 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
 * 56K on X86 and 128K on sparc).
 */
void
zvol_minphys(struct buf *bp)
{
    if (bp->b_bcount > zvol_maxphys)
        bp->b_bcount = zvol_maxphys;
}
int
zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
{
    minor_t minor = getminor(dev);
    zvol_state_t *zv;
    int error = 0;
    uint64_t size;
    uint64_t boff;
    uint64_t resid;

    zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
    if (zv == NULL)
        return (SET_ERROR(ENXIO));

    if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0)
        return (SET_ERROR(EINVAL));

    boff = ldbtob(blkno);
    resid = ldbtob(nblocks);

    VERIFY3U(boff + resid, <=, zv->zv_volsize);

    while (resid) {
        size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
        error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
        if (error)
            break;
        boff += size;
        addr += size;
        resid -= size;
    }

    return (error);
}

/*ARGSUSED*/
int
zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
{
    minor_t minor = getminor(dev);
    zvol_state_t *zv;
    uint64_t volsize;
    rl_t *rl;
    int error = 0;

    zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
    if (zv == NULL)
        return (SET_ERROR(ENXIO));

    volsize = zv->zv_volsize;
    if (uio->uio_resid > 0 &&
        (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
        return (SET_ERROR(EIO));

    if (zv->zv_flags & ZVOL_DUMPIFIED) {
        error = physio(zvol_strategy, NULL, dev, B_READ,
            zvol_minphys, uio);
        return (error);
    }

    rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
        RL_READER);
    while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
        uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);

        /* don't read past the end */
        if (bytes > volsize - uio->uio_loffset)
            bytes = volsize - uio->uio_loffset;

        error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
        if (error) {
            /* convert checksum errors into IO errors */
            if (error == ECKSUM)
                error = SET_ERROR(EIO);
            break;
        }
    }
    zfs_range_unlock(rl);
    return (error);
}

/*ARGSUSED*/
int
zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
{
    minor_t minor = getminor(dev);
    zvol_state_t *zv;
    uint64_t volsize;
    rl_t *rl;
    int error = 0;
    boolean_t sync;

    zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
    if (zv == NULL)
        return (SET_ERROR(ENXIO));

    volsize = zv->zv_volsize;
    if (uio->uio_resid > 0 &&
        (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
        return (SET_ERROR(EIO));

    if (zv->zv_flags & ZVOL_DUMPIFIED) {
        error = physio(zvol_strategy, NULL, dev, B_WRITE,
            zvol_minphys, uio);
        return (error);
    }

    sync = !(zv->zv_flags & ZVOL_WCE) ||
        (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);

    rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
        RL_WRITER);
    while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
        uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
        uint64_t off = uio->uio_loffset;
        dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);

        if (bytes > volsize - off)  /* don't write past the end */
            bytes = volsize - off;

        dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
        error = dmu_tx_assign(tx, TXG_WAIT);
        if (error) {
            dmu_tx_abort(tx);
            break;
        }
        error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
        if (error == 0)
            zvol_log_write(zv, tx, off, bytes, sync);
        dmu_tx_commit(tx);

        if (error)
            break;
    }
    zfs_range_unlock(rl);
    if (sync)
        zil_commit(zv->zv_zilog, ZVOL_OBJ);
    return (error);
}
int
zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
{
    struct uuid uuid = EFI_RESERVED;
    efi_gpe_t gpe = { 0 };
    uint32_t crc;
    dk_efi_t efi;
    uint64_t length;
    char *ptr;

    if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
        return (SET_ERROR(EFAULT));
    ptr = (char *)(uintptr_t)efi.dki_data_64;
    length = efi.dki_length;
    /*
     * Some clients may attempt to request a PMBR for the
     * zvol.  Currently this interface will return EINVAL to
     * such requests.  These requests could be supported by
     * adding a check for lba == 0 and consing up an appropriate
     * PMBR.
     */
    if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
        return (SET_ERROR(EINVAL));

    gpe.efi_gpe_StartingLBA = LE_64(34ULL);
    gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
    UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);

    if (efi.dki_lba == 1) {
        efi_gpt_t gpt = { 0 };

        gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
        gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
        gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
        gpt.efi_gpt_MyLBA = LE_64(1ULL);
        gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
        gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
        gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
        gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
        gpt.efi_gpt_SizeOfPartitionEntry =
            LE_32(sizeof (efi_gpe_t));
        CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
        gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
        CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
        gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
        if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
            flag))
            return (SET_ERROR(EFAULT));
        ptr += sizeof (gpt);
        length -= sizeof (gpt);
    }
    if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
        length), flag))
        return (SET_ERROR(EFAULT));
    return (0);
}
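/*
 * The layout synthesized above: LBA 1 holds the GPT header, LBA 2 the
 * single partition entry, and the one EFI_RESERVED partition spans
 * LBA 34 through (vs >> bs) - 1, i.e. the whole volume minus the
 * standard GPT reserved area.
 */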
/*
 * BEGIN entry points to allow external callers access to the volume.
 */
/*
 * Return the volume parameters needed for access from an external caller.
 * These values are invariant as long as the volume is held open.
 */
int
zvol_get_volume_params(minor_t minor, uint64_t *blksize,
    uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
    void **rl_hdl, void **bonus_hdl)
{
    zvol_state_t *zv;

    zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
    if (zv == NULL)
        return (SET_ERROR(ENXIO));
    if (zv->zv_flags & ZVOL_DUMPIFIED)
        return (SET_ERROR(ENXIO));

    ASSERT(blksize && max_xfer_len && minor_hdl &&
        objset_hdl && zil_hdl && rl_hdl && bonus_hdl);

    *blksize = zv->zv_volblocksize;
    *max_xfer_len = (uint64_t)zvol_maxphys;
    *minor_hdl = zv;
    *objset_hdl = zv->zv_objset;
    *zil_hdl = zv->zv_zilog;
    *rl_hdl = &zv->zv_znode;
    *bonus_hdl = zv->zv_dbuf;
    return (0);
}

/*
 * Return the current volume size to an external caller.
 * The size can change while the volume is open.
 */
uint64_t
zvol_get_volume_size(void *minor_hdl)
{
    zvol_state_t *zv = minor_hdl;

    return (zv->zv_volsize);
}

/*
 * Return the current WCE setting to an external caller.
 * The WCE setting can change while the volume is open.
 */
int
zvol_get_volume_wce(void *minor_hdl)
{
    zvol_state_t *zv = minor_hdl;

    return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
}

/*
 * Entry point for external callers to zvol_log_write().
 */
void
zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
    zvol_state_t *zv = minor_hdl;

    zvol_log_write(zv, tx, off, resid, sync);
}

/*
 * END entry points to allow external callers access to the volume.
 */

/*
 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
 */
/*ARGSUSED*/
int
zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
{
    zvol_state_t *zv;
    struct dk_cinfo dki;
    struct dk_minfo dkm;
    struct dk_callback *dkc;
    int error = 0;
    rl_t *rl;

    mutex_enter(&spa_namespace_lock);

    zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);

    if (zv == NULL) {
        mutex_exit(&spa_namespace_lock);
        return (SET_ERROR(ENXIO));
    }
    ASSERT(zv->zv_total_opens > 0);

    switch (cmd) {

    case DKIOCINFO:
        bzero(&dki, sizeof (dki));
        (void) strcpy(dki.dki_cname, "zvol");
        (void) strcpy(dki.dki_dname, "zvol");
        dki.dki_ctype = DKC_UNKNOWN;
        dki.dki_unit = getminor(dev);
        dki.dki_maxtransfer = 1 << (SPA_MAXBLOCKSHIFT - zv->zv_min_bs);
        mutex_exit(&spa_namespace_lock);
        if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
            error = SET_ERROR(EFAULT);
        return (error);

    case DKIOCGMEDIAINFO:
        bzero(&dkm, sizeof (dkm));
        dkm.dki_lbsize = 1U << zv->zv_min_bs;
        dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
        dkm.dki_media_type = DK_UNKNOWN;
        mutex_exit(&spa_namespace_lock);
        if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
            error = SET_ERROR(EFAULT);
        return (error);

    case DKIOCGETEFI:
        {
            uint64_t vs = zv->zv_volsize;
            uint8_t bs = zv->zv_min_bs;

            mutex_exit(&spa_namespace_lock);
            error = zvol_getefi((void *)arg, flag, vs, bs);
            return (error);
        }

    case DKIOCFLUSHWRITECACHE:
        dkc = (struct dk_callback *)arg;
        mutex_exit(&spa_namespace_lock);
        zil_commit(zv->zv_zilog, ZVOL_OBJ);
        if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
            (*dkc->dkc_callback)(dkc->dkc_cookie, error);
            error = 0;
        }
        return (error);

    case DKIOCGETWCE:
        {
            int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
            if (ddi_copyout(&wce, (void *)arg, sizeof (int),
                flag))
                error = SET_ERROR(EFAULT);
            break;
        }
    case DKIOCSETWCE:
        {
            int wce;
            if (ddi_copyin((void *)arg, &wce, sizeof (int),
                flag)) {
                error = SET_ERROR(EFAULT);
                break;
            }
            if (wce) {
                zv->zv_flags |= ZVOL_WCE;
                mutex_exit(&spa_namespace_lock);
            } else {
                zv->zv_flags &= ~ZVOL_WCE;
                mutex_exit(&spa_namespace_lock);
                zil_commit(zv->zv_zilog, ZVOL_OBJ);
            }
            return (0);
        }

    case DKIOCGGEOM:
    case DKIOCGVTOC:
        /*
         * commands using these (like prtvtoc) expect ENOTSUP
         * since we're emulating an EFI label
         */
        error = SET_ERROR(ENOTSUP);
        break;

    case DKIOCDUMPINIT:
        rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
            RL_WRITER);
        error = zvol_dumpify(zv);
        zfs_range_unlock(rl);
        break;

    case DKIOCDUMPFINI:
        if (!(zv->zv_flags & ZVOL_DUMPIFIED))
            break;
        rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
            RL_WRITER);
        error = zvol_dump_fini(zv);
        zfs_range_unlock(rl);
        break;

    case DKIOCFREE:
    {
        dkioc_free_t df;
        dmu_tx_t *tx;

        if (ddi_copyin((void *)arg, &df, sizeof (df), flag)) {
            error = SET_ERROR(EFAULT);
            break;
        }

        /*
         * Apply Postel's Law to length-checking.  If they overshoot,
         * just blank out until the end, if there's a need to blank
         * out anything.
         */
        if (df.df_start >= zv->zv_volsize)
            break;  /* No need to do anything... */
        if (df.df_start + df.df_length > zv->zv_volsize)
            df.df_length = DMU_OBJECT_END;

        rl = zfs_range_lock(&zv->zv_znode, df.df_start, df.df_length,
            RL_WRITER);
        tx = dmu_tx_create(zv->zv_objset);
        error = dmu_tx_assign(tx, TXG_WAIT);
        if (error != 0) {
            dmu_tx_abort(tx);
        } else {
            zvol_log_truncate(zv, tx, df.df_start,
                df.df_length, B_TRUE);
            dmu_tx_commit(tx);
            error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
                df.df_start, df.df_length);
        }

        zfs_range_unlock(rl);

        if (error == 0) {
            /*
             * If the write-cache is disabled or 'sync' property
             * is set to 'always' then treat this as a synchronous
             * operation (i.e. commit to zil).
             */
            if (!(zv->zv_flags & ZVOL_WCE) ||
                (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS))
                zil_commit(zv->zv_zilog, ZVOL_OBJ);

            /*
             * If the caller really wants synchronous writes, and
             * can't wait for them, don't return until the write
             * is done.
             */
            if (df.df_flags & DF_WAIT_SYNC) {
                txg_wait_synced(
                    dmu_objset_pool(zv->zv_objset), 0);
            }
        }
        break;
    }

    default:
        error = SET_ERROR(ENOTTY);
        break;

    }
    mutex_exit(&spa_namespace_lock);
    return (error);
}
int
zvol_busy(void)
{
    return (zvol_minors != 0);
}

void
zvol_init(void)
{
    VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
        1) == 0);
    ZFS_LOG(1, "ZVOL Initialized.");
}

void
zvol_fini(void)
{
    ddi_soft_state_fini(&zfsdev_state);
    ZFS_LOG(1, "ZVOL Deinitialized.");
}

/*ARGSUSED*/
static int
zfs_mvdev_dump_feature_check(void *arg, dmu_tx_t *tx)
{
    spa_t *spa = dmu_tx_pool(tx)->dp_spa;

    if (spa_feature_is_active(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
        return (1);
    return (0);
}

/*ARGSUSED*/
static void
zfs_mvdev_dump_activate_feature_sync(void *arg, dmu_tx_t *tx)
{
    spa_t *spa = dmu_tx_pool(tx)->dp_spa;

    spa_feature_incr(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP, tx);
}

static int
zvol_dump_init(zvol_state_t *zv, boolean_t resize)
{
    dmu_tx_t *tx;
    int error;
    objset_t *os = zv->zv_objset;
    spa_t *spa = dmu_objset_spa(os);
    vdev_t *vd = spa->spa_root_vdev;
    nvlist_t *nv = NULL;
    uint64_t version = spa_version(spa);
    enum zio_checksum checksum;

    ASSERT(MUTEX_HELD(&spa_namespace_lock));
    ASSERT(vd->vdev_ops == &vdev_root_ops);

    error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
        DMU_OBJECT_END);
    /* wait for dmu_free_long_range to actually free the blocks */
    txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);

    /*
     * If the pool on which the dump device is being initialized has more
     * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature is
     * enabled.  If so, bump that feature's counter to indicate that the
     * feature is active.  We also check the vdev type to handle the
     * following case:
     *   # zpool create test raidz disk1 disk2 disk3
     *   Now have spa_root_vdev->vdev_children == 1 (the raidz vdev),
     *   the raidz vdev itself has 3 children.
     */
    if (vd->vdev_children > 1 || vd->vdev_ops == &vdev_raidz_ops) {
        if (!spa_feature_is_enabled(spa,
            SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
            return (SET_ERROR(ENOTSUP));
        (void) dsl_sync_task(spa_name(spa),
            zfs_mvdev_dump_feature_check,
            zfs_mvdev_dump_activate_feature_sync, NULL, 2);
    }
    tx = dmu_tx_create(os);
    dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
    dmu_tx_hold_bonus(tx, ZVOL_OBJ);
    error = dmu_tx_assign(tx, TXG_WAIT);
    if (error) {
        dmu_tx_abort(tx);
        return (error);
    }

    /*
     * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY checksum
     * function.  Otherwise, use the old default -- OFF.
     */
    checksum = spa_feature_is_active(spa,
        SPA_FEATURE_MULTI_VDEV_CRASH_DUMP) ? ZIO_CHECKSUM_NOPARITY :
        ZIO_CHECKSUM_OFF;

    /*
     * If we are resizing the dump device then we only need to
     * update the refreservation to match the newly updated
     * zvolsize.  Otherwise, we save off the original state of the
     * zvol so that we can restore it if the zvol is ever undumpified.
     */
    if (resize) {
        error = zap_update(os, ZVOL_ZAP_OBJ,
            zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
            &zv->zv_volsize, tx);
    } else {
        uint64_t checksum, compress, refresrv, vbs, dedup;

        error = dsl_prop_get_integer(zv->zv_name,
            zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
        error = error ? error : dsl_prop_get_integer(zv->zv_name,
            zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum, NULL);
        error = error ? error : dsl_prop_get_integer(zv->zv_name,
            zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &refresrv, NULL);
        error = error ? error : dsl_prop_get_integer(zv->zv_name,
            zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs, NULL);
        if (version >= SPA_VERSION_DEDUP) {
            error = error ? error :
                dsl_prop_get_integer(zv->zv_name,
                zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
        }

        error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
            zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
            &compress, tx);
        error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
            zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum, tx);
        error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
            zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
            &refresrv, tx);
        error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
            zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
            &vbs, tx);
        error = error ? error : dmu_object_set_blocksize(
            os, ZVOL_OBJ, SPA_MAXBLOCKSIZE, 0, tx);
        if (version >= SPA_VERSION_DEDUP) {
            error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
                zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
                &dedup, tx);
        }
        if (error == 0)
            zv->zv_volblocksize = SPA_MAXBLOCKSIZE;
    }
    dmu_tx_commit(tx);

    /*
     * We only need to update the zvol's property if we are initializing
     * the dump area for the first time.
     */
    if (!resize) {
        VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
        VERIFY(nvlist_add_uint64(nv,
            zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
        VERIFY(nvlist_add_uint64(nv,
            zfs_prop_to_name(ZFS_PROP_COMPRESSION),
            ZIO_COMPRESS_OFF) == 0);
        VERIFY(nvlist_add_uint64(nv,
            zfs_prop_to_name(ZFS_PROP_CHECKSUM),
            checksum) == 0);
        if (version >= SPA_VERSION_DEDUP) {
            VERIFY(nvlist_add_uint64(nv,
                zfs_prop_to_name(ZFS_PROP_DEDUP),
                ZIO_CHECKSUM_OFF) == 0);
        }

        error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
            nv, NULL);
        nvlist_free(nv);

        if (error)
            return (error);
    }

    /* Allocate the space for the dump */
    error = zvol_prealloc(zv);
    return (error);
}
static int
zvol_dumpify(zvol_state_t *zv)
{
    int error = 0;
    uint64_t dumpsize = 0;
    dmu_tx_t *tx;
    objset_t *os = zv->zv_objset;

    if (zv->zv_flags & ZVOL_RDONLY)
        return (SET_ERROR(EROFS));

    if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
        8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
        boolean_t resize = (dumpsize > 0);

        if ((error = zvol_dump_init(zv, resize)) != 0) {
            (void) zvol_dump_fini(zv);
            return (error);
        }
    }

    /*
     * Build up our lba mapping.
     */
    error = zvol_get_lbas(zv);
    if (error) {
        (void) zvol_dump_fini(zv);
        return (error);
    }

    tx = dmu_tx_create(os);
    dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
    error = dmu_tx_assign(tx, TXG_WAIT);
    if (error) {
        dmu_tx_abort(tx);
        (void) zvol_dump_fini(zv);
        return (error);
    }

    zv->zv_flags |= ZVOL_DUMPIFIED;
    error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
        &zv->zv_volsize, tx);
    dmu_tx_commit(tx);

    if (error) {
        (void) zvol_dump_fini(zv);
        return (error);
    }

    txg_wait_synced(dmu_objset_pool(os), 0);
    return (0);
}

static int
zvol_dump_fini(zvol_state_t *zv)
{
    dmu_tx_t *tx;
    objset_t *os = zv->zv_objset;
    nvlist_t *nv;
    int error = 0;
    uint64_t checksum, compress, refresrv, vbs, dedup;
    uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));

    /*
     * Attempt to restore the zvol back to its pre-dumpified state.
     * This is a best-effort attempt as it's possible that not all
     * of these properties were initialized during the dumpify process
     * (i.e. error during zvol_dump_init).
     */

    tx = dmu_tx_create(os);
    dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
    error = dmu_tx_assign(tx, TXG_WAIT);
    if (error) {
        dmu_tx_abort(tx);
        return (error);
    }
    (void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
    dmu_tx_commit(tx);

    (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
        zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
    (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
        zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
    (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
        zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
    (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
        zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);

    VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
    (void) nvlist_add_uint64(nv,
        zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
    (void) nvlist_add_uint64(nv,
        zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
    (void) nvlist_add_uint64(nv,
        zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
    if (version >= SPA_VERSION_DEDUP &&
        zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
        zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
        (void) nvlist_add_uint64(nv,
            zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
    }
    (void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
        nv, NULL);
    nvlist_free(nv);

    zvol_free_extents(zv);
    zv->zv_flags &= ~ZVOL_DUMPIFIED;
    (void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
    /* wait for dmu_free_long_range to actually free the blocks */
    txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
    tx = dmu_tx_create(os);
    dmu_tx_hold_bonus(tx, ZVOL_OBJ);
    error = dmu_tx_assign(tx, TXG_WAIT);
    if (error) {
        dmu_tx_abort(tx);
        return (error);
    }
    if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
        zv->zv_volblocksize = vbs;
    dmu_tx_commit(tx);

    return (0);
}
static zvol_state_t *
zvol_geom_create(const char *name)
{
    struct g_provider *pp;
    struct g_geom *gp;
    zvol_state_t *zv;

    gp = g_new_geomf(&zfs_zvol_class, "zfs::zvol::%s", name);
    gp->start = zvol_geom_start;
    gp->access = zvol_geom_access;
    pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, name);
    pp->sectorsize = DEV_BSIZE;

    zv = kmem_zalloc(sizeof(*zv), KM_SLEEP);
    zv->zv_provider = pp;
    pp->private = zv;

    bioq_init(&zv->zv_queue);
    mtx_init(&zv->zv_queue_mtx, "zvol", NULL, MTX_DEF);

    return (zv);
}

static void
zvol_geom_run(zvol_state_t *zv)
{
    struct g_provider *pp;

    pp = zv->zv_provider;
    g_error_provider(pp, 0);

    kproc_kthread_add(zvol_geom_worker, zv, &zfsproc, NULL, 0, 0,
        "zfskern", "zvol %s", pp->name + sizeof(ZVOL_DRIVER));
}

static void
zvol_geom_destroy(zvol_state_t *zv)
{
    struct g_provider *pp;

    g_topology_assert();

    mtx_lock(&zv->zv_queue_mtx);
    zv->zv_state = 1;
    wakeup_one(&zv->zv_queue);
    while (zv->zv_state != 2)
        msleep(&zv->zv_state, &zv->zv_queue_mtx, 0, "zvol:w", 0);
    mtx_destroy(&zv->zv_queue_mtx);

    pp = zv->zv_provider;
    zv->zv_provider = NULL;
    pp->private = NULL;
    g_wither_geom(pp->geom, ENXIO);

    kmem_free(zv, sizeof(*zv));
}
static int
zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace)
{
    int count, error, flags;

    g_topology_assert();

    /*
     * To make it easier we expect either open or close, but not both
     * at the same time.
     */
    KASSERT((acr >= 0 && acw >= 0 && ace >= 0) ||
        (acr <= 0 && acw <= 0 && ace <= 0),
        ("Unsupported access request to %s (acr=%d, acw=%d, ace=%d).",
        pp->name, acr, acw, ace));

    if (pp->private == NULL) {
        if (acr <= 0 && acw <= 0 && ace <= 0)
            return (0);
        return (SET_ERROR(ENXIO));
    }

    /*
     * We don't pass FEXCL flag to zvol_open()/zvol_close() if ace != 0,
     * because GEOM already handles that and handles it a bit differently.
     * GEOM allows for multiple read/exclusive consumers and ZFS allows
     * only one exclusive consumer, no matter if it is reader or writer.
     * I like better the way GEOM works so I'll leave it for GEOM to
     * decide what to do.
     */

    count = acr + acw + ace;
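    /*
     * For example, a first r1w1e1 open arrives here as acr=1, acw=1,
     * ace=1, so count is 3 and zvol_open() is called; the matching
     * close arrives with all three deltas negative and zvol_close()
     * is called with the absolute value.
     */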
    if (count == 0)
        return (0);

    flags = 0;
    if (acr != 0 || ace != 0)
        flags |= FREAD;
    if (acw != 0)
        flags |= FWRITE;

    g_topology_unlock();
    if (count > 0)
        error = zvol_open(pp, flags, count);
    else
        error = zvol_close(pp, flags, -count);
    g_topology_lock();
    return (error);
}

static void
zvol_geom_start(struct bio *bp)
{
    zvol_state_t *zv;
    boolean_t first;

    switch (bp->bio_cmd) {
    case BIO_READ:
    case BIO_WRITE:
    case BIO_FLUSH:
        zv = bp->bio_to->private;
        ASSERT(zv != NULL);
        mtx_lock(&zv->zv_queue_mtx);
        first = (bioq_first(&zv->zv_queue) == NULL);
        bioq_insert_tail(&zv->zv_queue, bp);
        mtx_unlock(&zv->zv_queue_mtx);
        if (first)
            wakeup_one(&zv->zv_queue);
        break;
    default:
        g_io_deliver(bp, EOPNOTSUPP);
        break;
    }
}

static void
zvol_geom_worker(void *arg)
{
    zvol_state_t *zv;
    struct bio *bp;

    thread_lock(curthread);
    sched_prio(curthread, PRIBIO);
    thread_unlock(curthread);

    zv = arg;
    for (;;) {
        mtx_lock(&zv->zv_queue_mtx);
        bp = bioq_takefirst(&zv->zv_queue);
        if (bp == NULL) {
            if (zv->zv_state == 1) {
                zv->zv_state = 2;
                wakeup(&zv->zv_state);
                mtx_unlock(&zv->zv_queue_mtx);
                kthread_exit();
            }
            msleep(&zv->zv_queue, &zv->zv_queue_mtx, PRIBIO | PDROP,
                "zvol:io", 0);
            continue;
        }
        mtx_unlock(&zv->zv_queue_mtx);
        switch (bp->bio_cmd) {
        case BIO_FLUSH:
            zil_commit(zv->zv_zilog, ZVOL_OBJ);
            g_io_deliver(bp, 0);
            break;
        case BIO_READ:
        case BIO_WRITE:
            zvol_strategy(bp);
            break;
        }
    }
}
extern boolean_t dataset_name_hidden(const char *name);

static int
zvol_create_snapshots(objset_t *os, const char *name)
{
    uint64_t cookie, obj;
    char *sname;
    int error, len;

    cookie = obj = 0;
    sname = kmem_alloc(MAXPATHLEN, KM_SLEEP);

    (void) dmu_objset_find(name, dmu_objset_prefetch, NULL,
        DS_FIND_SNAPSHOTS);

    for (;;) {
        len = snprintf(sname, MAXPATHLEN, "%s@", name);
        if (len >= MAXPATHLEN) {
            dmu_objset_rele(os, FTAG);
            error = ENAMETOOLONG;
            break;
        }

        dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
        error = dmu_snapshot_list_next(os, MAXPATHLEN - len,
            sname + len, &obj, &cookie, NULL);
        dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
        if (error != 0) {
            if (error == ENOENT)
                error = 0;
            break;
        }

        if ((error = zvol_create_minor(sname)) != 0) {
            printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",
                sname, error);
            break;
        }
    }

    kmem_free(sname, MAXPATHLEN);
    return (error);
}

int
zvol_create_minors(const char *name)
{
    uint64_t cookie;
    objset_t *os;
    char *osname, *p;
    int error, len;

    if (dataset_name_hidden(name))
        return (0);

    if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
        printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",
            name, error);
        return (error);
    }
    if (dmu_objset_type(os) == DMU_OST_ZVOL) {
        dsl_dataset_long_hold(os->os_dsl_dataset, FTAG);
        dsl_pool_rele(dmu_objset_pool(os), FTAG);
        if ((error = zvol_create_minor(name)) == 0)
            error = zvol_create_snapshots(os, name);
        else {
            printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",
                name, error);
        }
        dsl_dataset_long_rele(os->os_dsl_dataset, FTAG);
        dsl_dataset_rele(os->os_dsl_dataset, FTAG);
        return (error);
    }
    if (dmu_objset_type(os) != DMU_OST_ZFS) {
        dmu_objset_rele(os, FTAG);
        return (0);
    }

    osname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
    if (snprintf(osname, MAXPATHLEN, "%s/", name) >= MAXPATHLEN) {
        dmu_objset_rele(os, FTAG);
        kmem_free(osname, MAXPATHLEN);
        return (ENOENT);
    }
    p = osname + strlen(osname);
    len = MAXPATHLEN - (p - osname);

    /* Prefetch the datasets. */
    cookie = 0;
    while (dmu_dir_list_next(os, len, p, NULL, &cookie) == 0) {
        if (!dataset_name_hidden(osname))
            (void) dmu_objset_prefetch(osname, NULL);
    }

    cookie = 0;
    while (dmu_dir_list_next(os, MAXPATHLEN - (p - osname), p, NULL,
        &cookie) == 0) {
        dmu_objset_rele(os, FTAG);
        (void) zvol_create_minors(osname);
        if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
            printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",
                name, error);
            return (error);
        }
    }

    dmu_objset_rele(os, FTAG);
    kmem_free(osname, MAXPATHLEN);
    return (0);
}
static void
zvol_rename_minor(struct g_geom *gp, const char *newname)
{
    struct g_provider *pp;
    zvol_state_t *zv;

    ASSERT(MUTEX_HELD(&spa_namespace_lock));
    g_topology_assert();

    pp = LIST_FIRST(&gp->provider);
    if (pp == NULL)
        return;
    zv = pp->private;
    if (zv == NULL)
        return;

    zv->zv_provider = NULL;
    g_wither_provider(pp, ENXIO);

    pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, newname);
    pp->sectorsize = DEV_BSIZE;
    pp->mediasize = zv->zv_volsize;
    pp->private = zv;
    zv->zv_provider = pp;
    strlcpy(zv->zv_name, newname, sizeof(zv->zv_name));
    g_error_provider(pp, 0);
}

void
zvol_rename_minors(const char *oldname, const char *newname)
{
    char name[MAXPATHLEN];
    struct g_provider *pp;
    struct g_geom *gp;
    size_t oldnamelen, newnamelen;
    zvol_state_t *zv;

    oldnamelen = strlen(oldname);
    newnamelen = strlen(newname);

    mutex_enter(&spa_namespace_lock);
    g_topology_lock();

    LIST_FOREACH(gp, &zfs_zvol_class.geom, geom) {
        pp = LIST_FIRST(&gp->provider);
        if (pp == NULL)
            continue;
        zv = pp->private;
        if (zv == NULL)
            continue;
        if (strcmp(zv->zv_name, oldname) == 0) {
            zvol_rename_minor(gp, newname);
        } else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 &&
            (zv->zv_name[oldnamelen] == '/' ||
            zv->zv_name[oldnamelen] == '@')) {
            snprintf(name, sizeof(name), "%s%c%s", newname,
                zv->zv_name[oldnamelen],
                zv->zv_name + oldnamelen + 1);
            zvol_rename_minor(gp, name);
        }
    }

    g_topology_unlock();
    mutex_exit(&spa_namespace_lock);
}