 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2006-2010 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 */

/* Portions Copyright 2010 Robert Milkowski */
/* Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org> */

/*
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 *	/dev/zvol/dsk/<pool_name>/<dataset_name>
 *	/dev/zvol/rdsk/<pool_name>/<dataset_name>
 *
 * These links are created by the /dev filesystem (sdev_zvolops.c).
 * Volumes are persistent through reboot.  No user command needs to be
 * run before opening and using a device.
 *
 * On FreeBSD ZVOLs are simply GEOM providers like any other storage device
 */
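/*
 * Example usage (illustrative only; standard zfs(8) commands, not part of
 * the original file): creating a volume and using the device node it
 * exposes.
 *
 *	# zfs create -V 10G tank/vol		(create a 10 GB volume)
 *	# newfs /dev/zvol/tank/vol		(FreeBSD GEOM provider path)
 *	# mount /dev/zvol/tank/vol /mnt
 *
 * On Solaris the equivalent nodes are /dev/zvol/dsk/tank/vol (block) and
 * /dev/zvol/rdsk/tank/vol (raw).
 */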
#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/dmu_traverse.h>
#include <sys/dnode.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/byteorder.h>
#include <sys/sunddi.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ioctl.h>
#include <sys/refcount.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_rlock.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
#include <geom/geom.h>

#include "zfs_namecheck.h"

struct g_class zfs_zvol_class = {

DECLARE_GEOM_CLASS(zfs_zvol_class, zfs_zvol);

static char *zvol_tag = "zvol_tag";

#define	ZVOL_DUMPSIZE		"dumpsize"

/*
 * The spa_namespace_lock protects the zfsdev_state structure from being
 * modified while it's being used, e.g. an open that comes in before a
 * create finishes.  It also protects temporary opens of the dataset so that,
 * e.g., an open doesn't get a spurious EBUSY.
 */
static uint32_t zvol_minors;
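/*
 * A minimal sketch (not part of the original file) of the locking pattern
 * the comment above describes: per-volume state may only be looked up and
 * read with spa_namespace_lock held, and the pointer must not be cached
 * past mutex_exit().  The helper name and #ifdef guard are hypothetical.
 */
#ifdef ZVOL_LOCKING_EXAMPLE
static uint64_t
zvol_volsize_example(const char *name)
{
	zvol_state_t *zv;
	uint64_t size = 0;

	mutex_enter(&spa_namespace_lock);
	zv = zvol_minor_lookup(name);	/* asserts the lock is held */
	if (zv != NULL)
		size = zv->zv_volsize;	/* only stable under the lock */
	mutex_exit(&spa_namespace_lock);
	return (size);
}
#endif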
typedef struct zvol_extent {
	dva_t		ze_dva;		/* dva associated with this extent */
	uint64_t	ze_nblks;	/* number of blocks in extent */

/*
 * The in-core state of each volume.
 */
typedef struct zvol_state {
	char		zv_name[MAXPATHLEN]; /* pool/dd name */
	uint64_t	zv_volsize;	/* amount of space we advertise */
	uint64_t	zv_volblocksize; /* volume block size */
	struct g_provider *zv_provider;	/* GEOM provider */
	uint8_t		zv_min_bs;	/* minimum addressable block shift */
	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
	objset_t	*zv_objset;	/* objset handle */
	uint32_t	zv_total_opens;	/* total open count */
	zilog_t		*zv_zilog;	/* ZIL handle */
	list_t		zv_extents;	/* List of extents for dump */
	znode_t		zv_znode;	/* for range locking */
	dmu_buf_t	*zv_dbuf;	/* bonus handle */
	struct bio_queue_head zv_queue;
	struct mtx	zv_queue_mtx;	/* zv_queue mutex */

/*
 * zvol specific flags
 */
#define	ZVOL_RDONLY	0x1
#define	ZVOL_DUMPIFIED	0x2
#define	ZVOL_EXCL	0x4

/*
 * zvol maximum transfer in one DMU tx.
 */
int zvol_maxphys = DMU_MAX_ACCESS/2;

extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
    nvlist_t *, nvlist_t **);
static int zvol_remove_zv(zvol_state_t *);
static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
static int zvol_dumpify(zvol_state_t *zv);
static int zvol_dump_fini(zvol_state_t *zv);
static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);

static zvol_state_t *zvol_geom_create(const char *name);
static void zvol_geom_run(zvol_state_t *zv);
static void zvol_geom_destroy(zvol_state_t *zv);
static int zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace);
static void zvol_geom_start(struct bio *bp);
static void zvol_geom_worker(void *arg);

zvol_size_changed(zvol_state_t *zv)
	dev_t dev = makedevice(maj, min);

	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Size", volsize) == DDI_SUCCESS);
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);

	/* Notify specfs to invalidate the cached size */
	spec_size_invalidate(dev, VBLK);
	spec_size_invalidate(dev, VCHR);

	struct g_provider *pp;

	pp = zv->zv_provider;

	if (zv->zv_volsize == pp->mediasize)
	/*
	 * Changing provider size is not really supported by GEOM, but it
	 * should be safe when the provider is closed.
	 */
	if (zv->zv_total_opens > 0)
	pp->mediasize = zv->zv_volsize;

zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
	if (volsize % blocksize != 0)

	if (volsize - 1 > SPEC_MAXOFFSET_T)

zvol_check_volblocksize(uint64_t volblocksize)
	if (volblocksize < SPA_MINBLOCKSIZE ||
	    volblocksize > SPA_MAXBLOCKSIZE ||

zvol_get_stats(objset_t *os, nvlist_t *nv)
	dmu_object_info_t doi;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);

	error = dmu_object_info(os, ZVOL_OBJ, &doi);
	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
	    doi.doi_data_block_size);

static zvol_state_t *
zvol_minor_lookup(const char *name)
	struct g_provider *pp;
	zvol_state_t *zv = NULL;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	LIST_FOREACH(gp, &zfs_zvol_class.geom, geom) {
		pp = LIST_FIRST(&gp->provider);
		if (strcmp(zv->zv_name, name) == 0)

	return (gp != NULL ? zv : NULL);

/* extent mapping arg */

zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
	struct maparg *ma = arg;
	int bs = ma->ma_zv->zv_volblocksize;

	if (bp == NULL || zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)

	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);

	/* Abort immediately if we have encountered gang blocks */

	/*
	 * See if the block is at the end of the previous extent.
	 */
	ze = list_tail(&ma->ma_zv->zv_extents);
	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {

	dprintf_bp(bp, "%s", "next blkptr:");

	/* start a new extent */
	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
	list_insert_tail(&ma->ma_zv->zv_extents, ze);
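	/*
	 * Worked example (illustrative, not from the original source): with
	 * an 8K volblocksize, a block at DVA offset 0x20000 extends an
	 * extent whose last block ends there (ze_dva offset 0x10000,
	 * ze_nblks 8, since 0x10000 + 8 * 0x2000 == 0x20000); any other
	 * DVA starts a new single-block extent.
	 */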
zvol_free_extents(zvol_state_t *zv)

	while (ze = list_head(&zv->zv_extents)) {
		list_remove(&zv->zv_extents, ze);
		kmem_free(ze, sizeof (zvol_extent_t));

zvol_get_lbas(zvol_state_t *zv)
	objset_t *os = zv->zv_objset;

	zvol_free_extents(zv);

	/* commit any in-flight changes before traversing the dataset */
	txg_wait_synced(dmu_objset_pool(os), 0);
	err = traverse_dataset(dmu_objset_ds(os), 0,
	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
		zvol_free_extents(zv);
		return (err ? err : EIO);

zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
	zfs_creat_t *zct = arg;
	nvlist_t *nvprops = zct->zct_props;
	uint64_t volblocksize, volsize;

	VERIFY(nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
	if (nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

	/*
	 * These properties must be removed from the list so the generic
	 * property setting step won't apply to them.
	 */
	VERIFY(nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
	(void) nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);

/*
 * Replay a TX_WRITE ZIL transaction that didn't get committed
 * after a system failure.
 */
zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
	objset_t *os = zv->zv_objset;
	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
	uint64_t offset, length;

	byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	/* If it's a dmu_sync() block, write the whole block */
	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
		if (length < blocksize) {
			offset -= offset % blocksize;
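			/*
			 * Worked example (illustrative): with blocksize
			 * 8192, a 2K record at offset 10240 is widened to
			 * its containing block: offset becomes
			 * 10240 - (10240 % 8192) = 8192 and the full 8K
			 * block is rewritten.
			 */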
	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
	error = dmu_tx_assign(tx, TXG_WAIT);
	dmu_write(os, ZVOL_OBJ, offset, length, data, tx);

zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)

/*
 * Callback vectors for replaying records.
 * Only TX_WRITE is needed for zvol.
 */
zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
	zvol_replay_err,	/* 0 no such transaction type */
	zvol_replay_err,	/* TX_CREATE */
	zvol_replay_err,	/* TX_MKDIR */
	zvol_replay_err,	/* TX_MKXATTR */
	zvol_replay_err,	/* TX_SYMLINK */
	zvol_replay_err,	/* TX_REMOVE */
	zvol_replay_err,	/* TX_RMDIR */
	zvol_replay_err,	/* TX_LINK */
	zvol_replay_err,	/* TX_RENAME */
	zvol_replay_write,	/* TX_WRITE */
	zvol_replay_err,	/* TX_TRUNCATE */
	zvol_replay_err,	/* TX_SETATTR */
	zvol_replay_err,	/* TX_ACL */
	zvol_replay_err,	/* TX_CREATE_ACL */
	zvol_replay_err,	/* TX_CREATE_ATTR */
	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL */
	zvol_replay_err,	/* TX_MKDIR_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
	zvol_replay_err,	/* TX_WRITE2 */

zvol_name2minor(const char *name, minor_t *minor)

	mutex_enter(&spa_namespace_lock);
	zv = zvol_minor_lookup(name);
	*minor = zv->zv_minor;
	mutex_exit(&spa_namespace_lock);
	return (zv ? 0 : -1);

/*
 * Create a minor node (plus a whole lot more) for the specified volume.
 */
zvol_create_minor(const char *name)
	zfs_soft_state_t *zs;
	dmu_object_info_t doi;

	ZFS_LOG(1, "Creating ZVOL %s...", name);

	mutex_enter(&spa_namespace_lock);

	if (zvol_minor_lookup(name) != NULL) {
		mutex_exit(&spa_namespace_lock);

	/* lie and say we're read-only */
	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);
		mutex_exit(&spa_namespace_lock);

	if ((minor = zfsdev_minor_alloc()) == 0) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&spa_namespace_lock);

	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&spa_namespace_lock);

	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,

	(void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);

	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&spa_namespace_lock);

	(void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);

	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_remove_minor_node(zfs_dip, chrbuf);
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&spa_namespace_lock);

	zs = ddi_get_soft_state(zfsdev_state, minor);
	zs->zss_type = ZSST_ZVOL;
	zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
		dmu_objset_disown(os, zvol_tag);
		mutex_exit(&spa_namespace_lock);

	zv = zvol_geom_create(name);
	zv->zv_volsize = volsize;
	zv->zv_provider->mediasize = zv->zv_volsize;

	(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
	zv->zv_min_bs = DEV_BSHIFT;
	if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));
	list_create(&zv->zv_extents, sizeof (zvol_extent_t),
	    offsetof(zvol_extent_t, ze_node));
	/* get and cache the blocksize */
	error = dmu_object_info(os, ZVOL_OBJ, &doi);
	zv->zv_volblocksize = doi.doi_data_block_size;

	if (spa_writeable(dmu_objset_spa(os))) {
		if (zil_replay_disable)
			zil_destroy(dmu_objset_zil(os), B_FALSE);
		else
			zil_replay(os, zv, zvol_replay_vector);
	dmu_objset_disown(os, FTAG);
	zv->zv_objset = NULL;

	mutex_exit(&spa_namespace_lock);

	ZFS_LOG(1, "ZVOL %s created.", name);
/*
 * Remove minor node for the specified volume.
 */
zvol_remove_zv(zvol_state_t *zv)
	minor_t minor = zv->zv_minor;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	if (zv->zv_total_opens != 0)

	ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);

	(void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
	ddi_remove_minor_node(zfs_dip, nmbuf);

	avl_destroy(&zv->zv_znode.z_range_avl);
	mutex_destroy(&zv->zv_znode.z_range_lock);

	zvol_geom_destroy(zv);

zvol_remove_minor(const char *name)

	mutex_enter(&spa_namespace_lock);
	if ((zv = zvol_minor_lookup(name)) == NULL) {
		mutex_exit(&spa_namespace_lock);
	rc = zvol_remove_zv(zv);
	mutex_exit(&spa_namespace_lock);

zvol_first_open(zvol_state_t *zv)

	/* lie and say we're read-only */
	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
		dmu_objset_disown(os, zvol_tag);

	error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
		dmu_objset_disown(os, zvol_tag);

	zv->zv_volsize = volsize;
	zv->zv_zilog = zil_open(os, zvol_get_data);
	zvol_size_changed(zv);

	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
	if (readonly || dmu_objset_is_snapshot(os) ||
	    !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	else
		zv->zv_flags &= ~ZVOL_RDONLY;

zvol_last_close(zvol_state_t *zv)
	zil_close(zv->zv_zilog);

	dmu_buf_rele(zv->zv_dbuf, zvol_tag);

	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
	    !(zv->zv_flags & ZVOL_RDONLY))
		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
	(void) dmu_objset_evict_dbufs(zv->zv_objset);

	dmu_objset_disown(zv->zv_objset, zvol_tag);
	zv->zv_objset = NULL;

zvol_prealloc(zvol_state_t *zv)
	objset_t *os = zv->zv_objset;
	uint64_t refd, avail, usedobjs, availobjs;
	uint64_t resid = zv->zv_volsize;

	/* Check the space usage before attempting to allocate the space */
	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
	if (avail < zv->zv_volsize)

	/* Free old extents if they exist */
	zvol_free_extents(zv);

		uint64_t bytes = MIN(resid, SPA_MAXBLOCKSIZE);

		tx = dmu_tx_create(os);
		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
		dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);

	txg_wait_synced(dmu_objset_pool(os), 0);
zvol_update_volsize(objset_t *os, uint64_t volsize)

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,

	error = dmu_free_long_range(os,
	    ZVOL_OBJ, volsize, DMU_OBJECT_END);

zvol_remove_minors(const char *name)
	struct g_geom *gp, *gptmp;
	struct g_provider *pp;

	namelen = strlen(name);

	mutex_enter(&spa_namespace_lock);

	LIST_FOREACH_SAFE(gp, &zfs_zvol_class.geom, geom, gptmp) {
		pp = LIST_FIRST(&gp->provider);
		if (strcmp(zv->zv_name, name) == 0 ||
		    (strncmp(zv->zv_name, name, namelen) == 0 &&
		    zv->zv_name[namelen] == '/')) {
			(void) zvol_remove_zv(zv);

	mutex_exit(&spa_namespace_lock);

zvol_set_volsize(const char *name, major_t maj, uint64_t volsize)
	zvol_state_t *zv = NULL;
	dmu_object_info_t doi;
	uint64_t old_volsize = 0ULL;

	mutex_enter(&spa_namespace_lock);
	zv = zvol_minor_lookup(name);
	if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
		mutex_exit(&spa_namespace_lock);

	if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
	    (error = zvol_check_volsize(volsize,
	    doi.doi_data_block_size)) != 0)

	VERIFY(dsl_prop_get_integer(name, "readonly", &readonly,

	error = zvol_update_volsize(os, volsize);
	/*
	 * Reinitialize the dump area to the new size.  If we
	 * failed to resize the dump area then restore it back to
	 * its original size.
	 */
	if (zv && error == 0) {
		if (zv->zv_flags & ZVOL_DUMPIFIED) {
			old_volsize = zv->zv_volsize;
			zv->zv_volsize = volsize;
			if ((error = zvol_dumpify(zv)) != 0 ||
			    (error = dumpvp_resize()) != 0) {
				(void) zvol_update_volsize(os, old_volsize);
				zv->zv_volsize = old_volsize;
				error = zvol_dumpify(zv);
#endif	/* ZVOL_DUMP */
		zv->zv_volsize = volsize;
		zvol_size_changed(zv);

	/*
	 * Generate a LUN expansion event.
	 */
	if (zv && error == 0) {
		char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

		(void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,

		VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);

		(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
		    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);

		kmem_free(physpath, MAXPATHLEN);

	dmu_objset_rele(os, FTAG);

	mutex_exit(&spa_namespace_lock);
zvol_open(struct g_provider *pp, int flag, int count)
	boolean_t locked = B_FALSE;

	/*
	 * Protect against recursively entering spa_namespace_lock
	 * when spa_open() is used for a pool on (local) ZVOLs.
	 * This is needed since we replaced the upstream zfsdev_state_lock
	 * with spa_namespace_lock in the ZVOL code.
	 * We are using the same trick as spa_open().
	 * Note that calls in zvol_first_open which need to resolve a
	 * pool name to a spa object will enter spa_open()
	 * recursively, but that function already has all the
	 * necessary protection.
	 */
	if (!MUTEX_HELD(&spa_namespace_lock)) {
		mutex_enter(&spa_namespace_lock);

		mutex_exit(&spa_namespace_lock);

	if (zv->zv_total_opens == 0)
		err = zvol_first_open(zv);
		mutex_exit(&spa_namespace_lock);
	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {

	if (zv->zv_flags & ZVOL_EXCL) {
		if (zv->zv_total_opens != 0) {
		zv->zv_flags |= ZVOL_EXCL;

	zv->zv_total_opens += count;
	mutex_exit(&spa_namespace_lock);

	if (zv->zv_total_opens == 0)
	mutex_exit(&spa_namespace_lock);

zvol_close(struct g_provider *pp, int flag, int count)
	boolean_t locked = B_FALSE;

	/* See comment in zvol_open(). */
	if (!MUTEX_HELD(&spa_namespace_lock)) {
		mutex_enter(&spa_namespace_lock);

		mutex_exit(&spa_namespace_lock);

	if (zv->zv_flags & ZVOL_EXCL) {
		ASSERT(zv->zv_total_opens == 1);
		zv->zv_flags &= ~ZVOL_EXCL;

	/*
	 * If the open count is zero, this is a spurious close.
	 * That indicates a bug in the kernel / DDI framework.
	 */
	ASSERT(zv->zv_total_opens != 0);

	/*
	 * You may get multiple opens, but only one close.
	 */
	zv->zv_total_opens -= count;

	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);

	mutex_exit(&spa_namespace_lock);

zvol_get_done(zgd_t *zgd, int error)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));

/*
 * Get data to generate a TX_WRITE intent log record.
 */
zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
	zvol_state_t *zv = arg;
	objset_t *os = zv->zv_objset;
	uint64_t object = ZVOL_OBJ;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;	/* length of user data */
	blkptr_t *bp = &lr->lr_blkptr;

	ASSERT(zio != NULL);

	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_zilog = zv->zv_zilog;
	zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
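	/*
	 * Worked example (illustrative): a small synchronous write whose
	 * payload was copied into the log record arrives here with
	 * buf != NULL (immediate) and is satisfied by dmu_read() below;
	 * a large write arrives with buf == NULL (indirect) and is synced
	 * via dmu_sync(), leaving only a block pointer in the log.
	 */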
	if (buf != NULL) {	/* immediate write */
		error = dmu_read(os, object, offset, size, buf,
		    DMU_READ_NO_PREFETCH);
		size = zv->zv_volblocksize;
		offset = P2ALIGN(offset, size);
		error = dmu_buf_hold(os, object, offset, zgd, &db,
		    DMU_READ_NO_PREFETCH);
			blkptr_t *obp = dmu_buf_get_blkptr(db);
				ASSERT(BP_IS_HOLE(bp));

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zvol_get_done, zgd);

		zvol_get_done(zgd, error);

/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;
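/*
 * Illustrative note (not from the original source): with the default
 * threshold above, a synchronous write that does not qualify for the
 * indirect path is logged WR_COPIED (payload copied into the itx right
 * away), an asynchronous one WR_NEED_COPY (copied only if the itx is
 * actually committed), and a full, aligned write of a volblocksize
 * larger than 32K (with no separate log device) WR_INDIRECT via
 * dmu_sync().  With logbias=throughput the threshold drops to 0.
 */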
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
	uint32_t blocksize = zv->zv_volblocksize;
	zilog_t *zilog = zv->zv_zilog;
	ssize_t immediate_write_sz;

	if (zil_replaying(zilog, tx))

	immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
	    ? 0 : zvol_immediate_write_sz;

	slogging = spa_has_slogs(zilog->zl_spa) &&
	    (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

		itx_wr_state_t write_state;

		/*
		 * Unlike zfs_log_write() we can be called with
		 * up to DMU_MAX_ACCESS/2 (5MB) writes.
		 */
		if (blocksize > immediate_write_sz && !slogging &&
		    resid >= blocksize && off % blocksize == 0) {
			write_state = WR_INDIRECT; /* uses dmu_sync */
			write_state = WR_COPIED;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
			write_state = WR_NEED_COPY;
			len = MIN(ZIL_MAX_LOG_DATA, resid);

		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
		    (write_state == WR_COPIED ? len : 0));
		lr = (lr_write_t *)&itx->itx_lr;
		if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
		    ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
			zil_itx_destroy(itx);
			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
			lr = (lr_write_t *)&itx->itx_lr;
			write_state = WR_NEED_COPY;

		itx->itx_wr_state = write_state;
		if (write_state == WR_NEED_COPY)
			itx->itx_sod += len;
		lr->lr_foid = ZVOL_OBJ;
		lr->lr_offset = off;
		lr->lr_length = len;

		BP_ZERO(&lr->lr_blkptr);

		itx->itx_private = zv;
		itx->itx_sync = sync;

		zil_itx_assign(zilog, itx, tx);
zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t size,
    boolean_t doread, boolean_t isdump)

	for (c = 0; c < vd->vdev_children; c++) {
		ASSERT(vd->vdev_ops == &vdev_mirror_ops ||
		    vd->vdev_ops == &vdev_replacing_ops ||
		    vd->vdev_ops == &vdev_spare_ops);
		int err = zvol_dumpio_vdev(vd->vdev_child[c],
		    addr, offset, size, doread, isdump);
		} else if (doread) {

	if (!vd->vdev_ops->vdev_op_leaf)
		return (numerrors < vd->vdev_children ? 0 : EIO);

	if (doread && !vdev_readable(vd))
	else if (!doread && !vdev_writeable(vd))

	ASSERT3P(dvd, !=, NULL);
	offset += VDEV_LABEL_START_SIZE;

	if (ddi_in_panic() || isdump) {
		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
		return (vdev_disk_physio(dvd->vd_lh, addr, size, offset,
		    doread ? B_READ : B_WRITE));

zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
    boolean_t doread, boolean_t isdump)
	spa_t *spa = dmu_objset_spa(zv->zv_objset);

	/* Must be sector aligned, and not straddle a block boundary. */
	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
	ASSERT(size <= zv->zv_volblocksize);

	/* Locate the extent this belongs to */
	ze = list_head(&zv->zv_extents);
	while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
		offset -= ze->ze_nblks * zv->zv_volblocksize;
		ze = list_next(&zv->zv_extents, ze);

	if (!ddi_in_panic())
		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
	offset += DVA_GET_OFFSET(&ze->ze_dva);
	error = zvol_dumpio_vdev(vd, addr, offset, size, doread, isdump);

	if (!ddi_in_panic())
		spa_config_exit(spa, SCL_STATE, FTAG);
zvol_strategy(struct bio *bp)
	zvol_state_t *zv = bp->bio_to->private;
	uint64_t off, volsize;
	boolean_t doread = (bp->bio_cmd == BIO_READ);

		g_io_deliver(bp, ENXIO);

	if (bp->bio_cmd != BIO_READ && (zv->zv_flags & ZVOL_RDONLY)) {
		g_io_deliver(bp, EROFS);

	off = bp->bio_offset;
	volsize = zv->zv_volsize;

	addr = bp->bio_data;
	resid = bp->bio_length;

	if (resid > 0 && (off < 0 || off >= volsize)) {
		g_io_deliver(bp, EIO);

	sync = !doread && zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;

	/*
	 * There must be no buffer changes when doing a dmu_sync() because
	 * we can't change the data whilst calculating the checksum.
	 */
	rl = zfs_range_lock(&zv->zv_znode, off, resid,
	    doread ? RL_READER : RL_WRITER);

	while (resid != 0 && off < volsize) {
		size_t size = MIN(resid, zvol_maxphys);
			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
			dmu_tx_t *tx = dmu_tx_create(os);
			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
			error = dmu_tx_assign(tx, TXG_WAIT);
				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
				zvol_log_write(zv, tx, off, size, sync);
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)

	zfs_range_unlock(rl);

	bp->bio_completed = bp->bio_length - resid;
	if (bp->bio_completed < bp->bio_length)
		bp->bio_error = (off > volsize ? EINVAL : error);

		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	g_io_deliver(bp, 0);

/*
 * Set the buffer count to the zvol maximum transfer.
 * Using our own routine instead of the default minphys()
 * means that for larger writes we write bigger buffers on X86
 * (128K instead of 56K) and flush the disk write cache less often
 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
 * 56K on X86 and 128K on sparc).
 */
zvol_minphys(struct buf *bp)
	if (bp->b_bcount > zvol_maxphys)
		bp->b_bcount = zvol_maxphys;
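/*
 * Illustrative note (not from the original source): a single large physio
 * request is clamped here to zvol_maxphys per transfer, so it is carried
 * out as a few back-to-back transfers rather than the many small chunks
 * the default minphys() would produce.
 */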
zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
	minor_t minor = getminor(dev);

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);

	boff = ldbtob(blkno);
	resid = ldbtob(nblocks);

	VERIFY3U(boff + resid, <=, zv->zv_volsize);

		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);

zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
	minor_t minor = getminor(dev);

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_READ,

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);

		/* don't read past the end */
		if (bytes > volsize - uio->uio_loffset)
			bytes = volsize - uio->uio_loffset;

		error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)

	zfs_range_unlock(rl);

zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
	minor_t minor = getminor(dev);

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_WRITE,

	sync = !(zv->zv_flags & ZVOL_WCE) ||
	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
		uint64_t off = uio->uio_loffset;
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);

		if (bytes > volsize - off)	/* don't write past the end */
			bytes = volsize - off;

		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
			zvol_log_write(zv, tx, off, bytes, sync);

	zfs_range_unlock(rl);
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
	struct uuid uuid = EFI_RESERVED;
	efi_gpe_t gpe = { 0 };

	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))

	ptr = (char *)(uintptr_t)efi.dki_data_64;
	length = efi.dki_length;
	/*
	 * Some clients may attempt to request a PMBR for the
	 * zvol.  Currently this interface will return EINVAL to
	 * such requests.  These requests could be supported by
	 * adding a check for lba == 0 and consing up an appropriate
	 * PMBR.
	 */
	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)

	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);
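	/*
	 * Worked example (illustrative): for a 1 GiB volume with 512-byte
	 * sectors (vs = 1073741824, bs = 9), the faked label advertises a
	 * single reserved partition spanning LBA 34 through
	 * (vs >> bs) - 1 = 2097151.
	 */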
	if (efi.dki_lba == 1) {
		efi_gpt_t gpt = { 0 };

		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
		gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
		gpt.efi_gpt_MyLBA = LE_64(1ULL);
		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
		gpt.efi_gpt_SizeOfPartitionEntry =
		    LE_32(sizeof (efi_gpe_t));
		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
		CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
		ptr += sizeof (gpt);
		length -= sizeof (gpt);
	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),

/*
 * BEGIN entry points to allow external callers access to the volume.
 */
/*
 * Return the volume parameters needed for access from an external caller.
 * These values are invariant as long as the volume is held open.
 */
zvol_get_volume_params(minor_t minor, uint64_t *blksize,
    uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
    void **rl_hdl, void **bonus_hdl)

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv->zv_flags & ZVOL_DUMPIFIED)

	ASSERT(blksize && max_xfer_len && minor_hdl &&
	    objset_hdl && zil_hdl && rl_hdl && bonus_hdl);

	*blksize = zv->zv_volblocksize;
	*max_xfer_len = (uint64_t)zvol_maxphys;
	*objset_hdl = zv->zv_objset;
	*zil_hdl = zv->zv_zilog;
	*rl_hdl = &zv->zv_znode;
	*bonus_hdl = zv->zv_dbuf;

/*
 * Return the current volume size to an external caller.
 * The size can change while the volume is open.
 */
zvol_get_volume_size(void *minor_hdl)
	zvol_state_t *zv = minor_hdl;

	return (zv->zv_volsize);

/*
 * Return the current WCE setting to an external caller.
 * The WCE setting can change while the volume is open.
 */
zvol_get_volume_wce(void *minor_hdl)
	zvol_state_t *zv = minor_hdl;

	return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);

/*
 * Entry point for external callers to zvol_log_write
 */
zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
	zvol_state_t *zv = minor_hdl;

	zvol_log_write(zv, tx, off, resid, sync);
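/*
 * A minimal sketch (not part of the original file) of how an external
 * consumer might use the entry points above; the caller, its minor
 * number, and the #ifdef guard are hypothetical, and error handling is
 * elided.
 */
#ifdef ZVOL_CALLER_EXAMPLE
static void
zvol_caller_example(minor_t minor)
{
	uint64_t blksize, max_xfer;
	void *mhdl, *oshdl, *zilhdl, *rlhdl, *bonushdl;

	if (zvol_get_volume_params(minor, &blksize, &max_xfer,
	    &mhdl, &oshdl, &zilhdl, &rlhdl, &bonushdl) != 0)
		return;
	/* The size and WCE setting may change while open; re-read them. */
	(void) zvol_get_volume_size(mhdl);
	(void) zvol_get_volume_wce(mhdl);
}
#endif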
/*
 * END entry points to allow external callers access to the volume.
 */

/*
 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
 */
zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
	struct dk_cinfo dki;
	struct dk_minfo dkm;
	struct dk_callback *dkc;

	mutex_enter(&spa_namespace_lock);

	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);
		mutex_exit(&spa_namespace_lock);
	ASSERT(zv->zv_total_opens > 0);

		bzero(&dki, sizeof (dki));
		(void) strcpy(dki.dki_cname, "zvol");
		(void) strcpy(dki.dki_dname, "zvol");
		dki.dki_ctype = DKC_UNKNOWN;
		dki.dki_unit = getminor(dev);
		dki.dki_maxtransfer = 1 << (SPA_MAXBLOCKSHIFT - zv->zv_min_bs);
		mutex_exit(&spa_namespace_lock);
		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))

	case DKIOCGMEDIAINFO:
		bzero(&dkm, sizeof (dkm));
		dkm.dki_lbsize = 1U << zv->zv_min_bs;
		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkm.dki_media_type = DK_UNKNOWN;
		mutex_exit(&spa_namespace_lock);
		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))

		uint64_t vs = zv->zv_volsize;
		uint8_t bs = zv->zv_min_bs;

		mutex_exit(&spa_namespace_lock);
		error = zvol_getefi((void *)arg, flag, vs, bs);

	case DKIOCFLUSHWRITECACHE:
		dkc = (struct dk_callback *)arg;
		mutex_exit(&spa_namespace_lock);
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
			(*dkc->dkc_callback)(dkc->dkc_cookie, error);

		int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
		if (ddi_copyout(&wce, (void *)arg, sizeof (int),

		if (ddi_copyin((void *)arg, &wce, sizeof (int),

			zv->zv_flags |= ZVOL_WCE;
			mutex_exit(&spa_namespace_lock);
			zv->zv_flags &= ~ZVOL_WCE;
			mutex_exit(&spa_namespace_lock);
			zil_commit(zv->zv_zilog, ZVOL_OBJ);

		/*
		 * commands using these (like prtvtoc) expect ENOTSUP
		 * since we're emulating an EFI label
		 */

		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		error = zvol_dumpify(zv);
		zfs_range_unlock(rl);

		if (!(zv->zv_flags & ZVOL_DUMPIFIED))
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		error = zvol_dump_fini(zv);
		zfs_range_unlock(rl);

	mutex_exit(&spa_namespace_lock);
	return (zvol_minors != 0);

	VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
	ZFS_LOG(1, "ZVOL Initialized.");

	ddi_soft_state_fini(&zfsdev_state);
	ZFS_LOG(1, "ZVOL Deinitialized.");

zvol_dump_init(zvol_state_t *zv, boolean_t resize)
	objset_t *os = zv->zv_objset;
	nvlist_t *nv = NULL;
	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
	/* wait for dmu_free_long_range to actually free the blocks */
	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);

	/*
	 * If we are resizing the dump device then we only need to
	 * update the refreservation to match the newly updated
	 * zvolsize.  Otherwise, we save off the original state of the
	 * zvol so that we can restore it if the zvol is ever undumpified.
	 */
		error = zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
		    &zv->zv_volsize, tx);
		uint64_t checksum, compress, refresrv, vbs, dedup;

		error = dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &refresrv, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs, NULL);
		if (version >= SPA_VERSION_DEDUP) {
			error = error ? error :
			    dsl_prop_get_integer(zv->zv_name,
			    zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);

		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
		error = error ? error : dmu_object_set_blocksize(
		    os, ZVOL_OBJ, SPA_MAXBLOCKSIZE, 0, tx);
		if (version >= SPA_VERSION_DEDUP) {
			error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
			    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
		zv->zv_volblocksize = SPA_MAXBLOCKSIZE;

	/*
	 * We only need to update the zvol's property if we are initializing
	 * the dump area for the first time.
	 */
		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
		    ZIO_COMPRESS_OFF) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
		    ZIO_CHECKSUM_OFF) == 0);
		if (version >= SPA_VERSION_DEDUP) {
			VERIFY(nvlist_add_uint64(nv,
			    zfs_prop_to_name(ZFS_PROP_DEDUP),
			    ZIO_CHECKSUM_OFF) == 0);
		error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,

	/* Allocate the space for the dump */
	error = zvol_prealloc(zv);
zvol_dumpify(zvol_state_t *zv)
	uint64_t dumpsize = 0;
	objset_t *os = zv->zv_objset;

	if (zv->zv_flags & ZVOL_RDONLY)

	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
		boolean_t resize = (dumpsize > 0) ? B_TRUE : B_FALSE;

		if ((error = zvol_dump_init(zv, resize)) != 0) {
			(void) zvol_dump_fini(zv);

	/*
	 * Build up our lba mapping.
	 */
	error = zvol_get_lbas(zv);
		(void) zvol_dump_fini(zv);

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
		(void) zvol_dump_fini(zv);

	zv->zv_flags |= ZVOL_DUMPIFIED;
	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
	    &zv->zv_volsize, tx);
		(void) zvol_dump_fini(zv);

	txg_wait_synced(dmu_objset_pool(os), 0);

zvol_dump_fini(zvol_state_t *zv)
	objset_t *os = zv->zv_objset;
	uint64_t checksum, compress, refresrv, vbs, dedup;
	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));

	/*
	 * Attempt to restore the zvol back to its pre-dumpified state.
	 * This is a best-effort attempt as it's possible that not all
	 * of these properties were initialized during the dumpify process
	 * (i.e. error during zvol_dump_init).
	 */

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	(void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);

	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);

	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
	if (version >= SPA_VERSION_DEDUP &&
	    zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
		(void) nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
	(void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,

	zvol_free_extents(zv);
	zv->zv_flags &= ~ZVOL_DUMPIFIED;
	(void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
	/* wait for dmu_free_long_range to actually free the blocks */
	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);

	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
		zv->zv_volblocksize = vbs;
static zvol_state_t *
zvol_geom_create(const char *name)
	struct g_provider *pp;

	gp = g_new_geomf(&zfs_zvol_class, "zfs::zvol::%s", name);
	gp->start = zvol_geom_start;
	gp->access = zvol_geom_access;
	pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, name);
	pp->sectorsize = DEV_BSIZE;

	zv = kmem_zalloc(sizeof(*zv), KM_SLEEP);
	zv->zv_provider = pp;

	bioq_init(&zv->zv_queue);
	mtx_init(&zv->zv_queue_mtx, "zvol", NULL, MTX_DEF);

zvol_geom_run(zvol_state_t *zv)
	struct g_provider *pp;

	pp = zv->zv_provider;
	g_error_provider(pp, 0);

	kproc_kthread_add(zvol_geom_worker, zv, &zfsproc, NULL, 0, 0,
	    "zfskern", "zvol %s", pp->name + sizeof(ZVOL_DRIVER));

zvol_geom_destroy(zvol_state_t *zv)
	struct g_provider *pp;

	g_topology_assert();

	mtx_lock(&zv->zv_queue_mtx);
	wakeup_one(&zv->zv_queue);
	while (zv->zv_state != 2)
		msleep(&zv->zv_state, &zv->zv_queue_mtx, 0, "zvol:w", 0);
	mtx_destroy(&zv->zv_queue_mtx);

	pp = zv->zv_provider;
	zv->zv_provider = NULL;
	g_wither_geom(pp->geom, ENXIO);

	kmem_free(zv, sizeof(*zv));

zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace)
	int count, error, flags;

	g_topology_assert();

	/*
	 * To make it easier we expect either open or close, but not both
	 * at the same time.
	 */
	KASSERT((acr >= 0 && acw >= 0 && ace >= 0) ||
	    (acr <= 0 && acw <= 0 && ace <= 0),
	    ("Unsupported access request to %s (acr=%d, acw=%d, ace=%d).",
	    pp->name, acr, acw, ace));

	if (pp->private == NULL) {
		if (acr <= 0 && acw <= 0 && ace <= 0)

	/*
	 * We don't pass FEXCL flag to zvol_open()/zvol_close() if ace != 0,
	 * because GEOM already handles that and handles it a bit differently.
	 * GEOM allows for multiple read/exclusive consumers and ZFS allows
	 * only one exclusive consumer, no matter if it is reader or writer.
	 * I like better the way GEOM works so I'll leave it for GEOM to
	 * decide what to do.
	 */
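	/*
	 * Worked example (illustrative, not from the original source): a
	 * consumer opening for read/write passes acr=1, acw=1, ace=0, so
	 * count below is 2 and zvol_open() is called with read/write
	 * flags; closing the same consumer passes the negated deltas and
	 * reaches zvol_close() instead.
	 */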
	count = acr + acw + ace;
	if (acr != 0 || ace != 0)

	g_topology_unlock();
		error = zvol_open(pp, flags, count);
		error = zvol_close(pp, flags, -count);

zvol_geom_start(struct bio *bp)

	switch (bp->bio_cmd) {
		zv = bp->bio_to->private;
		mtx_lock(&zv->zv_queue_mtx);
		first = (bioq_first(&zv->zv_queue) == NULL);
		bioq_insert_tail(&zv->zv_queue, bp);
		mtx_unlock(&zv->zv_queue_mtx);
			wakeup_one(&zv->zv_queue);
		g_io_deliver(bp, EOPNOTSUPP);

zvol_geom_worker(void *arg)

	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);

		mtx_lock(&zv->zv_queue_mtx);
		bp = bioq_takefirst(&zv->zv_queue);
			if (zv->zv_state == 1) {
				wakeup(&zv->zv_state);
				mtx_unlock(&zv->zv_queue_mtx);
			msleep(&zv->zv_queue, &zv->zv_queue_mtx, PRIBIO | PDROP,
		mtx_unlock(&zv->zv_queue_mtx);
		switch (bp->bio_cmd) {
			zil_commit(zv->zv_zilog, ZVOL_OBJ);
			g_io_deliver(bp, 0);

extern boolean_t dataset_name_hidden(const char *name);

zvol_create_snapshots(objset_t *os, const char *name)
	uint64_t cookie, obj;

	sname = kmem_alloc(MAXPATHLEN, KM_SLEEP);

	(void) dmu_objset_find(name, dmu_objset_prefetch, NULL,

	len = snprintf(sname, MAXPATHLEN, "%s@", name);
	if (len >= MAXPATHLEN) {
		dmu_objset_rele(os, FTAG);
		error = ENAMETOOLONG;

		error = dmu_snapshot_list_next(os, MAXPATHLEN - len,
		    sname + len, &obj, &cookie, NULL);
		if (error == ENOENT)

		if ((error = zvol_create_minor(sname)) != 0) {
			printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",

	kmem_free(sname, MAXPATHLEN);

zvol_create_minors(const char *name)

	if (dataset_name_hidden(name))

	if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
		printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",

	if (dmu_objset_type(os) == DMU_OST_ZVOL) {
		if ((error = zvol_create_minor(name)) == 0)
			error = zvol_create_snapshots(os, name);
			printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",
		dmu_objset_rele(os, FTAG);

	if (dmu_objset_type(os) != DMU_OST_ZFS) {
		dmu_objset_rele(os, FTAG);

	osname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	if (snprintf(osname, MAXPATHLEN, "%s/", name) >= MAXPATHLEN) {
		dmu_objset_rele(os, FTAG);
		kmem_free(osname, MAXPATHLEN);
	p = osname + strlen(osname);
	len = MAXPATHLEN - (p - osname);

	/* Prefetch the datasets. */
	while (dmu_dir_list_next(os, len, p, NULL, &cookie) == 0) {
		if (!dataset_name_hidden(osname))
			(void) dmu_objset_prefetch(osname, NULL);

	while (dmu_dir_list_next(os, MAXPATHLEN - (p - osname), p, NULL,
		dmu_objset_rele(os, FTAG);
		(void) zvol_create_minors(osname);
		if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
			printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",

	dmu_objset_rele(os, FTAG);
	kmem_free(osname, MAXPATHLEN);

zvol_rename_minor(struct g_geom *gp, const char *newname)
	struct g_provider *pp;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	g_topology_assert();

	pp = LIST_FIRST(&gp->provider);
	zv->zv_provider = NULL;
	g_wither_provider(pp, ENXIO);

	pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, newname);
	pp->sectorsize = DEV_BSIZE;
	pp->mediasize = zv->zv_volsize;
	zv->zv_provider = pp;
	strlcpy(zv->zv_name, newname, sizeof(zv->zv_name));
	g_error_provider(pp, 0);

zvol_rename_minors(const char *oldname, const char *newname)
	char name[MAXPATHLEN];
	struct g_provider *pp;
	size_t oldnamelen, newnamelen;

	oldnamelen = strlen(oldname);
	newnamelen = strlen(newname);

	mutex_enter(&spa_namespace_lock);

	LIST_FOREACH(gp, &zfs_zvol_class.geom, geom) {
		pp = LIST_FIRST(&gp->provider);
		if (strcmp(zv->zv_name, oldname) == 0) {
			zvol_rename_minor(gp, newname);
		} else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 &&
		    (zv->zv_name[oldnamelen] == '/' ||
		    zv->zv_name[oldnamelen] == '@')) {
			snprintf(name, sizeof(name), "%s%c%s", newname,
			    zv->zv_name[oldnamelen],
			    zv->zv_name + oldnamelen + 1);
			zvol_rename_minor(gp, name);

	g_topology_unlock();
	mutex_exit(&spa_namespace_lock);