/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2006-2010 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 */

/* Portions Copyright 2010 Robert Milkowski */
/*
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 *	/dev/zvol/dsk/<pool_name>/<dataset_name>
 *	/dev/zvol/rdsk/<pool_name>/<dataset_name>
 *
 * These links are created by the /dev filesystem (sdev_zvolops.c).
 * Volumes are persistent through reboot.  No user command needs to be
 * run before opening and using a device.
 *
 * On FreeBSD, ZVOLs are simply GEOM providers like any other storage device
 * in the system.
 */
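/*
 * For example, a volume named "vol1" in a pool named "tank" (hypothetical
 * names, for illustration only) would be reachable as
 * /dev/zvol/dsk/tank/vol1 and /dev/zvol/rdsk/tank/vol1.
 */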
#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/dmu_traverse.h>
#include <sys/dnode.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/byteorder.h>
#include <sys/sunddi.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ioctl.h>
#include <sys/refcount.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_rlock.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
#include <geom/geom.h>

#include "zfs_namecheck.h"
struct g_class zfs_zvol_class = {
	.name = "ZFS::ZVOL",
	.version = G_VERSION,
};

DECLARE_GEOM_CLASS(zfs_zvol_class, zfs_zvol);
static char *zvol_tag = "zvol_tag";

#define	ZVOL_DUMPSIZE	"dumpsize"
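/* ZAP attribute under which zvol_dumpify() records the dumpified volume size. */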
/*
 * The spa_namespace_lock protects the zfsdev_state structure from being
 * modified while it's being used, e.g. an open that comes in before a
 * create finishes.  It also protects temporary opens of the dataset so that,
 * e.g., an open doesn't get a spurious EBUSY.
 */
static uint32_t zvol_minors;
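/*
 * Illustrative sketch (not part of the driver) of the caller pattern the
 * comment above describes, as used by the lookup paths in this file:
 *
 *	mutex_enter(&spa_namespace_lock);
 *	zv = zvol_minor_lookup(name);	// state cannot change under us
 *	if (zv == NULL) {
 *		mutex_exit(&spa_namespace_lock);
 *		return (ENXIO);
 *	}
 *	// ... use zv ...
 *	mutex_exit(&spa_namespace_lock);
 */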
typedef struct zvol_extent {
	list_node_t	ze_node;
	dva_t		ze_dva;		/* dva associated with this extent */
	uint64_t	ze_nblks;	/* number of blocks in extent */
} zvol_extent_t;
/*
 * The in-core state of each volume.
 */
typedef struct zvol_state {
	char		zv_name[MAXPATHLEN]; /* pool/dd name */
	uint64_t	zv_volsize;	/* amount of space we advertise */
	uint64_t	zv_volblocksize; /* volume block size */
	struct g_provider *zv_provider;	/* GEOM provider */
	uint8_t		zv_min_bs;	/* minimum addressable block shift */
	uint8_t		zv_flags;	/* readonly, dumpified, etc. */
	objset_t	*zv_objset;	/* objset handle */
	uint32_t	zv_total_opens;	/* total open count */
	zilog_t		*zv_zilog;	/* ZIL handle */
	list_t		zv_extents;	/* List of extents for dump */
	znode_t		zv_znode;	/* for range locking */
	dmu_buf_t	*zv_dbuf;	/* bonus handle */
	int		zv_state;
	struct bio_queue_head zv_queue;
	struct mtx	zv_queue_mtx;	/* zv_queue mutex */
} zvol_state_t;
/*
 * zvol specific flags
 */
#define	ZVOL_RDONLY	0x1
#define	ZVOL_DUMPIFIED	0x2
#define	ZVOL_EXCL	0x4
#define	ZVOL_WCE	0x8
/*
 * zvol maximum transfer in one DMU tx.
 */
int zvol_maxphys = DMU_MAX_ACCESS/2;
extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
    nvlist_t *, nvlist_t **);

static int zvol_remove_zv(zvol_state_t *);
static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
static int zvol_dumpify(zvol_state_t *zv);
static int zvol_dump_fini(zvol_state_t *zv);
static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);

static zvol_state_t *zvol_geom_create(const char *name);
static void zvol_geom_run(zvol_state_t *zv);
static void zvol_geom_destroy(zvol_state_t *zv);
static int zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace);
static void zvol_geom_start(struct bio *bp);
static void zvol_geom_worker(void *arg);
static void
zvol_size_changed(zvol_state_t *zv)
{
#ifdef sun
	dev_t dev = makedevice(maj, min);

	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Size", volsize) == DDI_SUCCESS);
	VERIFY(ddi_prop_update_int64(dev, zfs_dip,
	    "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);

	/* Notify specfs to invalidate the cached size */
	spec_size_invalidate(dev, VBLK);
	spec_size_invalidate(dev, VCHR);
#else	/* !sun */
	struct g_provider *pp;

	pp = zv->zv_provider;
	if (pp == NULL)
		return;
	if (zv->zv_volsize == pp->mediasize)
		return;
	/*
	 * Changing provider size is not really supported by GEOM, but it
	 * should be safe when provider is closed.
	 */
	if (zv->zv_total_opens > 0)
		return;
	pp->mediasize = zv->zv_volsize;
#endif	/* !sun */
}
int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
	if (volsize == 0)
		return (EINVAL);

	if (volsize % blocksize != 0)
		return (EINVAL);

#ifdef _ILP32
	if (volsize - 1 > SPEC_MAXOFFSET_T)
		return (EOVERFLOW);
#endif
	return (0);
}
int
zvol_check_volblocksize(uint64_t volblocksize)
{
	if (volblocksize < SPA_MINBLOCKSIZE ||
	    volblocksize > SPA_MAXBLOCKSIZE ||
	    !ISP2(volblocksize))
		return (EDOM);

	return (0);
}
int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
	int error;
	dmu_object_info_t doi;
	uint64_t val;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
	if (error)
		return (error);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);

	error = dmu_object_info(os, ZVOL_OBJ, &doi);

	if (error == 0) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
		    doi.doi_data_block_size);
	}

	return (error);
}
static zvol_state_t *
zvol_minor_lookup(const char *name)
{
	struct g_provider *pp;
	struct g_geom *gp;
	zvol_state_t *zv = NULL;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	g_topology_lock();
	LIST_FOREACH(gp, &zfs_zvol_class.geom, geom) {
		pp = LIST_FIRST(&gp->provider);
		if (pp == NULL)
			continue;
		zv = pp->private;
		if (zv == NULL)
			continue;
		if (strcmp(zv->zv_name, name) == 0)
			break;
	}
	g_topology_unlock();

	return (gp != NULL ? zv : NULL);
}
/* extent mapping arg */
struct maparg {
	zvol_state_t	*ma_zv;
	uint64_t	ma_blks;
};

/*ARGSUSED*/
static int
zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
	struct maparg *ma = arg;
	zvol_extent_t *ze;
	int bs = ma->ma_zv->zv_volblocksize;

	if (bp == NULL || zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
		return (0);

	VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
	ma->ma_blks++;

	/* Abort immediately if we have encountered gang blocks */
	if (BP_IS_GANG(bp))
		return (EFRAGS);

	/*
	 * See if the block is at the end of the previous extent.
	 */
	ze = list_tail(&ma->ma_zv->zv_extents);
	if (ze &&
	    DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
	    DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
	    DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
		ze->ze_nblks++;
		return (0);
	}

	dprintf_bp(bp, "%s", "next blkptr:");

	/* start a new extent */
	ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
	ze->ze_dva = bp->blk_dva[0];	/* structure assignment */
	ze->ze_nblks = 1;
	list_insert_tail(&ma->ma_zv->zv_extents, ze);
	return (0);
}
static void
zvol_free_extents(zvol_state_t *zv)
{
	zvol_extent_t *ze;

	while ((ze = list_head(&zv->zv_extents)) != NULL) {
		list_remove(&zv->zv_extents, ze);
		kmem_free(ze, sizeof (zvol_extent_t));
	}
}
static int
zvol_get_lbas(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	struct maparg ma;
	int err;

	ma.ma_zv = zv;
	ma.ma_blks = 0;
	zvol_free_extents(zv);

	/* commit any in-flight changes before traversing the dataset */
	txg_wait_synced(dmu_objset_pool(os), 0);
	err = traverse_dataset(dmu_objset_ds(os), 0,
	    TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
	if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
		zvol_free_extents(zv);
		return (err ? err : EIO);
	}

	return (0);
}
/*ARGSUSED*/
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
	zfs_creat_t *zct = arg;
	nvlist_t *nvprops = zct->zct_props;
	int error;
	uint64_t volblocksize, volsize;

	VERIFY(nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
	if (nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

	/*
	 * These properties must be removed from the list so the generic
	 * property setting step won't apply to them.
	 */
	VERIFY(nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
	(void) nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
	ASSERT(error == 0);
}
/*
 * Replay a TX_WRITE ZIL transaction that didn't get committed
 * after a system failure.
 */
static int
zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
{
	objset_t *os = zv->zv_objset;
	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
	uint64_t offset, length;
	dmu_tx_t *tx;
	int error;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	/* If it's a dmu_sync() block, write the whole block */
	if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
		uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
		if (length < blocksize) {
			offset -= offset % blocksize;
			length = blocksize;
		}
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
		dmu_tx_commit(tx);
	}

	return (error);
}
/* ARGSUSED */
static int
zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
{
	return (ENOTSUP);
}
/*
 * Callback vectors for replaying records.
 * Only TX_WRITE is needed for zvol.
 */
zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
	zvol_replay_err,	/* 0 no such transaction type */
	zvol_replay_err,	/* TX_CREATE */
	zvol_replay_err,	/* TX_MKDIR */
	zvol_replay_err,	/* TX_MKXATTR */
	zvol_replay_err,	/* TX_SYMLINK */
	zvol_replay_err,	/* TX_REMOVE */
	zvol_replay_err,	/* TX_RMDIR */
	zvol_replay_err,	/* TX_LINK */
	zvol_replay_err,	/* TX_RENAME */
	zvol_replay_write,	/* TX_WRITE */
	zvol_replay_err,	/* TX_TRUNCATE */
	zvol_replay_err,	/* TX_SETATTR */
	zvol_replay_err,	/* TX_ACL */
	zvol_replay_err,	/* TX_CREATE_ACL */
	zvol_replay_err,	/* TX_CREATE_ATTR */
	zvol_replay_err,	/* TX_CREATE_ACL_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL */
	zvol_replay_err,	/* TX_MKDIR_ATTR */
	zvol_replay_err,	/* TX_MKDIR_ACL_ATTR */
	zvol_replay_err,	/* TX_WRITE2 */
};
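/*
 * The vector is consumed during minor creation: zvol_create_minor() below
 * hands it to zil_replay(os, zv, zvol_replay_vector), which dispatches each
 * logged record to the handler indexed by its transaction type.
 */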
int
zvol_name2minor(const char *name, minor_t *minor)
{
	zvol_state_t *zv;

	mutex_enter(&spa_namespace_lock);
	zv = zvol_minor_lookup(name);
	if (minor && zv)
		*minor = zv->zv_minor;
	mutex_exit(&spa_namespace_lock);
	return (zv ? 0 : -1);
}
/*
 * Create a minor node (plus a whole lot more) for the specified volume.
 */
int
zvol_create_minor(const char *name)
{
	zfs_soft_state_t *zs;
	zvol_state_t *zv;
	objset_t *os;
	dmu_object_info_t doi;
	int error;

	ZFS_LOG(1, "Creating ZVOL %s...", name);

	mutex_enter(&spa_namespace_lock);

	if (zvol_minor_lookup(name) != NULL) {
		mutex_exit(&spa_namespace_lock);
		return (EEXIST);
	}

	/* lie and say we're read-only */
	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);
	if (error) {
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

#ifdef sun
	if ((minor = zfsdev_minor_alloc()) == 0) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&spa_namespace_lock);
		return (ENXIO);
	}

	if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
		dmu_objset_disown(os, FTAG);
		mutex_exit(&spa_namespace_lock);
		return (EAGAIN);
	}
	(void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
	    (char *)name);

	(void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);

	if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&spa_namespace_lock);
		return (EAGAIN);
	}

	(void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);

	if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
	    minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
		ddi_remove_minor_node(zfs_dip, chrbuf);
		ddi_soft_state_free(zfsdev_state, minor);
		dmu_objset_disown(os, FTAG);
		mutex_exit(&spa_namespace_lock);
		return (EAGAIN);
	}

	zs = ddi_get_soft_state(zfsdev_state, minor);
	zs->zss_type = ZSST_ZVOL;
	zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
#else	/* !sun */
	zv = zvol_geom_create(name);
#endif	/* !sun */

	(void) strlcpy(zv->zv_name, name, MAXPATHLEN);
	zv->zv_min_bs = DEV_BSHIFT;
	zv->zv_objset = os;
	if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));
	list_create(&zv->zv_extents, sizeof (zvol_extent_t),
	    offsetof(zvol_extent_t, ze_node));
	/* get and cache the blocksize */
	error = dmu_object_info(os, ZVOL_OBJ, &doi);
	ASSERT(error == 0);
	zv->zv_volblocksize = doi.doi_data_block_size;

	if (spa_writeable(dmu_objset_spa(os))) {
		if (zil_replay_disable)
			zil_destroy(dmu_objset_zil(os), B_FALSE);
		else
			zil_replay(os, zv, zvol_replay_vector);
	}
	dmu_objset_disown(os, FTAG);
	zv->zv_objset = NULL;

	zvol_minors++;

	mutex_exit(&spa_namespace_lock);

	zvol_geom_run(zv);

	ZFS_LOG(1, "ZVOL %s created.", name);

	return (0);
}
/*
 * Remove minor node for the specified volume.
 */
static int
zvol_remove_zv(zvol_state_t *zv)
{
#ifdef sun
	minor_t minor = zv->zv_minor;
#endif

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	if (zv->zv_total_opens != 0)
		return (EBUSY);

	ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);

#ifdef sun
	(void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
	ddi_remove_minor_node(zfs_dip, nmbuf);
#endif	/* sun */

	avl_destroy(&zv->zv_znode.z_range_avl);
	mutex_destroy(&zv->zv_znode.z_range_lock);

	zvol_geom_destroy(zv);

	zvol_minors--;
	return (0);
}
int
zvol_remove_minor(const char *name)
{
	zvol_state_t *zv;
	int rc;

	mutex_enter(&spa_namespace_lock);
	if ((zv = zvol_minor_lookup(name)) == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (ENXIO);
	}
	rc = zvol_remove_zv(zv);
	mutex_exit(&spa_namespace_lock);
	return (rc);
}
int
zvol_first_open(zvol_state_t *zv)
{
	objset_t *os;
	uint64_t volsize;
	uint64_t readonly;
	int error;

	/* lie and say we're read-only */
	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
	    zvol_tag, &os);
	if (error)
		return (error);

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error) {
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}
	zv->zv_objset = os;
	error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
	if (error) {
		dmu_objset_disown(os, zvol_tag);
		return (error);
	}
	zv->zv_volsize = volsize;
	zv->zv_zilog = zil_open(os, zvol_get_data);
	zvol_size_changed(zv);

	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
	    NULL) == 0);
	if (readonly || dmu_objset_is_snapshot(os) ||
	    !spa_writeable(dmu_objset_spa(os)))
		zv->zv_flags |= ZVOL_RDONLY;
	else
		zv->zv_flags &= ~ZVOL_RDONLY;
	return (error);
}
void
zvol_last_close(zvol_state_t *zv)
{
	zil_close(zv->zv_zilog);
	zv->zv_zilog = NULL;
	dmu_buf_rele(zv->zv_dbuf, zvol_tag);
	zv->zv_dbuf = NULL;
	dmu_objset_disown(zv->zv_objset, zvol_tag);
	zv->zv_objset = NULL;
}
static int
zvol_prealloc(zvol_state_t *zv)
{
	objset_t *os = zv->zv_objset;
	dmu_tx_t *tx;
	uint64_t refd, avail, usedobjs, availobjs;
	uint64_t resid = zv->zv_volsize;
	uint64_t off = 0;

	/* Check the space usage before attempting to allocate the space */
	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
	if (avail < zv->zv_volsize)
		return (ENOSPC);

	/* Free old extents if they exist */
	zvol_free_extents(zv);

	while (resid != 0) {
		int error;
		uint64_t bytes = MIN(resid, SPA_MAXBLOCKSIZE);

		tx = dmu_tx_create(os);
		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			(void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
			return (error);
		}
		dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
		dmu_tx_commit(tx);
		off += bytes;
		resid -= bytes;
	}
	txg_wait_synced(dmu_objset_pool(os), 0);

	return (0);
}
static int
zvol_update_volsize(objset_t *os, uint64_t volsize)
{
	dmu_tx_t *tx;
	int error;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	dmu_tx_commit(tx);

	if (error == 0)
		error = dmu_free_long_range(os,
		    ZVOL_OBJ, volsize, DMU_OBJECT_END);
	return (error);
}
void
zvol_remove_minors(const char *name)
{
	struct g_geom *gp, *gptmp;
	struct g_provider *pp;
	zvol_state_t *zv;
	size_t namelen;

	namelen = strlen(name);

	DROP_GIANT();
	mutex_enter(&spa_namespace_lock);
	g_topology_lock();

	LIST_FOREACH_SAFE(gp, &zfs_zvol_class.geom, geom, gptmp) {
		pp = LIST_FIRST(&gp->provider);
		if (pp == NULL)
			continue;
		zv = pp->private;
		if (zv == NULL)
			continue;
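		/* Remove the zvol itself and any children (name + '/'). */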
		if (strcmp(zv->zv_name, name) == 0 ||
		    (strncmp(zv->zv_name, name, namelen) == 0 &&
		    zv->zv_name[namelen] == '/')) {
			(void) zvol_remove_zv(zv);
		}
	}

	g_topology_unlock();
	mutex_exit(&spa_namespace_lock);
	PICKUP_GIANT();
}
int
zvol_set_volsize(const char *name, major_t maj, uint64_t volsize)
{
	zvol_state_t *zv = NULL;
	objset_t *os;
	int error;
	dmu_object_info_t doi;
	uint64_t old_volsize = 0ULL;
	uint64_t readonly;

	mutex_enter(&spa_namespace_lock);
	zv = zvol_minor_lookup(name);
	if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
		mutex_exit(&spa_namespace_lock);
		return (error);
	}

	if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
	    (error = zvol_check_volsize(volsize,
	    doi.doi_data_block_size)) != 0)
		goto out;

	VERIFY(dsl_prop_get_integer(name, "readonly", &readonly,
	    NULL) == 0);
	if (readonly) {
		error = EROFS;
		goto out;
	}

	error = zvol_update_volsize(os, volsize);
	/*
	 * Reinitialize the dump area to the new size.  If we
	 * failed to resize the dump area then restore it back to
	 * its original size.
	 */
	if (zv && error == 0) {
#ifdef ZVOL_DUMP
		if (zv->zv_flags & ZVOL_DUMPIFIED) {
			old_volsize = zv->zv_volsize;
			zv->zv_volsize = volsize;
			if ((error = zvol_dumpify(zv)) != 0 ||
			    (error = dumpvp_resize()) != 0) {
				(void) zvol_update_volsize(os, old_volsize);
				zv->zv_volsize = old_volsize;
				error = zvol_dumpify(zv);
			}
		}
#endif	/* ZVOL_DUMP */
		if (error == 0) {
			zv->zv_volsize = volsize;
			zvol_size_changed(zv);
		}
	}

#ifdef sun
	/*
	 * Generate a LUN expansion event.
	 */
	if (zv && error == 0) {
		sysevent_id_t eid;
		nvlist_t *attr;
		char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

		(void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
		    zv->zv_minor);

		VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);

		(void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
		    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);

		nvlist_free(attr);
		kmem_free(physpath, MAXPATHLEN);
	}
#endif	/* sun */

out:
	dmu_objset_rele(os, FTAG);

	mutex_exit(&spa_namespace_lock);

	return (error);
}
static int
zvol_open(struct g_provider *pp, int flag, int count)
{
	zvol_state_t *zv;
	int err = 0;

	mutex_enter(&spa_namespace_lock);
	zv = pp->private;
	if (zv == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (ENXIO);
	}

	if (zv->zv_total_opens == 0)
		err = zvol_first_open(zv);
	if (err) {
		mutex_exit(&spa_namespace_lock);
		return (err);
	}
	if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
		err = EROFS;
		goto out;
	}
	if (zv->zv_flags & ZVOL_EXCL) {
		err = EBUSY;
		goto out;
	}
	if (flag & FEXCL) {
		if (zv->zv_total_opens != 0) {
			err = EBUSY;
			goto out;
		}
		zv->zv_flags |= ZVOL_EXCL;
	}

	zv->zv_total_opens += count;
	mutex_exit(&spa_namespace_lock);

	return (err);
out:
	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);
	mutex_exit(&spa_namespace_lock);
	return (err);
}
static int
zvol_close(struct g_provider *pp, int flag, int count)
{
	zvol_state_t *zv;
	int error = 0;

	mutex_enter(&spa_namespace_lock);
	zv = pp->private;
	if (zv == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (ENXIO);
	}

	if (zv->zv_flags & ZVOL_EXCL) {
		ASSERT(zv->zv_total_opens == 1);
		zv->zv_flags &= ~ZVOL_EXCL;
	}

	/*
	 * If the open count is zero, this is a spurious close.
	 * That indicates a bug in the kernel / DDI framework.
	 */
	ASSERT(zv->zv_total_opens != 0);

	/*
	 * You may get multiple opens, but only one close.
	 */
	zv->zv_total_opens -= count;

	if (zv->zv_total_opens == 0)
		zvol_last_close(zv);

	mutex_exit(&spa_namespace_lock);
	return (error);
}
static void
zvol_get_done(zgd_t *zgd, int error)
{
	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
}
/*
 * Get data to generate a TX_WRITE intent log record.
 */
static int
zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	zvol_state_t *zv = arg;
	objset_t *os = zv->zv_objset;
	uint64_t object = ZVOL_OBJ;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;	/* length of user data */
	blkptr_t *bp = &lr->lr_blkptr;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error;

	ASSERT(zio != NULL);
	ASSERT(size != 0);

	zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_zilog = zv->zv_zilog;
	zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) {	/* immediate write */
		error = dmu_read(os, object, offset, size, buf,
		    DMU_READ_NO_PREFETCH);
	} else {
		size = zv->zv_volblocksize;
		offset = P2ALIGN(offset, size);
		error = dmu_buf_hold(os, object, offset, zgd, &db,
		    DMU_READ_NO_PREFETCH);
		if (error == 0) {
			zgd->zgd_db = db;
			zgd->zgd_bp = bp;

			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zvol_get_done, zgd);

			if (error == 0)
				return (0);
		}
	}

	zvol_get_done(zgd, error);

	return (error);
}
/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;
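/*
 * Illustrative decision table (a sketch derived from the code below) for how
 * each chunk of a write gets logged:
 *
 *	if (blocksize > immediate_write_sz && !slogging &&
 *	    resid >= blocksize && off % blocksize == 0)
 *		-> WR_INDIRECT:  sync the block and log a pointer (dmu_sync)
 *	else if (sync)
 *		-> WR_COPIED:    copy the data into the log record now
 *	else
 *		-> WR_NEED_COPY: copy only if/when the itx must be committed
 */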
static void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	uint32_t blocksize = zv->zv_volblocksize;
	zilog_t *zilog = zv->zv_zilog;
	boolean_t slogging;
	ssize_t immediate_write_sz;

	if (zil_replaying(zilog, tx))
		return;

	immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
	    ? 0 : zvol_immediate_write_sz;

	slogging = spa_has_slogs(zilog->zl_spa) &&
	    (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

	while (resid) {
		itx_t *itx;
		lr_write_t *lr;
		ssize_t len;
		itx_wr_state_t write_state;

		/*
		 * Unlike zfs_log_write() we can be called with
		 * up to DMU_MAX_ACCESS/2 (5MB) writes.
		 */
		if (blocksize > immediate_write_sz && !slogging &&
		    resid >= blocksize && off % blocksize == 0) {
			write_state = WR_INDIRECT; /* uses dmu_sync */
			len = blocksize;
		} else if (sync) {
			write_state = WR_COPIED;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		} else {
			write_state = WR_NEED_COPY;
			len = MIN(ZIL_MAX_LOG_DATA, resid);
		}

		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
		    (write_state == WR_COPIED ? len : 0));
		lr = (lr_write_t *)&itx->itx_lr;
		if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
		    ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
			zil_itx_destroy(itx);
			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
			lr = (lr_write_t *)&itx->itx_lr;
			write_state = WR_NEED_COPY;
		}

		itx->itx_wr_state = write_state;
		if (write_state == WR_NEED_COPY)
			itx->itx_sod += len;
		lr->lr_foid = ZVOL_OBJ;
		lr->lr_offset = off;
		lr->lr_length = len;
		BP_ZERO(&lr->lr_blkptr);

		itx->itx_private = zv;
		itx->itx_sync = sync;

		zil_itx_assign(zilog, itx, tx);

		off += len;
		resid -= len;
	}
}
static int
zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t size,
    boolean_t doread, boolean_t isdump)
{
	vdev_disk_t *dvd;
	int c;
	int numerrors = 0;

	for (c = 0; c < vd->vdev_children; c++) {
		ASSERT(vd->vdev_ops == &vdev_mirror_ops ||
		    vd->vdev_ops == &vdev_replacing_ops ||
		    vd->vdev_ops == &vdev_spare_ops);
		int err = zvol_dumpio_vdev(vd->vdev_child[c],
		    addr, offset, size, doread, isdump);
		if (err != 0) {
			numerrors++;
		} else if (doread) {
			break;
		}
	}

	if (!vd->vdev_ops->vdev_op_leaf)
		return (numerrors < vd->vdev_children ? 0 : EIO);

	if (doread && !vdev_readable(vd))
		return (EIO);
	else if (!doread && !vdev_writeable(vd))
		return (EIO);

	dvd = vd->vdev_tsd;
	ASSERT3P(dvd, !=, NULL);
	offset += VDEV_LABEL_START_SIZE;

	if (ddi_in_panic() || isdump) {
		ASSERT(!doread);
		return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
		    lbtodb(size)));
	} else {
		return (vdev_disk_physio(dvd->vd_lh, addr, size, offset,
		    doread ? B_READ : B_WRITE));
	}
}
static int
zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
    boolean_t doread, boolean_t isdump)
{
	vdev_t *vd;
	int error;
	zvol_extent_t *ze;
	spa_t *spa = dmu_objset_spa(zv->zv_objset);

	/* Must be sector aligned, and not straddle a block boundary. */
	if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
	    P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
		return (EINVAL);
	}
	ASSERT(size <= zv->zv_volblocksize);

	/* Locate the extent this belongs to */
	ze = list_head(&zv->zv_extents);
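	/*
	 * Each extent covers ze_nblks volume blocks; walk the list,
	 * reducing the logical offset until it falls inside an extent.
	 */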
	while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
		offset -= ze->ze_nblks * zv->zv_volblocksize;
		ze = list_next(&zv->zv_extents, ze);
	}

	if (!ddi_in_panic())
		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

	vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
	offset += DVA_GET_OFFSET(&ze->ze_dva);
	error = zvol_dumpio_vdev(vd, addr, offset, size, doread, isdump);

	if (!ddi_in_panic())
		spa_config_exit(spa, SCL_STATE, FTAG);

	return (error);
}
static void
zvol_strategy(struct bio *bp)
{
	zvol_state_t *zv = bp->bio_to->private;
	uint64_t off, volsize;
	size_t resid;
	char *addr;
	objset_t *os;
	rl_t *rl;
	int error = 0;
	boolean_t doread = (bp->bio_cmd == BIO_READ);
	boolean_t sync;

	if (zv == NULL) {
		g_io_deliver(bp, ENXIO);
		return;
	}

	if (bp->bio_cmd != BIO_READ && (zv->zv_flags & ZVOL_RDONLY)) {
		g_io_deliver(bp, EROFS);
		return;
	}

	off = bp->bio_offset;
	volsize = zv->zv_volsize;

	os = zv->zv_objset;
	ASSERT(os != NULL);

	addr = bp->bio_data;
	resid = bp->bio_length;

	if (resid > 0 && (off < 0 || off >= volsize)) {
		g_io_deliver(bp, EIO);
		return;
	}

	sync = !doread && zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;

	/*
	 * There must be no buffer changes when doing a dmu_sync() because
	 * we can't change the data whilst calculating the checksum.
	 */
	rl = zfs_range_lock(&zv->zv_znode, off, resid,
	    doread ? RL_READER : RL_WRITER);

	while (resid != 0 && off < volsize) {
		size_t size = MIN(resid, zvol_maxphys);

		if (doread) {
			error = dmu_read(os, ZVOL_OBJ, off, size, addr,
			    DMU_READ_PREFETCH);
		} else {
			dmu_tx_t *tx = dmu_tx_create(os);
			dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
			error = dmu_tx_assign(tx, TXG_WAIT);
			if (error) {
				dmu_tx_abort(tx);
			} else {
				dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
				zvol_log_write(zv, tx, off, size, sync);
				dmu_tx_commit(tx);
			}
		}
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = EIO;
			break;
		}
		off += size;
		addr += size;
		resid -= size;
	}
	zfs_range_unlock(rl);

	bp->bio_completed = bp->bio_length - resid;
	if (bp->bio_completed < bp->bio_length)
		bp->bio_error = (off > volsize ? EINVAL : error);

	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	g_io_deliver(bp, 0);
}
/*
 * Set the buffer count to the zvol maximum transfer.
 * Using our own routine instead of the default minphys()
 * means that for larger writes we write bigger buffers on X86
 * (128K instead of 56K) and flush the disk write cache less often
 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
 * 56K on X86 and 128K on sparc).
 */
void
zvol_minphys(struct buf *bp)
{
	if (bp->b_bcount > zvol_maxphys)
		bp->b_bcount = zvol_maxphys;
}
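/*
 * zvol_minphys() is handed to physio() as the minphys callback by
 * zvol_read() and zvol_write() below, on the dumpified I/O paths.
 */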
int
zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	int error = 0;
	uint64_t size;
	uint64_t boff;
	uint64_t resid;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (ENXIO);

	boff = ldbtob(blkno);
	resid = ldbtob(nblocks);

	VERIFY3U(boff + resid, <=, zv->zv_volsize);

	while (resid) {
		size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
		error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
		if (error)
			break;
		boff += size;
		addr += size;
		resid -= size;
	}

	return (error);
}
/*ARGSUSED*/
int
zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (ENXIO);

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
		return (EIO);

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_READ,
		    zvol_minphys, uio);
		return (error);
	}

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_READER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);

		/* don't read past the end */
		if (bytes > volsize - uio->uio_loffset)
			bytes = volsize - uio->uio_loffset;

		error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = EIO;
			break;
		}
	}
	zfs_range_unlock(rl);
	return (error);
}
/*ARGSUSED*/
int
zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
{
	minor_t minor = getminor(dev);
	zvol_state_t *zv;
	uint64_t volsize;
	rl_t *rl;
	int error = 0;
	boolean_t sync;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (ENXIO);

	volsize = zv->zv_volsize;
	if (uio->uio_resid > 0 &&
	    (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
		return (EIO);

	if (zv->zv_flags & ZVOL_DUMPIFIED) {
		error = physio(zvol_strategy, NULL, dev, B_WRITE,
		    zvol_minphys, uio);
		return (error);
	}

	sync = !(zv->zv_flags & ZVOL_WCE) ||
	    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);

	rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
	    RL_WRITER);
	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
		uint64_t off = uio->uio_loffset;
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);

		if (bytes > volsize - off)	/* don't write past the end */
			bytes = volsize - off;

		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			break;
		}
		error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
		if (error == 0)
			zvol_log_write(zv, tx, off, bytes, sync);
		dmu_tx_commit(tx);

		if (error)
			break;
	}
	zfs_range_unlock(rl);
	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
	return (error);
}
int
zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
{
	struct uuid uuid = EFI_RESERVED;
	efi_gpe_t gpe = { 0 };
	uint32_t crc;
	dk_efi_t efi;
	uint64_t length;
	char *ptr;

	if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
		return (EFAULT);
	ptr = (char *)(uintptr_t)efi.dki_data_64;
	length = efi.dki_length;
	/*
	 * Some clients may attempt to request a PMBR for the
	 * zvol.  Currently this interface will return EINVAL to
	 * such requests.  These requests could be supported by
	 * adding a check for lba == 0 and consing up an appropriate
	 * PMBR.
	 */
	if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
		return (EINVAL);

	gpe.efi_gpe_StartingLBA = LE_64(34ULL);
	gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
	UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);

	if (efi.dki_lba == 1) {
		efi_gpt_t gpt = { 0 };

		gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
		gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
		gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
		gpt.efi_gpt_MyLBA = LE_64(1ULL);
		gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
		gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
		gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
		gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
		gpt.efi_gpt_SizeOfPartitionEntry =
		    LE_32(sizeof (efi_gpe_t));
		CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
		gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
		CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
		gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
		if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
		    flag))
			return (EFAULT);
		ptr += sizeof (gpt);
		length -= sizeof (gpt);
	}
	if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
	    length), flag))
		return (EFAULT);
	return (0);
}
/*
 * BEGIN entry points to allow external callers access to the volume.
 */
/*
 * Return the volume parameters needed for access from an external caller.
 * These values are invariant as long as the volume is held open.
 */
int
zvol_get_volume_params(minor_t minor, uint64_t *blksize,
    uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
    void **rl_hdl, void **bonus_hdl)
{
	zvol_state_t *zv;

	zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
	if (zv == NULL)
		return (ENXIO);
	if (zv->zv_flags & ZVOL_DUMPIFIED)
		return (ENXIO);

	ASSERT(blksize && max_xfer_len && minor_hdl &&
	    objset_hdl && zil_hdl && rl_hdl && bonus_hdl);

	*blksize = zv->zv_volblocksize;
	*max_xfer_len = (uint64_t)zvol_maxphys;
	*minor_hdl = zv;
	*objset_hdl = zv->zv_objset;
	*zil_hdl = zv->zv_zilog;
	*rl_hdl = &zv->zv_znode;
	*bonus_hdl = zv->zv_dbuf;
	return (0);
}
/*
 * Return the current volume size to an external caller.
 * The size can change while the volume is open.
 */
uint64_t
zvol_get_volume_size(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return (zv->zv_volsize);
}
/*
 * Return the current WCE setting to an external caller.
 * The WCE setting can change while the volume is open.
 */
int
zvol_get_volume_wce(void *minor_hdl)
{
	zvol_state_t *zv = minor_hdl;

	return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
}
/*
 * Entry point for external callers to zvol_log_write
 */
void
zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
	zvol_state_t *zv = minor_hdl;

	zvol_log_write(zv, tx, off, resid, sync);
}
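/*
 * Illustrative sketch (not part of the driver) of how an external caller
 * strings these entry points together:
 *
 *	uint64_t blksize, maxlen;
 *	void *hdl, *os, *zil, *rl, *bonus;
 *
 *	if (zvol_get_volume_params(minor, &blksize, &maxlen,
 *	    &hdl, &os, &zil, &rl, &bonus) == 0) {
 *		uint64_t size = zvol_get_volume_size(hdl);
 *		int wce = zvol_get_volume_wce(hdl);
 *		// ... perform I/O, then log it through the same handle:
 *		// zvol_log_write_minor(hdl, tx, off, resid, sync);
 *	}
 */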
/*
 * END entry points to allow external callers access to the volume.
 */
/*
 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
 */
/*ARGSUSED*/
int
zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
{
	zvol_state_t *zv;
	struct dk_cinfo dki;
	struct dk_minfo dkm;
	struct dk_callback *dkc;
	int error = 0;
	rl_t *rl;

	mutex_enter(&spa_namespace_lock);

	zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);

	if (zv == NULL) {
		mutex_exit(&spa_namespace_lock);
		return (ENXIO);
	}
	ASSERT(zv->zv_total_opens > 0);

	switch (cmd) {

	case DKIOCINFO:
		bzero(&dki, sizeof (dki));
		(void) strcpy(dki.dki_cname, "zvol");
		(void) strcpy(dki.dki_dname, "zvol");
		dki.dki_ctype = DKC_UNKNOWN;
		dki.dki_unit = getminor(dev);
		dki.dki_maxtransfer = 1 << (SPA_MAXBLOCKSHIFT - zv->zv_min_bs);
		mutex_exit(&spa_namespace_lock);
		if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
			error = EFAULT;
		return (error);

	case DKIOCGMEDIAINFO:
		bzero(&dkm, sizeof (dkm));
		dkm.dki_lbsize = 1U << zv->zv_min_bs;
		dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
		dkm.dki_media_type = DK_UNKNOWN;
		mutex_exit(&spa_namespace_lock);
		if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
			error = EFAULT;
		return (error);

	case DKIOCGETEFI:
		{
			uint64_t vs = zv->zv_volsize;
			uint8_t bs = zv->zv_min_bs;

			mutex_exit(&spa_namespace_lock);
			error = zvol_getefi((void *)arg, flag, vs, bs);
			return (error);
		}

	case DKIOCFLUSHWRITECACHE:
		dkc = (struct dk_callback *)arg;
		mutex_exit(&spa_namespace_lock);
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
		if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
			(*dkc->dkc_callback)(dkc->dkc_cookie, error);
			error = 0;
		}
		return (error);

	case DKIOCGETWCE:
		{
			int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
			if (ddi_copyout(&wce, (void *)arg, sizeof (int),
			    flag))
				error = EFAULT;
			break;
		}
	case DKIOCSETWCE:
		{
			int wce;
			if (ddi_copyin((void *)arg, &wce, sizeof (int),
			    flag)) {
				error = EFAULT;
				break;
			}
			if (wce) {
				zv->zv_flags |= ZVOL_WCE;
				mutex_exit(&spa_namespace_lock);
			} else {
				zv->zv_flags &= ~ZVOL_WCE;
				mutex_exit(&spa_namespace_lock);
				zil_commit(zv->zv_zilog, ZVOL_OBJ);
			}
			return (0);
		}

	case DKIOCGGEOM:
	case DKIOCGVTOC:
		/*
		 * commands using these (like prtvtoc) expect ENOTSUP
		 * since we're emulating an EFI label
		 */
		error = ENOTSUP;
		break;

	case DKIOCDUMPINIT:
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dumpify(zv);
		zfs_range_unlock(rl);
		break;

	case DKIOCDUMPFINI:
		if (!(zv->zv_flags & ZVOL_DUMPIFIED))
			break;
		rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
		    RL_WRITER);
		error = zvol_dump_fini(zv);
		zfs_range_unlock(rl);
		break;

	default:
		error = ENOTTY;
		break;

	}
	mutex_exit(&spa_namespace_lock);
	return (error);
}
int
zvol_busy(void)
{
	return (zvol_minors != 0);
}
void
zvol_init(void)
{
	VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
	    1) == 0);
	ZFS_LOG(1, "ZVOL Initialized.");
}

void
zvol_fini(void)
{
	ddi_soft_state_fini(&zfsdev_state);
	ZFS_LOG(1, "ZVOL Deinitialized.");
}
static int
zvol_dump_init(zvol_state_t *zv, boolean_t resize)
{
	dmu_tx_t *tx;
	int error = 0;
	objset_t *os = zv->zv_objset;
	nvlist_t *nv = NULL;
	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
	    DMU_OBJECT_END);
	/* wait for dmu_free_long_range to actually free the blocks */
	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	/*
	 * If we are resizing the dump device then we only need to
	 * update the refreservation to match the newly updated
	 * zvol size.  Otherwise, we save off the original state of the
	 * zvol so that we can restore them if the zvol is ever undumpified.
	 */
	if (resize) {
		error = zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
		    &zv->zv_volsize, tx);
	} else {
		uint64_t checksum, compress, refresrv, vbs, dedup;

		error = dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &refresrv, NULL);
		error = error ? error : dsl_prop_get_integer(zv->zv_name,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs, NULL);
		if (version >= SPA_VERSION_DEDUP) {
			error = error ? error :
			    dsl_prop_get_integer(zv->zv_name,
			    zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
		}

		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
		    &compress, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
		    &refresrv, tx);
		error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
		    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
		    &vbs, tx);
		error = error ? error : dmu_object_set_blocksize(
		    os, ZVOL_OBJ, SPA_MAXBLOCKSIZE, 0, tx);
		if (version >= SPA_VERSION_DEDUP) {
			error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
			    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
			    &dedup, tx);
		}
		if (error == 0)
			zv->zv_volblocksize = SPA_MAXBLOCKSIZE;
	}
	dmu_tx_commit(tx);

	/*
	 * We only need to update the zvol's property if we are initializing
	 * the dump area for the first time.
	 */
	if (!resize) {
		VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
		    ZIO_COMPRESS_OFF) == 0);
		VERIFY(nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
		    ZIO_CHECKSUM_OFF) == 0);
		if (version >= SPA_VERSION_DEDUP) {
			VERIFY(nvlist_add_uint64(nv,
			    zfs_prop_to_name(ZFS_PROP_DEDUP),
			    ZIO_CHECKSUM_OFF) == 0);
		}

		error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
		    nv, NULL);
		nvlist_free(nv);

		if (error)
			return (error);
	}

	/* Allocate the space for the dump */
	error = zvol_prealloc(zv);
	return (error);
}
static int
zvol_dumpify(zvol_state_t *zv)
{
	int error = 0;
	uint64_t dumpsize = 0;
	dmu_tx_t *tx;
	objset_t *os = zv->zv_objset;

	if (zv->zv_flags & ZVOL_RDONLY)
		return (EROFS);

	if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
	    8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
		boolean_t resize = (dumpsize > 0) ? B_TRUE : B_FALSE;

		if ((error = zvol_dump_init(zv, resize)) != 0) {
			(void) zvol_dump_fini(zv);
			return (error);
		}
	}

	/*
	 * Build up our lba mapping.
	 */
	error = zvol_get_lbas(zv);
	if (error) {
		(void) zvol_dump_fini(zv);
		return (error);
	}

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		(void) zvol_dump_fini(zv);
		return (error);
	}

	zv->zv_flags |= ZVOL_DUMPIFIED;
	error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
	    &zv->zv_volsize, tx);
	dmu_tx_commit(tx);

	if (error) {
		(void) zvol_dump_fini(zv);
		return (error);
	}

	txg_wait_synced(dmu_objset_pool(os), 0);
	return (0);
}
static int
zvol_dump_fini(zvol_state_t *zv)
{
	dmu_tx_t *tx;
	objset_t *os = zv->zv_objset;
	nvlist_t *nv;
	int error = 0;
	uint64_t checksum, compress, refresrv, vbs, dedup;
	uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));

	/*
	 * Attempt to restore the zvol back to its pre-dumpified state.
	 * This is a best-effort attempt as it's possible that not all
	 * of these properties were initialized during the dumpify process
	 * (i.e. error during zvol_dump_init).
	 */

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}
	(void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
	dmu_tx_commit(tx);

	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
	(void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);

	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
	(void) nvlist_add_uint64(nv,
	    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
	if (version >= SPA_VERSION_DEDUP &&
	    zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
	    zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
		(void) nvlist_add_uint64(nv,
		    zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
	}
	(void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
	    nv, NULL);
	nvlist_free(nv);

	zvol_free_extents(zv);
	zv->zv_flags &= ~ZVOL_DUMPIFIED;
	(void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
	/* wait for dmu_free_long_range to actually free the blocks */
	txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);

	tx = dmu_tx_create(os);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}
	if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
		zv->zv_volblocksize = vbs;
	dmu_tx_commit(tx);

	return (0);
}
static zvol_state_t *
zvol_geom_create(const char *name)
{
	struct g_provider *pp;
	struct g_geom *gp;
	zvol_state_t *zv;

	gp = g_new_geomf(&zfs_zvol_class, "zfs::zvol::%s", name);
	gp->start = zvol_geom_start;
	gp->access = zvol_geom_access;
	pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, name);
	pp->sectorsize = DEV_BSIZE;

	zv = kmem_zalloc(sizeof (*zv), KM_SLEEP);
	zv->zv_provider = pp;
	zv->zv_state = 0;
	pp->private = zv;

	bioq_init(&zv->zv_queue);
	mtx_init(&zv->zv_queue_mtx, "zvol", NULL, MTX_DEF);

	return (zv);
}
static void
zvol_geom_run(zvol_state_t *zv)
{
	struct g_provider *pp;

	pp = zv->zv_provider;
	g_error_provider(pp, 0);

	kproc_kthread_add(zvol_geom_worker, zv, &zfsproc, NULL, 0, 0,
	    "zfskern", "zvol %s", pp->name + sizeof (ZVOL_DRIVER));
}
static void
zvol_geom_destroy(zvol_state_t *zv)
{
	struct g_provider *pp;

	g_topology_assert();

	mtx_lock(&zv->zv_queue_mtx);
	zv->zv_state = 1;
	wakeup_one(&zv->zv_queue);
	while (zv->zv_state != 2)
		msleep(&zv->zv_state, &zv->zv_queue_mtx, 0, "zvol:w", 0);
	mtx_destroy(&zv->zv_queue_mtx);

	pp = zv->zv_provider;
	zv->zv_provider = NULL;
	pp->private = NULL;
	g_wither_geom(pp->geom, ENXIO);

	kmem_free(zv, sizeof (*zv));
}
static int
zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace)
{
	int count, error, flags;

	g_topology_assert();

	/*
	 * To make it easier we expect either open or close, but not both
	 * at the same time.
	 */
	KASSERT((acr >= 0 && acw >= 0 && ace >= 0) ||
	    (acr <= 0 && acw <= 0 && ace <= 0),
	    ("Unsupported access request to %s (acr=%d, acw=%d, ace=%d).",
	    pp->name, acr, acw, ace));

	if (pp->private == NULL) {
		if (acr <= 0 && acw <= 0 && ace <= 0)
			return (0);
		return (pp->error);
	}

	/*
	 * We don't pass FEXCL flag to zvol_open()/zvol_close() if ace != 0,
	 * because GEOM already handles that and handles it a bit differently.
	 * GEOM allows for multiple read/exclusive consumers and ZFS allows
	 * only one exclusive consumer, no matter if it is reader or writer.
	 * I like better the way GEOM works so I'll leave it for GEOM to
	 * decide what to do.
	 */

	count = acr + acw + ace;
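	/*
	 * The summed access deltas give one signed count: positive means
	 * this request opens the provider, negative means it closes it.
	 */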
	if (count == 0)
		return (0);

	flags = 0;
	if (acr != 0 || ace != 0)
		flags |= FREAD;
	if (acw != 0)
		flags |= FWRITE;

	g_topology_unlock();
	if (count > 0)
		error = zvol_open(pp, flags, count);
	else
		error = zvol_close(pp, flags, -count);
	g_topology_lock();
	return (error);
}
static void
zvol_geom_start(struct bio *bp)
{
	zvol_state_t *zv;
	boolean_t first;

	switch (bp->bio_cmd) {
	case BIO_READ:
	case BIO_WRITE:
	case BIO_FLUSH:
		zv = bp->bio_to->private;
		ASSERT(zv != NULL);
		mtx_lock(&zv->zv_queue_mtx);
		first = (bioq_first(&zv->zv_queue) == NULL);
		bioq_insert_tail(&zv->zv_queue, bp);
		mtx_unlock(&zv->zv_queue_mtx);
		if (first)
			wakeup_one(&zv->zv_queue);
		break;
	default:
		g_io_deliver(bp, EOPNOTSUPP);
		break;
	}
}
static void
zvol_geom_worker(void *arg)
{
	zvol_state_t *zv;
	struct bio *bp;

	thread_lock(curthread);
	sched_prio(curthread, PRIBIO);
	thread_unlock(curthread);

	zv = arg;
	for (;;) {
		mtx_lock(&zv->zv_queue_mtx);
		bp = bioq_takefirst(&zv->zv_queue);
		if (bp == NULL) {
			if (zv->zv_state == 1) {
				zv->zv_state = 2;
				wakeup(&zv->zv_state);
				mtx_unlock(&zv->zv_queue_mtx);
				kthread_exit();
			}
			msleep(&zv->zv_queue, &zv->zv_queue_mtx, PRIBIO | PDROP,
			    "zvol:io", 0);
			continue;
		}
		mtx_unlock(&zv->zv_queue_mtx);
		switch (bp->bio_cmd) {
		case BIO_FLUSH:
			zil_commit(zv->zv_zilog, ZVOL_OBJ);
			g_io_deliver(bp, 0);
			break;
		case BIO_READ:
		case BIO_WRITE:
			zvol_strategy(bp);
			break;
		}
	}
}
extern boolean_t dataset_name_hidden(const char *name);
static int
zvol_create_snapshots(objset_t *os, const char *name)
{
	uint64_t cookie, obj;
	char *sname;
	int error, len;

	cookie = obj = 0;
	sname = kmem_alloc(MAXPATHLEN, KM_SLEEP);

	(void) dmu_objset_find(name, dmu_objset_prefetch, NULL,
	    DS_FIND_SNAPSHOTS);

	for (;;) {
		len = snprintf(sname, MAXPATHLEN, "%s@", name);
		if (len >= MAXPATHLEN) {
			dmu_objset_rele(os, FTAG);
			error = ENAMETOOLONG;
			break;
		}

		error = dmu_snapshot_list_next(os, MAXPATHLEN - len,
		    sname + len, &obj, &cookie, NULL);
		if (error) {
			if (error == ENOENT)
				error = 0;
			break;
		}

		if ((error = zvol_create_minor(sname)) != 0) {
			printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",
			    sname, error);
			break;
		}
	}

	kmem_free(sname, MAXPATHLEN);
	return (error);
}
int
zvol_create_minors(const char *name)
{
	uint64_t cookie;
	objset_t *os;
	char *osname, *p;
	int error, len;

	if (dataset_name_hidden(name))
		return (0);

	if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
		printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",
		    name, error);
		return (error);
	}
	if (dmu_objset_type(os) == DMU_OST_ZVOL) {
		if ((error = zvol_create_minor(name)) == 0)
			error = zvol_create_snapshots(os, name);
		else {
			printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",
			    name, error);
		}
		dmu_objset_rele(os, FTAG);
		return (error);
	}
	if (dmu_objset_type(os) != DMU_OST_ZFS) {
		dmu_objset_rele(os, FTAG);
		return (0);
	}

	osname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	if (snprintf(osname, MAXPATHLEN, "%s/", name) >= MAXPATHLEN) {
		dmu_objset_rele(os, FTAG);
		kmem_free(osname, MAXPATHLEN);
		return (ENOENT);
	}
	p = osname + strlen(osname);
	len = MAXPATHLEN - (p - osname);

	/* Prefetch the datasets. */
	cookie = 0;
	while (dmu_dir_list_next(os, len, p, NULL, &cookie) == 0) {
		if (!dataset_name_hidden(osname))
			(void) dmu_objset_prefetch(osname, NULL);
	}

	cookie = 0;
	while (dmu_dir_list_next(os, MAXPATHLEN - (p - osname), p, NULL,
	    &cookie) == 0) {
		dmu_objset_rele(os, FTAG);
		(void) zvol_create_minors(osname);
		if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
			printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",
			    name, error);
			return (error);
		}
	}

	dmu_objset_rele(os, FTAG);
	kmem_free(osname, MAXPATHLEN);
	return (0);
}
static void
zvol_rename_minor(struct g_geom *gp, const char *newname)
{
	struct g_provider *pp;
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	g_topology_assert();

	pp = LIST_FIRST(&gp->provider);
	if (pp == NULL)
		return;
	zv = pp->private;
	if (zv == NULL)
		return;

	zv->zv_provider = NULL;
	g_wither_provider(pp, ENXIO);

	pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, newname);
	pp->sectorsize = DEV_BSIZE;
	pp->mediasize = zv->zv_volsize;
	pp->private = zv;
	zv->zv_provider = pp;
	strlcpy(zv->zv_name, newname, sizeof (zv->zv_name));
	g_error_provider(pp, 0);
}
void
zvol_rename_minors(const char *oldname, const char *newname)
{
	char name[MAXPATHLEN];
	struct g_provider *pp;
	struct g_geom *gp;
	size_t oldnamelen, newnamelen;
	zvol_state_t *zv;

	oldnamelen = strlen(oldname);
	newnamelen = strlen(newname);

	DROP_GIANT();
	mutex_enter(&spa_namespace_lock);
	g_topology_lock();
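	/* Rename the volume itself and any descendants ('/') or snapshots ('@'). */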
	LIST_FOREACH(gp, &zfs_zvol_class.geom, geom) {
		pp = LIST_FIRST(&gp->provider);
		if (pp == NULL)
			continue;
		zv = pp->private;
		if (zv == NULL)
			continue;
		if (strcmp(zv->zv_name, oldname) == 0) {
			zvol_rename_minor(gp, newname);
		} else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 &&
		    (zv->zv_name[oldnamelen] == '/' ||
		    zv->zv_name[oldnamelen] == '@')) {
			snprintf(name, sizeof (name), "%s%c%s", newname,
			    zv->zv_name[oldnamelen],
			    zv->zv_name + oldnamelen + 1);
			zvol_rename_minor(gp, name);
		}
	}

	g_topology_unlock();
	mutex_exit(&spa_namespace_lock);
	PICKUP_GIANT();
}