 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2006-2010 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
 */

/* Portions Copyright 2010 Robert Milkowski */
/* Portions Copyright 2011 Martin Matuska <mm@FreeBSD.org> */

/*
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 *	/dev/zvol/dsk/<pool_name>/<dataset_name>
 *	/dev/zvol/rdsk/<pool_name>/<dataset_name>
 *
 * These links are created by the /dev filesystem (sdev_zvolops.c).
 * Volumes are persistent through reboot.  No user command needs to be
 * run before opening and using a device.
 *
 * On FreeBSD ZVOLs are simply GEOM providers like any other storage device
 * in the system.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/cmn_err.h>
#include <sys/spa_impl.h>
#include <sys/dmu_traverse.h>
#include <sys/dnode.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/byteorder.h>
#include <sys/sunddi.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ioctl.h>
#include <sys/refcount.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_rlock.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_raidz.h>
#include <sys/zil_impl.h>
#include <sys/dmu_tx.h>
#include <sys/zfeature.h>
#include <sys/zio_checksum.h>
#include <geom/geom.h>

#include "zfs_namecheck.h"

struct g_class zfs_zvol_class = {
        .name = "ZFS::ZVOL",
        .version = G_VERSION,
};

DECLARE_GEOM_CLASS(zfs_zvol_class, zfs_zvol);

static char *zvol_tag = "zvol_tag";

#define	ZVOL_DUMPSIZE	"dumpsize"

/*
 * The spa_namespace_lock protects the zfsdev_state structure from being
 * modified while it's being used, e.g. an open that comes in before a
 * create finishes.  It also protects temporary opens of the dataset so that,
 * e.g., an open doesn't get a spurious EBUSY.
 */
static uint32_t zvol_minors;

typedef struct zvol_extent {
        list_node_t     ze_node;
        dva_t           ze_dva;         /* dva associated with this extent */
        uint64_t        ze_nblks;       /* number of blocks in extent */
} zvol_extent_t;

/*
 * The in-core state of each volume.
 */
typedef struct zvol_state {
        char            zv_name[MAXPATHLEN];    /* pool/dd name */
        uint64_t        zv_volsize;     /* amount of space we advertise */
        uint64_t        zv_volblocksize; /* volume block size */
        struct g_provider *zv_provider; /* GEOM provider */
        uint8_t         zv_min_bs;      /* minimum addressable block shift */
        uint8_t         zv_flags;       /* readonly, dumpified, etc. */
        objset_t        *zv_objset;     /* objset handle */
        uint32_t        zv_total_opens; /* total open count */
        zilog_t         *zv_zilog;      /* ZIL handle */
        list_t          zv_extents;     /* List of extents for dump */
        znode_t         zv_znode;       /* for range locking */
        dmu_buf_t       *zv_dbuf;       /* bonus handle */
        int             zv_state;
        struct bio_queue_head zv_queue;
        struct mtx      zv_queue_mtx;   /* zv_queue mutex */
} zvol_state_t;

/*
 * zvol specific flags
 */
#define	ZVOL_RDONLY	0x1
#define	ZVOL_DUMPIFIED	0x2
#define	ZVOL_EXCL	0x4
#define	ZVOL_WCE	0x8

/*
 * zvol maximum transfer in one DMU tx.
 */
int zvol_maxphys = DMU_MAX_ACCESS/2;

extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
    nvlist_t *, nvlist_t *);
static int zvol_remove_zv(zvol_state_t *);
static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
static int zvol_dumpify(zvol_state_t *zv);
static int zvol_dump_fini(zvol_state_t *zv);
static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);

static zvol_state_t *zvol_geom_create(const char *name);
static void zvol_geom_run(zvol_state_t *zv);
static void zvol_geom_destroy(zvol_state_t *zv);
static int zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace);
static void zvol_geom_start(struct bio *bp);
static void zvol_geom_worker(void *arg);

static void
zvol_size_changed(zvol_state_t *zv)
{
#ifdef sun
        dev_t dev = makedevice(maj, min);

        VERIFY(ddi_prop_update_int64(dev, zfs_dip,
            "Size", volsize) == DDI_SUCCESS);
        VERIFY(ddi_prop_update_int64(dev, zfs_dip,
            "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);

        /* Notify specfs to invalidate the cached size */
        spec_size_invalidate(dev, VBLK);
        spec_size_invalidate(dev, VCHR);
#else	/* !sun */
        struct g_provider *pp;

        pp = zv->zv_provider;
        if (pp == NULL)
                return;
        g_topology_lock();
        g_resize_provider(pp, zv->zv_volsize);
        g_topology_unlock();
#endif	/* !sun */
}

int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
        if (volsize == 0)
                return (SET_ERROR(EINVAL));

        if (volsize % blocksize != 0)
                return (SET_ERROR(EINVAL));

#ifdef _ILP32
        if (volsize - 1 > SPEC_MAXOFFSET_T)
                return (SET_ERROR(EOVERFLOW));
#endif
        return (0);
}

int
zvol_check_volblocksize(uint64_t volblocksize)
{
        if (volblocksize < SPA_MINBLOCKSIZE ||
            volblocksize > SPA_MAXBLOCKSIZE ||
            !ISP2(volblocksize))
                return (SET_ERROR(EDOM));

        return (0);
}

int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
        int error;
        dmu_object_info_t doi;
        uint64_t val;

        error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
        if (error)
                return (error);

        dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);

        error = dmu_object_info(os, ZVOL_OBJ, &doi);

        if (error == 0) {
                dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
                    doi.doi_data_block_size);
        }

        return (error);
}

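/*
 * Look up the zvol state for the given dataset name by walking the
 * GEOM class's geoms and their first providers; returns NULL if no
 * minor exists for that name.
 */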
static zvol_state_t *
zvol_minor_lookup(const char *name)
{
        struct g_provider *pp;
        struct g_geom *gp;
        zvol_state_t *zv = NULL;

        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        g_topology_lock();
        LIST_FOREACH(gp, &zfs_zvol_class.geom, geom) {
                pp = LIST_FIRST(&gp->provider);
                if (pp == NULL)
                        continue;
                zv = pp->private;
                if (zv == NULL)
                        continue;
                if (strcmp(zv->zv_name, name) == 0)
                        break;
        }
        g_topology_unlock();

        return (gp != NULL ? zv : NULL);
}

/* extent mapping arg */
struct maparg {
        zvol_state_t    *ma_zv;
        uint64_t        ma_blks;
};

/*ARGSUSED*/
static int
zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
    const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
{
        struct maparg *ma = arg;
        zvol_extent_t *ze;
        int bs = ma->ma_zv->zv_volblocksize;

        if (bp == NULL || zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
                return (0);

        VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
        ma->ma_blks++;

        /* Abort immediately if we have encountered gang blocks */
        if (BP_IS_GANG(bp))
                return (SET_ERROR(EFRAGS));

        /*
         * See if the block is at the end of the previous extent.
         */
        ze = list_tail(&ma->ma_zv->zv_extents);
        if (ze &&
            DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
            DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
            DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
                ze->ze_nblks++;
                return (0);
        }

        dprintf_bp(bp, "%s", "next blkptr:");

        /* start a new extent */
        ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
        ze->ze_dva = bp->blk_dva[0];    /* structure assignment */
        ze->ze_nblks = 1;
        list_insert_tail(&ma->ma_zv->zv_extents, ze);
        return (0);
}

static void
zvol_free_extents(zvol_state_t *zv)
{
        zvol_extent_t *ze;

        while (ze = list_head(&zv->zv_extents)) {
                list_remove(&zv->zv_extents, ze);
                kmem_free(ze, sizeof (zvol_extent_t));
        }
}

static int
zvol_get_lbas(zvol_state_t *zv)
{
        objset_t *os = zv->zv_objset;
        struct maparg   ma;
        int             err;

        ma.ma_zv = zv;
        ma.ma_blks = 0;
        zvol_free_extents(zv);

        /* commit any in-flight changes before traversing the dataset */
        txg_wait_synced(dmu_objset_pool(os), 0);
        err = traverse_dataset(dmu_objset_ds(os), 0,
            TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
        if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
                zvol_free_extents(zv);
                return (err ? err : EIO);
        }

        return (0);
}

/* ARGSUSED */
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
        zfs_creat_t *zct = arg;
        nvlist_t *nvprops = zct->zct_props;
        int error;
        uint64_t volblocksize, volsize;

        VERIFY(nvlist_lookup_uint64(nvprops,
            zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
        if (nvlist_lookup_uint64(nvprops,
            zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
                volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

        /*
         * These properties must be removed from the list so the generic
         * property setting step won't apply to them.
         */
        VERIFY(nvlist_remove_all(nvprops,
            zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
        (void) nvlist_remove_all(nvprops,
            zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

        error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
            DMU_OT_NONE, 0, tx);
        ASSERT(error == 0);

        error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
            DMU_OT_NONE, 0, tx);
        ASSERT(error == 0);

        error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
        ASSERT(error == 0);
}

/*
 * Replay a TX_WRITE ZIL transaction that didn't get committed
 * after a system failure
 */
static int
zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
{
        objset_t *os = zv->zv_objset;
        char *data = (char *)(lr + 1);  /* data follows lr_write_t */
        uint64_t offset, length;
        dmu_tx_t *tx;
        int error;

        if (byteswap)
                byteswap_uint64_array(lr, sizeof (*lr));

        offset = lr->lr_offset;
        length = lr->lr_length;

        /* If it's a dmu_sync() block, write the whole block */
        if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
                uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
                if (length < blocksize) {
                        offset -= offset % blocksize;
                        length = blocksize;
                }
        }

        tx = dmu_tx_create(os);
        dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
        error = dmu_tx_assign(tx, TXG_WAIT);
        if (error) {
                dmu_tx_abort(tx);
        } else {
                dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
                dmu_tx_commit(tx);
        }

        return (error);
}

/* ARGSUSED */
static int
zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
{
        return (SET_ERROR(ENOTSUP));
}

/*
 * Callback vectors for replaying records.
 * Only TX_WRITE is needed for zvol.
 */
zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
        zvol_replay_err,        /* 0 no such transaction type */
        zvol_replay_err,        /* TX_CREATE */
        zvol_replay_err,        /* TX_MKDIR */
        zvol_replay_err,        /* TX_MKXATTR */
        zvol_replay_err,        /* TX_SYMLINK */
        zvol_replay_err,        /* TX_REMOVE */
        zvol_replay_err,        /* TX_RMDIR */
        zvol_replay_err,        /* TX_LINK */
        zvol_replay_err,        /* TX_RENAME */
        zvol_replay_write,      /* TX_WRITE */
        zvol_replay_err,        /* TX_TRUNCATE */
        zvol_replay_err,        /* TX_SETATTR */
        zvol_replay_err,        /* TX_ACL */
        zvol_replay_err,        /* TX_CREATE_ACL */
        zvol_replay_err,        /* TX_CREATE_ATTR */
        zvol_replay_err,        /* TX_CREATE_ACL_ATTR */
        zvol_replay_err,        /* TX_MKDIR_ACL */
        zvol_replay_err,        /* TX_MKDIR_ATTR */
        zvol_replay_err,        /* TX_MKDIR_ACL_ATTR */
        zvol_replay_err,        /* TX_WRITE2 */
};

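/*
 * Translate a dataset name into its minor number; returns 0 on success
 * and -1 if no minor node exists for that name.
 */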
#ifdef sun
int
zvol_name2minor(const char *name, minor_t *minor)
{
        zvol_state_t *zv;

        mutex_enter(&spa_namespace_lock);
        zv = zvol_minor_lookup(name);
        if (minor && zv)
                *minor = zv->zv_minor;
        mutex_exit(&spa_namespace_lock);
        return (zv ? 0 : -1);
}
#endif	/* sun */

/*
 * Create a minor node (plus a whole lot more) for the specified volume.
 */
int
zvol_create_minor(const char *name)
{
        zfs_soft_state_t *zs;
        zvol_state_t *zv;
        objset_t *os;
        dmu_object_info_t doi;
        uint64_t volsize;
        int error;

        ZFS_LOG(1, "Creating ZVOL %s...", name);

        mutex_enter(&spa_namespace_lock);

        if (zvol_minor_lookup(name) != NULL) {
                mutex_exit(&spa_namespace_lock);
                return (SET_ERROR(EEXIST));
        }

        /* lie and say we're read-only */
        error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);

        if (error) {
                mutex_exit(&spa_namespace_lock);
                return (error);
        }

#ifdef sun
        if ((minor = zfsdev_minor_alloc()) == 0) {
                dmu_objset_disown(os, FTAG);
                mutex_exit(&spa_namespace_lock);
                return (SET_ERROR(ENXIO));
        }

        if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
                dmu_objset_disown(os, FTAG);
                mutex_exit(&spa_namespace_lock);
                return (SET_ERROR(EAGAIN));
        }
        (void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
            (char *)name);

        (void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);

        if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
            minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
                ddi_soft_state_free(zfsdev_state, minor);
                dmu_objset_disown(os, FTAG);
                mutex_exit(&spa_namespace_lock);
                return (SET_ERROR(EAGAIN));
        }

        (void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);

        if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
            minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
                ddi_remove_minor_node(zfs_dip, chrbuf);
                ddi_soft_state_free(zfsdev_state, minor);
                dmu_objset_disown(os, FTAG);
                mutex_exit(&spa_namespace_lock);
                return (SET_ERROR(EAGAIN));
        }

        zs = ddi_get_soft_state(zfsdev_state, minor);
        zs->zss_type = ZSST_ZVOL;
        zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
#else	/* !sun */

        error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
        if (error) {
                ASSERT(error == 0);
                dmu_objset_disown(os, zvol_tag);
                mutex_exit(&spa_namespace_lock);
                return (error);
        }

        DROP_GIANT();
        g_topology_lock();
        zv = zvol_geom_create(name);
        zv->zv_volsize = volsize;
        zv->zv_provider->mediasize = zv->zv_volsize;

#endif	/* !sun */

        (void) strlcpy(zv->zv_name, name, MAXPATHLEN);
        zv->zv_min_bs = DEV_BSHIFT;
        zv->zv_objset = os;
        if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
                zv->zv_flags |= ZVOL_RDONLY;
        mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
        avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
            sizeof (rl_t), offsetof(rl_t, r_node));
        list_create(&zv->zv_extents, sizeof (zvol_extent_t),
            offsetof(zvol_extent_t, ze_node));
        /* get and cache the blocksize */
        error = dmu_object_info(os, ZVOL_OBJ, &doi);
        ASSERT(error == 0);
        zv->zv_volblocksize = doi.doi_data_block_size;

        if (spa_writeable(dmu_objset_spa(os))) {
                if (zil_replay_disable)
                        zil_destroy(dmu_objset_zil(os), B_FALSE);
                else
                        zil_replay(os, zv, zvol_replay_vector);
        }
        dmu_objset_disown(os, FTAG);
        zv->zv_objset = NULL;

        zvol_minors++;

        mutex_exit(&spa_namespace_lock);

        zvol_geom_run(zv);

        g_topology_unlock();
        PICKUP_GIANT();

        ZFS_LOG(1, "ZVOL %s created.", name);

        return (0);
}

/*
 * Remove minor node for the specified volume.
 */
static int
zvol_remove_zv(zvol_state_t *zv)
{
#ifdef sun
        minor_t minor = zv->zv_minor;
#endif

        ASSERT(MUTEX_HELD(&spa_namespace_lock));
        if (zv->zv_total_opens != 0)
                return (SET_ERROR(EBUSY));

        ZFS_LOG(1, "ZVOL %s destroyed.", zv->zv_name);

#ifdef sun
        (void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
        ddi_remove_minor_node(zfs_dip, nmbuf);
#endif	/* sun */

        avl_destroy(&zv->zv_znode.z_range_avl);
        mutex_destroy(&zv->zv_znode.z_range_lock);

        zvol_geom_destroy(zv);

        zvol_minors--;
        return (0);
}

int
zvol_remove_minor(const char *name)
{
        zvol_state_t *zv;
        int rc;

        mutex_enter(&spa_namespace_lock);
        if ((zv = zvol_minor_lookup(name)) == NULL) {
                mutex_exit(&spa_namespace_lock);
                return (SET_ERROR(ENXIO));
        }

        rc = zvol_remove_zv(zv);

        mutex_exit(&spa_namespace_lock);
        return (rc);
}

static int
zvol_first_open(zvol_state_t *zv)
{
        objset_t *os;
        uint64_t volsize;
        int error;
        uint64_t readonly;

        /* lie and say we're read-only */
        error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
            zvol_tag, &os);
        if (error)
                return (error);

        zv->zv_objset = os;
        error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
        if (error) {
                ASSERT(error == 0);
                dmu_objset_disown(os, zvol_tag);
                zv->zv_objset = NULL;
                return (error);
        }

        error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
        if (error) {
                dmu_objset_disown(os, zvol_tag);
                zv->zv_objset = NULL;
                return (error);
        }

        zv->zv_volsize = volsize;
        zv->zv_zilog = zil_open(os, zvol_get_data);
        zvol_size_changed(zv);

        VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
            NULL) == 0);
        if (readonly || dmu_objset_is_snapshot(os) ||
            !spa_writeable(dmu_objset_spa(os)))
                zv->zv_flags |= ZVOL_RDONLY;
        else
                zv->zv_flags &= ~ZVOL_RDONLY;
        return (error);
}

static void
zvol_last_close(zvol_state_t *zv)
{
        zil_close(zv->zv_zilog);
        zv->zv_zilog = NULL;

        dmu_buf_rele(zv->zv_dbuf, zvol_tag);
        zv->zv_dbuf = NULL;

        /*
         * Evict cached data
         */
        if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
            !(zv->zv_flags & ZVOL_RDONLY))
                txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
        dmu_objset_evict_dbufs(zv->zv_objset);

        dmu_objset_disown(zv->zv_objset, zvol_tag);
        zv->zv_objset = NULL;
}

static int
zvol_prealloc(zvol_state_t *zv)
{
        objset_t *os = zv->zv_objset;
        dmu_tx_t *tx;
        uint64_t refd, avail, usedobjs, availobjs;
        uint64_t resid = zv->zv_volsize;
        uint64_t off = 0;

        /* Check the space usage before attempting to allocate the space */
        dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
        if (avail < zv->zv_volsize)
                return (SET_ERROR(ENOSPC));

        /* Free old extents if they exist */
        zvol_free_extents(zv);

        while (resid != 0) {
                int error;
                uint64_t bytes = MIN(resid, SPA_MAXBLOCKSIZE);

                tx = dmu_tx_create(os);
                dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
                error = dmu_tx_assign(tx, TXG_WAIT);
                if (error) {
                        dmu_tx_abort(tx);
                        (void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
                        return (error);
                }
                dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
                dmu_tx_commit(tx);
                off += bytes;
                resid -= bytes;
        }
        txg_wait_synced(dmu_objset_pool(os), 0);

        return (0);
}

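/*
 * Record the new volume size in the ZAP and free any blocks that now
 * lie beyond the end of the volume.
 */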
static int
zvol_update_volsize(objset_t *os, uint64_t volsize)
{
        dmu_tx_t *tx;
        int error;

        ASSERT(MUTEX_HELD(&spa_namespace_lock));

        tx = dmu_tx_create(os);
        dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
        error = dmu_tx_assign(tx, TXG_WAIT);
        if (error) {
                dmu_tx_abort(tx);
                return (error);
        }

        error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
            &volsize, tx);
        dmu_tx_commit(tx);

        if (error == 0)
                error = dmu_free_long_range(os,
                    ZVOL_OBJ, volsize, DMU_OBJECT_END);
        return (error);
}

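/*
 * Remove the minors of the named dataset and of all of its children
 * (datasets whose names are prefixed by "name/").
 */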
void
zvol_remove_minors(const char *name)
{
        struct g_geom *gp, *gptmp;
        struct g_provider *pp;
        zvol_state_t *zv;
        size_t namelen;

        namelen = strlen(name);

        DROP_GIANT();
        mutex_enter(&spa_namespace_lock);
        g_topology_lock();

        LIST_FOREACH_SAFE(gp, &zfs_zvol_class.geom, geom, gptmp) {
                pp = LIST_FIRST(&gp->provider);
                if (pp == NULL)
                        continue;
                zv = pp->private;
                if (zv == NULL)
                        continue;
                if (strcmp(zv->zv_name, name) == 0 ||
                    (strncmp(zv->zv_name, name, namelen) == 0 &&
                    zv->zv_name[namelen] == '/')) {
                        (void) zvol_remove_zv(zv);
                }
        }

        g_topology_unlock();
        mutex_exit(&spa_namespace_lock);
        PICKUP_GIANT();
}

int
zvol_set_volsize(const char *name, major_t maj, uint64_t volsize)
{
        zvol_state_t *zv = NULL;
        objset_t *os;
        int error;
        dmu_object_info_t doi;
        uint64_t old_volsize = 0ULL;
        uint64_t readonly;

        mutex_enter(&spa_namespace_lock);
        zv = zvol_minor_lookup(name);
        if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
                mutex_exit(&spa_namespace_lock);
                return (error);
        }

        if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
            (error = zvol_check_volsize(volsize,
            doi.doi_data_block_size)) != 0)
                goto out;

        VERIFY(dsl_prop_get_integer(name, "readonly", &readonly,
            NULL) == 0);
        if (readonly) {
                error = SET_ERROR(EROFS);
                goto out;
        }

        error = zvol_update_volsize(os, volsize);
        /*
         * Reinitialize the dump area to the new size. If we
         * failed to resize the dump area then restore it back to
         * its original size.
         */
        if (zv && error == 0) {
#ifdef ZVOL_DUMP
                if (zv->zv_flags & ZVOL_DUMPIFIED) {
                        old_volsize = zv->zv_volsize;
                        zv->zv_volsize = volsize;
                        if ((error = zvol_dumpify(zv)) != 0 ||
                            (error = dumpvp_resize()) != 0) {
                                (void) zvol_update_volsize(os, old_volsize);
                                zv->zv_volsize = old_volsize;
                                error = zvol_dumpify(zv);
                        }
                }
#endif	/* ZVOL_DUMP */
                if (error == 0) {
                        zv->zv_volsize = volsize;
                        zvol_size_changed(zv);
                }
        }

#ifdef sun
        /*
         * Generate a LUN expansion event.
         */
        if (zv && error == 0) {
                sysevent_id_t eid;
                nvlist_t *attr;
                char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

                (void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
                    zv->zv_minor);

                VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
                VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);

                (void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
                    ESC_DEV_DLE, attr, &eid, DDI_SLEEP);

                nvlist_free(attr);
                kmem_free(physpath, MAXPATHLEN);
        }
#endif	/* sun */

out:
        dmu_objset_rele(os, FTAG);

        mutex_exit(&spa_namespace_lock);

        return (error);
}

static int
zvol_open(struct g_provider *pp, int flag, int count)
{
        zvol_state_t *zv;
        int err = 0;
        boolean_t locked = B_FALSE;

        /*
         * Protect against recursively entering spa_namespace_lock
         * when spa_open() is used for a pool on a (local) ZVOL(s).
         * This is needed since we replaced upstream zfsdev_state_lock
         * with spa_namespace_lock in the ZVOL code.
         * We are using the same trick as spa_open().
         * Note that calls in zvol_first_open which need to resolve
         * pool name to a spa object will enter spa_open()
         * recursively, but that function already has all the
         * necessary protection.
         */
        if (!MUTEX_HELD(&spa_namespace_lock)) {
                mutex_enter(&spa_namespace_lock);
                locked = B_TRUE;
        }

        zv = pp->private;
        if (zv == NULL) {
                if (locked)
                        mutex_exit(&spa_namespace_lock);
                return (SET_ERROR(ENXIO));
        }

        if (zv->zv_total_opens == 0)
                err = zvol_first_open(zv);
        if (err) {
                if (locked)
                        mutex_exit(&spa_namespace_lock);
                return (err);
        }
        if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
                err = SET_ERROR(EROFS);
                goto out;
        }
        if (zv->zv_flags & ZVOL_EXCL) {
                err = SET_ERROR(EBUSY);
                goto out;
        }
#ifdef FEXCL
        if (flag & FEXCL) {
                if (zv->zv_total_opens != 0) {
                        err = SET_ERROR(EBUSY);
                        goto out;
                }
                zv->zv_flags |= ZVOL_EXCL;
        }
#endif

        zv->zv_total_opens += count;
        if (locked)
                mutex_exit(&spa_namespace_lock);

        return (err);
out:
        if (zv->zv_total_opens == 0)
                zvol_last_close(zv);
        if (locked)
                mutex_exit(&spa_namespace_lock);
        return (err);
}

static int
zvol_close(struct g_provider *pp, int flag, int count)
{
        zvol_state_t *zv;
        int error = 0;
        boolean_t locked = B_FALSE;

        /* See comment in zvol_open(). */
        if (!MUTEX_HELD(&spa_namespace_lock)) {
                mutex_enter(&spa_namespace_lock);
                locked = B_TRUE;
        }

        zv = pp->private;
        if (zv == NULL) {
                if (locked)
                        mutex_exit(&spa_namespace_lock);
                return (SET_ERROR(ENXIO));
        }

        if (zv->zv_flags & ZVOL_EXCL) {
                ASSERT(zv->zv_total_opens == 1);
                zv->zv_flags &= ~ZVOL_EXCL;
        }

        /*
         * If the open count is zero, this is a spurious close.
         * That indicates a bug in the kernel / DDI framework.
         */
        ASSERT(zv->zv_total_opens != 0);

        /*
         * You may get multiple opens, but only one close.
         */
        zv->zv_total_opens -= count;

        if (zv->zv_total_opens == 0)
                zvol_last_close(zv);

        if (locked)
                mutex_exit(&spa_namespace_lock);
        return (error);
}

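/*
 * dmu_sync() completion callback for the indirect write path: release
 * the dbuf and the range lock, then record the block in the ZIL on
 * success.
 */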
static void
zvol_get_done(zgd_t *zgd, int error)
{
        if (zgd->zgd_db)
                dmu_buf_rele(zgd->zgd_db, zgd);

        zfs_range_unlock(zgd->zgd_rl);

        if (error == 0 && zgd->zgd_bp)
                zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

        kmem_free(zgd, sizeof (zgd_t));
}

/*
 * Get data to generate a TX_WRITE intent log record.
 */
static int
zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
        zvol_state_t *zv = arg;
        objset_t *os = zv->zv_objset;
        uint64_t object = ZVOL_OBJ;
        uint64_t offset = lr->lr_offset;
        uint64_t size = lr->lr_length;  /* length of user data */
        blkptr_t *bp = &lr->lr_blkptr;
        dmu_buf_t *db;
        zgd_t *zgd;
        int error;

        ASSERT(zio != NULL);
        ASSERT(size != 0);

        zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
        zgd->zgd_zilog = zv->zv_zilog;
        zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

        /*
         * Write records come in two flavors: immediate and indirect.
         * For small writes it's cheaper to store the data with the
         * log record (immediate); for large writes it's cheaper to
         * sync the data and get a pointer to it (indirect) so that
         * we don't have to write the data twice.
         */
        if (buf != NULL) { /* immediate write */
                error = dmu_read(os, object, offset, size, buf,
                    DMU_READ_NO_PREFETCH);
        } else {
                size = zv->zv_volblocksize;
                offset = P2ALIGN(offset, size);
                error = dmu_buf_hold(os, object, offset, zgd, &db,
                    DMU_READ_NO_PREFETCH);
                if (error == 0) {
                        blkptr_t *obp = dmu_buf_get_blkptr(db);
                        if (obp) {
                                ASSERT(BP_IS_HOLE(bp));
                                *bp = *obp;
                        }

                        zgd->zgd_db = db;
                        zgd->zgd_bp = bp;

                        ASSERT(db->db_offset == offset);
                        ASSERT(db->db_size == size);

                        error = dmu_sync(zio, lr->lr_common.lrc_txg,
                            zvol_get_done, zgd);

                        if (error == 0)
                                return (0);
                }
        }

        zvol_get_done(zgd, error);

        return (error);
}

/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;

static void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
        uint32_t blocksize = zv->zv_volblocksize;
        zilog_t *zilog = zv->zv_zilog;
        boolean_t slogging;
        ssize_t immediate_write_sz;

        if (zil_replaying(zilog, tx))
                return;

        immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
            ? 0 : zvol_immediate_write_sz;
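
        /*
         * With logbias=throughput, an immediate_write_sz of 0 steers every
         * aligned full-block write to WR_INDIRECT (dmu_sync) below, so the
         * data bypasses the log and is written only once.
         */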

        slogging = spa_has_slogs(zilog->zl_spa) &&
            (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

        while (resid) {
                itx_t *itx;
                lr_write_t *lr;
                ssize_t len;
                itx_wr_state_t write_state;

                /*
                 * Unlike zfs_log_write() we can be called with
                 * up to DMU_MAX_ACCESS/2 (5MB) writes.
                 */
                if (blocksize > immediate_write_sz && !slogging &&
                    resid >= blocksize && off % blocksize == 0) {
                        write_state = WR_INDIRECT; /* uses dmu_sync */
                        len = blocksize;
                } else if (sync) {
                        write_state = WR_COPIED;
                        len = MIN(ZIL_MAX_LOG_DATA, resid);
                } else {
                        write_state = WR_NEED_COPY;
                        len = MIN(ZIL_MAX_LOG_DATA, resid);
                }

                itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
                    (write_state == WR_COPIED ? len : 0));
                lr = (lr_write_t *)&itx->itx_lr;
                if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
                    ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
                        zil_itx_destroy(itx);
                        itx = zil_itx_create(TX_WRITE, sizeof (*lr));
                        lr = (lr_write_t *)&itx->itx_lr;
                        write_state = WR_NEED_COPY;
                }

                itx->itx_wr_state = write_state;
                if (write_state == WR_NEED_COPY)
                        itx->itx_sod += len;
                lr->lr_foid = ZVOL_OBJ;
                lr->lr_offset = off;
                lr->lr_length = len;
                BP_ZERO(&lr->lr_blkptr);

                itx->itx_private = zv;
                itx->itx_sync = sync;

                zil_itx_assign(zilog, itx, tx);

                off += len;
                resid -= len;
        }
}

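/*
 * Perform raw I/O directly against the leaf vdevs backing a dumpified
 * zvol, recursing through mirror-like vdevs and handing raidz off to
 * vdev_raidz_physio().
 */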
static int
zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t origoffset,
    uint64_t size, boolean_t doread, boolean_t isdump)
{
        vdev_disk_t *dvd;
        int c;
        int numerrors = 0;

        if (vd->vdev_ops == &vdev_mirror_ops ||
            vd->vdev_ops == &vdev_replacing_ops ||
            vd->vdev_ops == &vdev_spare_ops) {
                for (c = 0; c < vd->vdev_children; c++) {
                        int err = zvol_dumpio_vdev(vd->vdev_child[c],
                            addr, offset, origoffset, size, doread, isdump);
                        if (err != 0) {
                                numerrors++;
                        } else if (doread) {
                                break;
                        }
                }
        }

        if (!vd->vdev_ops->vdev_op_leaf && vd->vdev_ops != &vdev_raidz_ops)
                return (numerrors < vd->vdev_children ? 0 : EIO);

        if (doread && !vdev_readable(vd))
                return (SET_ERROR(EIO));
        else if (!doread && !vdev_writeable(vd))
                return (SET_ERROR(EIO));

        if (vd->vdev_ops == &vdev_raidz_ops) {
                return (vdev_raidz_physio(vd,
                    addr, size, offset, origoffset, doread, isdump));
        }

        offset += VDEV_LABEL_START_SIZE;

        if (ddi_in_panic() || isdump) {
                ASSERT(!doread);
                if (doread)
                        return (SET_ERROR(EIO));
                dvd = vd->vdev_tsd;
                ASSERT3P(dvd, !=, NULL);
                return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
                    lbtodb(size)));
        } else {
                dvd = vd->vdev_tsd;
                ASSERT3P(dvd, !=, NULL);
                return (vdev_disk_ldi_physio(dvd->vd_lh, addr, size,
                    offset, doread ? B_READ : B_WRITE));
        }
}

static int
zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
    boolean_t doread, boolean_t isdump)
{
        vdev_t *vd;
        int error;
        zvol_extent_t *ze;
        spa_t *spa = dmu_objset_spa(zv->zv_objset);

        /* Must be sector aligned, and not straddle a block boundary. */
        if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
            P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
                return (SET_ERROR(EINVAL));
        }
        ASSERT(size <= zv->zv_volblocksize);

        /* Locate the extent this belongs to */
        ze = list_head(&zv->zv_extents);
        while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
                offset -= ze->ze_nblks * zv->zv_volblocksize;
                ze = list_next(&zv->zv_extents, ze);
        }

        if (ze == NULL)
                return (SET_ERROR(EINVAL));

        if (!ddi_in_panic())
                spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);

        vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
        offset += DVA_GET_OFFSET(&ze->ze_dva);
        error = zvol_dumpio_vdev(vd, addr, offset, DVA_GET_OFFSET(&ze->ze_dva),
            size, doread, isdump);

        if (!ddi_in_panic())
                spa_config_exit(spa, SCL_STATE, FTAG);

        return (error);
}

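/*
 * Service one GEOM bio: range-lock the request, then read or write it
 * in chunks of at most zvol_maxphys bytes, completing the bio with
 * g_io_deliver() when done.
 */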
int
zvol_strategy(struct bio *bp)
{
        zvol_state_t *zv = bp->bio_to->private;
        uint64_t off, volsize;
        size_t resid;
        char *addr;
        objset_t *os;
        rl_t *rl;
        int error = 0;
        boolean_t doread = (bp->bio_cmd == BIO_READ);
        boolean_t is_dumpified;
        boolean_t sync;

        if (zv == NULL) {
                g_io_deliver(bp, ENXIO);
                return (0);
        }

        if (bp->bio_cmd != BIO_READ && (zv->zv_flags & ZVOL_RDONLY)) {
                g_io_deliver(bp, EROFS);
                return (0);
        }

        off = bp->bio_offset;
        volsize = zv->zv_volsize;

        os = zv->zv_objset;
        ASSERT(os != NULL);

        addr = bp->bio_data;
        resid = bp->bio_length;

        if (resid > 0 && (off < 0 || off >= volsize)) {
                g_io_deliver(bp, EIO);
                return (0);
        }

#ifdef sun
        is_dumpified = zv->zv_flags & ZVOL_DUMPIFIED;
#else
        is_dumpified = B_FALSE;
#endif
        sync = !doread && !is_dumpified &&
            zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;

        /*
         * There must be no buffer changes when doing a dmu_sync() because
         * we can't change the data whilst calculating the checksum.
         */
        rl = zfs_range_lock(&zv->zv_znode, off, resid,
            doread ? RL_READER : RL_WRITER);

        while (resid != 0 && off < volsize) {
                size_t size = MIN(resid, zvol_maxphys);
#ifdef sun
                if (is_dumpified) {
                        size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
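                        /*
                         * e.g., with an 8K volblocksize and off = 10K,
                         * P2END(off, 8K) is 16K, so size is clamped to 6K
                         * and a single zvol_dumpio() never crosses a block
                         * boundary.
                         */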
                        error = zvol_dumpio(zv, addr, off, size,
                            doread, B_FALSE);
                } else if (doread) {
#else
                if (doread) {
#endif
                        error = dmu_read(os, ZVOL_OBJ, off, size, addr,
                            DMU_READ_PREFETCH);
                } else {
                        dmu_tx_t *tx = dmu_tx_create(os);
                        dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
                        error = dmu_tx_assign(tx, TXG_WAIT);
                        if (error) {
                                dmu_tx_abort(tx);
                        } else {
                                dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
                                zvol_log_write(zv, tx, off, size, sync);
                                dmu_tx_commit(tx);
                        }
                }
                if (error) {
                        /* convert checksum errors into IO errors */
                        if (error == ECKSUM)
                                error = SET_ERROR(EIO);
                        break;
                }
                off += size;
                addr += size;
                resid -= size;
        }

        zfs_range_unlock(rl);

        bp->bio_completed = bp->bio_length - resid;
        if (bp->bio_completed < bp->bio_length)
                bp->bio_error = (off > volsize ? EINVAL : error);

        if (sync)
                zil_commit(zv->zv_zilog, ZVOL_OBJ);
        g_io_deliver(bp, 0);

        return (0);
}

/*
 * Set the buffer count to the zvol maximum transfer.
 * Using our own routine instead of the default minphys()
 * means that for larger writes we write bigger buffers on X86
 * (128K instead of 56K) and flush the disk write cache less often
 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
 * 56K on X86 and 128K on sparc).
 */
void
zvol_minphys(struct buf *bp)
{
        if (bp->b_bcount > zvol_maxphys)
                bp->b_bcount = zvol_maxphys;
}

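/*
 * Crash-dump entry point: writes the dump image through the
 * preallocated extent map via zvol_dumpio(), one block-bounded chunk
 * at a time.
 */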
int
zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
{
        minor_t minor = getminor(dev);
        zvol_state_t *zv;
        int error = 0;
        uint64_t size;
        uint64_t boff;
        uint64_t resid;

        zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
        if (zv == NULL)
                return (SET_ERROR(ENXIO));

        if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0)
                return (SET_ERROR(EINVAL));

        boff = ldbtob(blkno);
        resid = ldbtob(nblocks);

        VERIFY3U(boff + resid, <=, zv->zv_volsize);

        while (resid) {
                size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
                error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
                if (error)
                        break;
                boff += size;
                addr += size;
                resid -= size;
        }

        return (error);
}

/*ARGSUSED*/
int
zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
{
        minor_t minor = getminor(dev);
        zvol_state_t *zv;
        uint64_t volsize;
        rl_t *rl;
        int error = 0;

        zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
        if (zv == NULL)
                return (SET_ERROR(ENXIO));

        volsize = zv->zv_volsize;
        if (uio->uio_resid > 0 &&
            (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
                return (SET_ERROR(EIO));

        if (zv->zv_flags & ZVOL_DUMPIFIED) {
                error = physio(zvol_strategy, NULL, dev, B_READ,
                    zvol_minphys, uio);
                return (error);
        }

        rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
            RL_READER);
        while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
                uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);

                /* don't read past the end */
                if (bytes > volsize - uio->uio_loffset)
                        bytes = volsize - uio->uio_loffset;

                error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
                if (error) {
                        /* convert checksum errors into IO errors */
                        if (error == ECKSUM)
                                error = SET_ERROR(EIO);
                        break;
                }
        }
        zfs_range_unlock(rl);
        return (error);
}

/*ARGSUSED*/
int
zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
{
        minor_t minor = getminor(dev);
        zvol_state_t *zv;
        uint64_t volsize;
        rl_t *rl;
        int error = 0;
        boolean_t sync;

        zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
        if (zv == NULL)
                return (SET_ERROR(ENXIO));

        volsize = zv->zv_volsize;
        if (uio->uio_resid > 0 &&
            (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
                return (SET_ERROR(EIO));

        if (zv->zv_flags & ZVOL_DUMPIFIED) {
                error = physio(zvol_strategy, NULL, dev, B_WRITE,
                    zvol_minphys, uio);
                return (error);
        }

        sync = !(zv->zv_flags & ZVOL_WCE) ||
            (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);

        rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
            RL_WRITER);
        while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
                uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
                uint64_t off = uio->uio_loffset;
                dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);

                if (bytes > volsize - off)      /* don't write past the end */
                        bytes = volsize - off;

                dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
                error = dmu_tx_assign(tx, TXG_WAIT);
                if (error) {
                        dmu_tx_abort(tx);
                        break;
                }
                error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
                if (error == 0)
                        zvol_log_write(zv, tx, off, bytes, sync);
                dmu_tx_commit(tx);

                if (error)
                        break;
        }
        zfs_range_unlock(rl);
        if (error == 0)
                zil_commit(zv->zv_zilog, ZVOL_OBJ);
        return (error);
}

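/*
 * Synthesize a minimal EFI label for DKIOCGETEFI callers: a GPT header
 * (LBA 1) plus a single reserved partition entry (LBA 2) covering the
 * whole volume.
 */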
int
zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
{
        struct uuid uuid = EFI_RESERVED;
        efi_gpe_t gpe = { 0 };
        uint32_t crc;
        dk_efi_t efi;
        uint64_t length;
        char *ptr;

        if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
                return (SET_ERROR(EFAULT));
        ptr = (char *)(uintptr_t)efi.dki_data_64;
        length = efi.dki_length;
        /*
         * Some clients may attempt to request a PMBR for the
         * zvol.  Currently this interface will return EINVAL to
         * such requests.  These requests could be supported by
         * adding a check for lba == 0 and consing up an appropriate
         * PMBR.
         */
        if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
                return (SET_ERROR(EINVAL));

        gpe.efi_gpe_StartingLBA = LE_64(34ULL);
        gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
        UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);

        if (efi.dki_lba == 1) {
                efi_gpt_t gpt = { 0 };

                gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
                gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
                gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
                gpt.efi_gpt_MyLBA = LE_64(1ULL);
                gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
                gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
                gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
                gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
                gpt.efi_gpt_SizeOfPartitionEntry =
                    LE_32(sizeof (efi_gpe_t));
                CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
                gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
                CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
                gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
                if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
                    flag))
                        return (SET_ERROR(EFAULT));
                ptr += sizeof (gpt);
                length -= sizeof (gpt);
        }
        if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
            length), flag))
                return (SET_ERROR(EFAULT));
        return (0);
}

/*
 * BEGIN entry points to allow external callers access to the volume.
 */
/*
 * Return the volume parameters needed for access from an external caller.
 * These values are invariant as long as the volume is held open.
 */
int
zvol_get_volume_params(minor_t minor, uint64_t *blksize,
    uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
    void **rl_hdl, void **bonus_hdl)
{
        zvol_state_t *zv;

        zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
        if (zv == NULL)
                return (SET_ERROR(ENXIO));
        if (zv->zv_flags & ZVOL_DUMPIFIED)
                return (SET_ERROR(ENXIO));

        ASSERT(blksize && max_xfer_len && minor_hdl &&
            objset_hdl && zil_hdl && rl_hdl && bonus_hdl);

        *blksize = zv->zv_volblocksize;
        *max_xfer_len = (uint64_t)zvol_maxphys;
        *minor_hdl = zv;
        *objset_hdl = zv->zv_objset;
        *zil_hdl = zv->zv_zilog;
        *rl_hdl = &zv->zv_znode;
        *bonus_hdl = zv->zv_dbuf;
        return (0);
}

/*
 * Return the current volume size to an external caller.
 * The size can change while the volume is open.
 */
uint64_t
zvol_get_volume_size(void *minor_hdl)
{
        zvol_state_t *zv = minor_hdl;

        return (zv->zv_volsize);
}

/*
 * Return the current WCE setting to an external caller.
 * The WCE setting can change while the volume is open.
 */
int
zvol_get_volume_wce(void *minor_hdl)
{
        zvol_state_t *zv = minor_hdl;

        return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
}

/*
 * Entry point for external callers to zvol_log_write
 */
void
zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
    boolean_t sync)
{
        zvol_state_t *zv = minor_hdl;

        zvol_log_write(zv, tx, off, resid, sync);
}
/*
 * END entry points to allow external callers access to the volume.
 */

/*
 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems.  See dkio(7I).
 */
/*ARGSUSED*/
int
zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
{
        zvol_state_t *zv;
        struct dk_cinfo dki;
        struct dk_minfo dkm;
        struct dk_callback *dkc;
        int error = 0;
        rl_t *rl;

        mutex_enter(&spa_namespace_lock);

        zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);

        if (zv == NULL) {
                mutex_exit(&spa_namespace_lock);
                return (SET_ERROR(ENXIO));
        }
        ASSERT(zv->zv_total_opens > 0);

        switch (cmd) {

        case DKIOCINFO:
                bzero(&dki, sizeof (dki));
                (void) strcpy(dki.dki_cname, "zvol");
                (void) strcpy(dki.dki_dname, "zvol");
                dki.dki_ctype = DKC_UNKNOWN;
                dki.dki_unit = getminor(dev);
                dki.dki_maxtransfer = 1 << (SPA_MAXBLOCKSHIFT - zv->zv_min_bs);
                mutex_exit(&spa_namespace_lock);
                if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
                        error = SET_ERROR(EFAULT);
                return (error);

        case DKIOCGMEDIAINFO:
                bzero(&dkm, sizeof (dkm));
                dkm.dki_lbsize = 1U << zv->zv_min_bs;
                dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
                dkm.dki_media_type = DK_UNKNOWN;
                mutex_exit(&spa_namespace_lock);
                if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
                        error = SET_ERROR(EFAULT);
                return (error);

        case DKIOCGETEFI:
                {
                        uint64_t vs = zv->zv_volsize;
                        uint8_t bs = zv->zv_min_bs;

                        mutex_exit(&spa_namespace_lock);
                        error = zvol_getefi((void *)arg, flag, vs, bs);
                        return (error);
                }

        case DKIOCFLUSHWRITECACHE:
                dkc = (struct dk_callback *)arg;
                mutex_exit(&spa_namespace_lock);
                zil_commit(zv->zv_zilog, ZVOL_OBJ);
                if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
                        (*dkc->dkc_callback)(dkc->dkc_cookie, error);
                        error = 0;
                }
                return (error);

        case DKIOCGETWCE:
                {
                        int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
                        if (ddi_copyout(&wce, (void *)arg, sizeof (int),
                            flag))
                                error = SET_ERROR(EFAULT);
                        break;
                }
        case DKIOCSETWCE:
                {
                        int wce;
                        if (ddi_copyin((void *)arg, &wce, sizeof (int),
                            flag)) {
                                error = SET_ERROR(EFAULT);
                                break;
                        }
                        if (wce) {
                                zv->zv_flags |= ZVOL_WCE;
                                mutex_exit(&spa_namespace_lock);
                        } else {
                                zv->zv_flags &= ~ZVOL_WCE;
                                mutex_exit(&spa_namespace_lock);
                                zil_commit(zv->zv_zilog, ZVOL_OBJ);
                        }
                        return (0);
                }

        case DKIOCGGEOM:
        case DKIOCGVTOC:
                /*
                 * commands using these (like prtvtoc) expect ENOTSUP
                 * since we're emulating an EFI label
                 */
                error = SET_ERROR(ENOTSUP);
                break;

        case DKIOCDUMPINIT:
                rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
                    RL_WRITER);
                error = zvol_dumpify(zv);
                zfs_range_unlock(rl);
                break;

        case DKIOCDUMPFINI:
                if (!(zv->zv_flags & ZVOL_DUMPIFIED))
                        break;
                rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
                    RL_WRITER);
                error = zvol_dump_fini(zv);
                zfs_range_unlock(rl);
                break;

        case DKIOCFREE:
        {
                dkioc_free_t df;
                dmu_tx_t *tx;

                if (ddi_copyin((void *)arg, &df, sizeof (df), flag)) {
                        error = SET_ERROR(EFAULT);
                        break;
                }

                /*
                 * Apply Postel's Law to length-checking.  If they overshoot,
                 * just blank out until the end, if there's a need to blank
                 * out anything.
                 */
                if (df.df_start >= zv->zv_volsize)
                        break;  /* No need to do anything... */
                if (df.df_start + df.df_length > zv->zv_volsize)
                        df.df_length = DMU_OBJECT_END;

                rl = zfs_range_lock(&zv->zv_znode, df.df_start, df.df_length,
                    RL_WRITER);
                tx = dmu_tx_create(zv->zv_objset);
                error = dmu_tx_assign(tx, TXG_WAIT);
                if (error != 0) {
                        dmu_tx_abort(tx);
                } else {
                        zvol_log_truncate(zv, tx, df.df_start,
                            df.df_length, B_TRUE);
                        dmu_tx_commit(tx);
                        error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
                            df.df_start, df.df_length);
                }

                zfs_range_unlock(rl);

                /*
                 * If the write-cache is disabled or 'sync' property
                 * is set to 'always' then treat this as a synchronous
                 * operation (i.e. commit to zil).
                 */
                if (!(zv->zv_flags & ZVOL_WCE) ||
                    (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS))
                        zil_commit(zv->zv_zilog, ZVOL_OBJ);

                /*
                 * If the caller really wants synchronous writes, and
                 * can't wait for them, don't return until the write
                 * is done.
                 */
                if (df.df_flags & DF_WAIT_SYNC) {
                        txg_wait_synced(
                            dmu_objset_pool(zv->zv_objset), 0);
                }
                break;
        }

        default:
                error = SET_ERROR(ENOTTY);
                break;

        }
        mutex_exit(&spa_namespace_lock);
        return (error);
}

int
zvol_busy(void)
{
        return (zvol_minors != 0);
}

void
zvol_init(void)
{
        VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
            1) == 0);
        ZFS_LOG(1, "ZVOL Initialized.");
}

void
zvol_fini(void)
{
        ddi_soft_state_fini(&zfsdev_state);
        ZFS_LOG(1, "ZVOL Deinitialized.");
}

/*ARGSUSED*/
static int
zfs_mvdev_dump_feature_check(void *arg, dmu_tx_t *tx)
{
        spa_t *spa = dmu_tx_pool(tx)->dp_spa;

        if (spa_feature_is_active(spa,
            &spa_feature_table[SPA_FEATURE_MULTI_VDEV_CRASH_DUMP]))
                return (1);
        return (0);
}

/*ARGSUSED*/
static void
zfs_mvdev_dump_activate_feature_sync(void *arg, dmu_tx_t *tx)
{
        spa_t *spa = dmu_tx_pool(tx)->dp_spa;

        spa_feature_incr(spa,
            &spa_feature_table[SPA_FEATURE_MULTI_VDEV_CRASH_DUMP], tx);
}

static int
zvol_dump_init(zvol_state_t *zv, boolean_t resize)
{
        dmu_tx_t *tx;
        int error;
        objset_t *os = zv->zv_objset;
        spa_t *spa = dmu_objset_spa(os);
        vdev_t *vd = spa->spa_root_vdev;
        nvlist_t *nv = NULL;
        uint64_t version = spa_version(spa);
        enum zio_checksum checksum;

        ASSERT(MUTEX_HELD(&spa_namespace_lock));
        ASSERT(vd->vdev_ops == &vdev_root_ops);

        error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
            DMU_OBJECT_END);
        /* wait for dmu_free_long_range to actually free the blocks */
        txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);

        /*
         * If the pool on which the dump device is being initialized has more
         * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature is
         * enabled.  If so, bump that feature's counter to indicate that the
         * feature is active. We also check the vdev type to handle the
         * following case:
         *   # zpool create test raidz disk1 disk2 disk3
         *   Now have spa_root_vdev->vdev_children == 1 (the raidz vdev),
         *   the raidz vdev itself has 3 children.
         */
        if (vd->vdev_children > 1 || vd->vdev_ops == &vdev_raidz_ops) {
                if (!spa_feature_is_enabled(spa,
                    &spa_feature_table[SPA_FEATURE_MULTI_VDEV_CRASH_DUMP]))
                        return (SET_ERROR(ENOTSUP));
                (void) dsl_sync_task(spa_name(spa),
                    zfs_mvdev_dump_feature_check,
                    zfs_mvdev_dump_activate_feature_sync, NULL, 2);
        }

        tx = dmu_tx_create(os);
        dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
        dmu_tx_hold_bonus(tx, ZVOL_OBJ);
        error = dmu_tx_assign(tx, TXG_WAIT);
        if (error) {
                dmu_tx_abort(tx);
                return (error);
        }

        /*
         * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY checksum
         * function.  Otherwise, use the old default -- OFF.
         */
        checksum = spa_feature_is_active(spa,
            &spa_feature_table[SPA_FEATURE_MULTI_VDEV_CRASH_DUMP]) ?
            ZIO_CHECKSUM_NOPARITY : ZIO_CHECKSUM_OFF;

        /*
         * If we are resizing the dump device then we only need to
         * update the refreservation to match the newly updated
         * zvolsize. Otherwise, we save off the original state of the
         * zvol so that we can restore them if the zvol is ever undumpified.
         */
        if (resize) {
                error = zap_update(os, ZVOL_ZAP_OBJ,
                    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
                    &zv->zv_volsize, tx);
        } else {
                uint64_t checksum, compress, refresrv, vbs, dedup;

                error = dsl_prop_get_integer(zv->zv_name,
                    zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
                error = error ? error : dsl_prop_get_integer(zv->zv_name,
                    zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum, NULL);
                error = error ? error : dsl_prop_get_integer(zv->zv_name,
                    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &refresrv, NULL);
                error = error ? error : dsl_prop_get_integer(zv->zv_name,
                    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs, NULL);
                if (version >= SPA_VERSION_DEDUP) {
                        error = error ? error :
                            dsl_prop_get_integer(zv->zv_name,
                            zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
                }

                error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
                    zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
                    &compress, tx);
                error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
                    zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum, tx);
                error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
                    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
                    &refresrv, tx);
                error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
                    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
                    &vbs, tx);
                error = error ? error : dmu_object_set_blocksize(
                    os, ZVOL_OBJ, SPA_MAXBLOCKSIZE, 0, tx);
                if (version >= SPA_VERSION_DEDUP) {
                        error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
                            zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
                            &dedup, tx);
                }
                if (error == 0)
                        zv->zv_volblocksize = SPA_MAXBLOCKSIZE;
        }
        dmu_tx_commit(tx);

        /*
         * We only need to update the zvol's property if we are initializing
         * the dump area for the first time.
         */
        if (!resize) {
                VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
                VERIFY(nvlist_add_uint64(nv,
                    zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
                VERIFY(nvlist_add_uint64(nv,
                    zfs_prop_to_name(ZFS_PROP_COMPRESSION),
                    ZIO_COMPRESS_OFF) == 0);
                VERIFY(nvlist_add_uint64(nv,
                    zfs_prop_to_name(ZFS_PROP_CHECKSUM),
                    checksum) == 0);
                if (version >= SPA_VERSION_DEDUP) {
                        VERIFY(nvlist_add_uint64(nv,
                            zfs_prop_to_name(ZFS_PROP_DEDUP),
                            ZIO_CHECKSUM_OFF) == 0);
                }

                error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
                    nv, NULL);
                nvlist_free(nv);

                if (error)
                        return (error);
        }

        /* Allocate the space for the dump */
        error = zvol_prealloc(zv);
        return (error);
}

static int
zvol_dumpify(zvol_state_t *zv)
{
        int error = 0;
        uint64_t dumpsize = 0;
        dmu_tx_t *tx;
        objset_t *os = zv->zv_objset;

        if (zv->zv_flags & ZVOL_RDONLY)
                return (SET_ERROR(EROFS));

        if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
            8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
                boolean_t resize = (dumpsize > 0);

                if ((error = zvol_dump_init(zv, resize)) != 0) {
                        (void) zvol_dump_fini(zv);
                        return (error);
                }
        }

        /*
         * Build up our lba mapping.
         */
        error = zvol_get_lbas(zv);
        if (error) {
                (void) zvol_dump_fini(zv);
                return (error);
        }

        tx = dmu_tx_create(os);
        dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
        error = dmu_tx_assign(tx, TXG_WAIT);
        if (error) {
                dmu_tx_abort(tx);
                (void) zvol_dump_fini(zv);
                return (error);
        }

        zv->zv_flags |= ZVOL_DUMPIFIED;
        error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
            &zv->zv_volsize, tx);
        dmu_tx_commit(tx);

        if (error) {
                (void) zvol_dump_fini(zv);
                return (error);
        }

        txg_wait_synced(dmu_objset_pool(os), 0);
        return (0);
}

static int
zvol_dump_fini(zvol_state_t *zv)
{
        dmu_tx_t *tx;
        objset_t *os = zv->zv_objset;
        nvlist_t *nv;
        int error = 0;
        uint64_t checksum, compress, refresrv, vbs, dedup;
        uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));

        /*
         * Attempt to restore the zvol back to its pre-dumpified state.
         * This is a best-effort attempt as it's possible that not all
         * of these properties were initialized during the dumpify process
         * (i.e. error during zvol_dump_init).
         */

        tx = dmu_tx_create(os);
        dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
        error = dmu_tx_assign(tx, TXG_WAIT);
        if (error) {
                dmu_tx_abort(tx);
                return (error);
        }
        (void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
        dmu_tx_commit(tx);

        (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
            zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
        (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
            zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
        (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
            zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
        (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
            zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);

        VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
        (void) nvlist_add_uint64(nv,
            zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
        (void) nvlist_add_uint64(nv,
            zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
        (void) nvlist_add_uint64(nv,
            zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
        if (version >= SPA_VERSION_DEDUP &&
            zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
            zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
                (void) nvlist_add_uint64(nv,
                    zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
        }
        (void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
            nv, NULL);
        nvlist_free(nv);

        zvol_free_extents(zv);
        zv->zv_flags &= ~ZVOL_DUMPIFIED;
        (void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
        /* wait for dmu_free_long_range to actually free the blocks */
        txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
        tx = dmu_tx_create(os);
        dmu_tx_hold_bonus(tx, ZVOL_OBJ);
        error = dmu_tx_assign(tx, TXG_WAIT);
        if (error) {
                dmu_tx_abort(tx);
                return (error);
        }
        if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
                zv->zv_volblocksize = vbs;
        dmu_tx_commit(tx);

        return (0);
}

static zvol_state_t *
zvol_geom_create(const char *name)
{
        struct g_provider *pp;
        struct g_geom *gp;
        zvol_state_t *zv;

        gp = g_new_geomf(&zfs_zvol_class, "zfs::zvol::%s", name);
        gp->start = zvol_geom_start;
        gp->access = zvol_geom_access;
        pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, name);
        pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
        pp->sectorsize = DEV_BSIZE;

        zv = kmem_zalloc(sizeof(*zv), KM_SLEEP);
        zv->zv_provider = pp;
        pp->private = zv;

        bioq_init(&zv->zv_queue);
        mtx_init(&zv->zv_queue_mtx, "zvol", NULL, MTX_DEF);

        return (zv);
}

static void
zvol_geom_run(zvol_state_t *zv)
{
        struct g_provider *pp;

        pp = zv->zv_provider;
        g_error_provider(pp, 0);

        kproc_kthread_add(zvol_geom_worker, zv, &zfsproc, NULL, 0, 0,
            "zfskern", "zvol %s", pp->name + sizeof(ZVOL_DRIVER));
}

static void
zvol_geom_destroy(zvol_state_t *zv)
{
        struct g_provider *pp;

        g_topology_assert();

        mtx_lock(&zv->zv_queue_mtx);
        zv->zv_state = 1;
        wakeup_one(&zv->zv_queue);
        while (zv->zv_state != 2)
                msleep(&zv->zv_state, &zv->zv_queue_mtx, 0, "zvol:w", 0);
        mtx_destroy(&zv->zv_queue_mtx);

        pp = zv->zv_provider;
        zv->zv_provider = NULL;
        pp->private = NULL;
        g_wither_geom(pp->geom, ENXIO);

        kmem_free(zv, sizeof(*zv));
}

static int
zvol_geom_access(struct g_provider *pp, int acr, int acw, int ace)
{
        int count, error, flags;

        g_topology_assert();

        /*
         * To make it easier we expect either open or close, but not both
         * at the same time.
         */
        KASSERT((acr >= 0 && acw >= 0 && ace >= 0) ||
            (acr <= 0 && acw <= 0 && ace <= 0),
            ("Unsupported access request to %s (acr=%d, acw=%d, ace=%d).",
            pp->name, acr, acw, ace));

        if (pp->private == NULL) {
                if (acr <= 0 && acw <= 0 && ace <= 0)
                        return (0);
                return (pp->error);
        }

        /*
         * We don't pass FEXCL flag to zvol_open()/zvol_close() if ace != 0,
         * because GEOM already handles that and handles it a bit differently.
         * GEOM allows for multiple read/exclusive consumers and ZFS allows
         * only one exclusive consumer, no matter if it is reader or writer.
         * I like better the way GEOM works so I'll leave it for GEOM to
         * decide what to do.
         */

        count = acr + acw + ace;
        if (count == 0)
                return (0);

        flags = 0;
        if (acr != 0 || ace != 0)
                flags |= FREAD;
        if (acw != 0)
                flags |= FWRITE;

        g_topology_unlock();
        if (count > 0)
                error = zvol_open(pp, flags, count);
        else
                error = zvol_close(pp, flags, -count);
        g_topology_lock();
        return (error);
}

static void
zvol_geom_start(struct bio *bp)
{
        zvol_state_t *zv;
        boolean_t first;

        zv = bp->bio_to->private;

        switch (bp->bio_cmd) {
        case BIO_FLUSH:
                if (!THREAD_CAN_SLEEP())
                        goto enqueue;
                zil_commit(zv->zv_zilog, ZVOL_OBJ);
                g_io_deliver(bp, 0);
                break;
        case BIO_READ:
        case BIO_WRITE:
                if (!THREAD_CAN_SLEEP())
                        goto enqueue;
                zvol_strategy(bp);
                break;
        case BIO_GETATTR:
        case BIO_DELETE:
        default:
                g_io_deliver(bp, EOPNOTSUPP);
                break;
        }
        return;

enqueue:
        mtx_lock(&zv->zv_queue_mtx);
        first = (bioq_first(&zv->zv_queue) == NULL);
        bioq_insert_tail(&zv->zv_queue, bp);
        mtx_unlock(&zv->zv_queue_mtx);
        if (first)
                wakeup_one(&zv->zv_queue);
}

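/*
 * Per-volume worker thread: services bios queued by zvol_geom_start()
 * from contexts that cannot sleep; a zv_state handshake with
 * zvol_geom_destroy() shuts the thread down.
 */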
static void
zvol_geom_worker(void *arg)
{
        zvol_state_t *zv;
        struct bio *bp;

        thread_lock(curthread);
        sched_prio(curthread, PRIBIO);
        thread_unlock(curthread);

        zv = arg;
        for (;;) {
                mtx_lock(&zv->zv_queue_mtx);
                bp = bioq_takefirst(&zv->zv_queue);
                if (bp == NULL) {
                        if (zv->zv_state == 1) {
                                zv->zv_state = 2;
                                wakeup(&zv->zv_state);
                                mtx_unlock(&zv->zv_queue_mtx);
                                kthread_exit();
                        }
                        msleep(&zv->zv_queue, &zv->zv_queue_mtx,
                            PRIBIO | PDROP, "zvol:io", 0);
                        continue;
                }
                mtx_unlock(&zv->zv_queue_mtx);
                switch (bp->bio_cmd) {
                case BIO_FLUSH:
                        zil_commit(zv->zv_zilog, ZVOL_OBJ);
                        g_io_deliver(bp, 0);
                        break;
                case BIO_READ:
                case BIO_WRITE:
                        zvol_strategy(bp);
                        break;
                }
        }
}

extern boolean_t dataset_name_hidden(const char *name);

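/*
 * Create minor nodes for every snapshot of the named volume.
 */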
static int
zvol_create_snapshots(objset_t *os, const char *name)
{
        uint64_t cookie, obj;
        char *sname;
        int error, len;

        cookie = obj = 0;
        sname = kmem_alloc(MAXPATHLEN, KM_SLEEP);

#if 0
        (void) dmu_objset_find(name, dmu_objset_prefetch, NULL,
            DS_FIND_SNAPSHOTS);
#endif

        for (;;) {
                len = snprintf(sname, MAXPATHLEN, "%s@", name);
                if (len >= MAXPATHLEN) {
                        dmu_objset_rele(os, FTAG);
                        error = ENAMETOOLONG;
                        break;
                }

                dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
                error = dmu_snapshot_list_next(os, MAXPATHLEN - len,
                    sname + len, &obj, &cookie, NULL);
                dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
                if (error != 0) {
                        if (error == ENOENT)
                                error = 0;
                        break;
                }

                if ((error = zvol_create_minor(sname)) != 0) {
                        printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",
                            sname, error);
                        break;
                }
        }

        kmem_free(sname, MAXPATHLEN);
        return (error);
}

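/*
 * Recursively create minor nodes for the named dataset, its snapshots
 * and, for ZPL datasets, all of its children.
 */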
int
zvol_create_minors(const char *name)
{
        uint64_t cookie;
        objset_t *os;
        char *osname, *p;
        int error, len;

        if (dataset_name_hidden(name))
                return (0);

        if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
                printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",
                    name, error);
                return (error);
        }
        if (dmu_objset_type(os) == DMU_OST_ZVOL) {
                dsl_dataset_long_hold(os->os_dsl_dataset, FTAG);
                dsl_pool_rele(dmu_objset_pool(os), FTAG);
                if ((error = zvol_create_minor(name)) == 0)
                        error = zvol_create_snapshots(os, name);
                else {
                        printf("ZFS WARNING: Unable to create ZVOL %s (error=%d).\n",
                            name, error);
                }
                dsl_dataset_long_rele(os->os_dsl_dataset, FTAG);
                dsl_dataset_rele(os->os_dsl_dataset, FTAG);
                return (error);
        }
        if (dmu_objset_type(os) != DMU_OST_ZFS) {
                dmu_objset_rele(os, FTAG);
                return (0);
        }

        osname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
        if (snprintf(osname, MAXPATHLEN, "%s/", name) >= MAXPATHLEN) {
                dmu_objset_rele(os, FTAG);
                kmem_free(osname, MAXPATHLEN);
                return (ENOENT);
        }
        p = osname + strlen(osname);
        len = MAXPATHLEN - (p - osname);

#if 0
        /* Prefetch the datasets. */
        cookie = 0;
        while (dmu_dir_list_next(os, len, p, NULL, &cookie) == 0) {
                if (!dataset_name_hidden(osname))
                        (void) dmu_objset_prefetch(osname, NULL);
        }
#endif

        cookie = 0;
        while (dmu_dir_list_next(os, MAXPATHLEN - (p - osname), p, NULL,
            &cookie) == 0) {
                dmu_objset_rele(os, FTAG);
                (void) zvol_create_minors(osname);
                if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
                        printf("ZFS WARNING: Unable to put hold on %s (error=%d).\n",
                            name, error);
                        return (error);
                }
        }

        dmu_objset_rele(os, FTAG);
        kmem_free(osname, MAXPATHLEN);
        return (0);
}

static void
zvol_rename_minor(struct g_geom *gp, const char *newname)
{
        struct g_provider *pp;
        zvol_state_t *zv;

        ASSERT(MUTEX_HELD(&spa_namespace_lock));
        g_topology_assert();

        pp = LIST_FIRST(&gp->provider);
        if (pp == NULL)
                return;
        zv = pp->private;

        zv->zv_provider = NULL;
        g_wither_provider(pp, ENXIO);

        pp = g_new_providerf(gp, "%s/%s", ZVOL_DRIVER, newname);
        pp->flags |= G_PF_DIRECT_RECEIVE | G_PF_DIRECT_SEND;
        pp->sectorsize = DEV_BSIZE;
        pp->mediasize = zv->zv_volsize;
        pp->private = zv;
        zv->zv_provider = pp;
        strlcpy(zv->zv_name, newname, sizeof(zv->zv_name));
        g_error_provider(pp, 0);
}

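/*
 * Rename the minor of oldname and the minors of all of its children
 * and snapshots to the corresponding names under newname.
 */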
void
zvol_rename_minors(const char *oldname, const char *newname)
{
        char name[MAXPATHLEN];
        struct g_provider *pp;
        struct g_geom *gp;
        size_t oldnamelen, newnamelen;
        zvol_state_t *zv;

        oldnamelen = strlen(oldname);
        newnamelen = strlen(newname);

        DROP_GIANT();
        mutex_enter(&spa_namespace_lock);
        g_topology_lock();

        LIST_FOREACH(gp, &zfs_zvol_class.geom, geom) {
                pp = LIST_FIRST(&gp->provider);
                if (pp == NULL)
                        continue;
                zv = pp->private;
                if (zv == NULL)
                        continue;
                if (strcmp(zv->zv_name, oldname) == 0) {
                        zvol_rename_minor(gp, newname);
                } else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 &&
                    (zv->zv_name[oldnamelen] == '/' ||
                    zv->zv_name[oldnamelen] == '@')) {
                        snprintf(name, sizeof(name), "%s%c%s", newname,
                            zv->zv_name[oldnamelen],
                            zv->zv_name + oldnamelen + 1);
                        zvol_rename_minor(gp, name);
                }
        }

        g_topology_unlock();
        mutex_exit(&spa_namespace_lock);
        PICKUP_GIANT();
}