4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or https://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
23 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
24 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
27 * ZFS volume emulation driver.
29 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
30 * Volumes are accessed through the symbolic links named:
32 * /dev/<pool_name>/<dataset_name>
34 * Volumes are persistent through reboot and module load. No user command
35 * needs to be run before opening and using a device.
37 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
38 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
39 * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
43 * Note on locking of zvol state structures.
45 * These structures are used to maintain internal state used to emulate block
46 * devices on top of zvols. In particular, management of device minor number
47 * operations - create, remove, rename, and set_snapdev - involves access to
48 * these structures. The zvol_state_lock is primarily used to protect the
49 * zvol_state_list. The zv->zv_state_lock is used to protect the contents
50 * of the zvol_state_t structures, as well as to make sure that when the
51 * time comes to remove the structure from the list, it is not in use, and
52 * therefore, it can be taken off zvol_state_list and freed.
54 * The zv_suspend_lock was introduced to allow for suspending I/O to a zvol,
55 * e.g. for the duration of receive and rollback operations. This lock can be
56 * held for significant periods of time. Given that it is undesirable to hold
57 * mutexes for long periods of time, the following lock ordering applies (see the sketch after this comment):
58 * - take zvol_state_lock if necessary, to protect zvol_state_list
59 * - take zv_suspend_lock if necessary, by the code path in question
60 * - take zv_state_lock to protect zvol_state_t
62 * The minor operations are issued to spa->spa_zvol_taskq queues, that are
63 * single-threaded (to preserve order of minor operations), and are executed
64 * through the zvol_task_cb that dispatches the specific operations. Therefore,
65 * these operations are serialized per pool. Consequently, we can be certain
66 * that for a given zvol, there is only one operation at a time in progress.
67 * That is why one can be sure that the zvol_state_t for a given zvol is
68 * first allocated and placed on zvol_state_list, and that subsequent minor
69 * operations for this zvol proceed in the order in which they were issued.
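/*
 * Illustrative sketch (not part of this driver): a minimal user-space analogue
 * of the lock ordering described above, using pthread primitives in place of
 * krwlock_t/kmutex_t. All names below (example_zvol_t, example_state_list_lock,
 * and so on) are hypothetical and exist only to show the order: the list lock
 * first, then the suspend lock, then the per-zvol state lock.
 */
#include <pthread.h>

typedef struct example_zvol {
	pthread_rwlock_t	ezv_suspend_lock;	/* stands in for zv_suspend_lock */
	pthread_mutex_t		ezv_state_lock;		/* stands in for zv_state_lock */
	int			ezv_some_state;
} example_zvol_t;

/* stands in for zvol_state_lock, which protects the list of zvols */
static pthread_rwlock_t example_state_list_lock = PTHREAD_RWLOCK_INITIALIZER;

static void
example_update_zvol_state(example_zvol_t *ezv)
{
	/* 1. the list lock first (a reader hold is enough to walk the list) */
	pthread_rwlock_rdlock(&example_state_list_lock);
	/* 2. the suspend lock next, always before the per-zvol mutex */
	pthread_rwlock_rdlock(&ezv->ezv_suspend_lock);
	/* 3. the per-zvol state lock last */
	pthread_mutex_lock(&ezv->ezv_state_lock);

	ezv->ezv_some_state++;		/* contents protected by the state lock */

	pthread_mutex_unlock(&ezv->ezv_state_lock);
	pthread_rwlock_unlock(&ezv->ezv_suspend_lock);
	pthread_rwlock_unlock(&example_state_list_lock);
}

int
main(void)
{
	example_zvol_t ezv = { .ezv_some_state = 0 };

	pthread_rwlock_init(&ezv.ezv_suspend_lock, NULL);
	pthread_mutex_init(&ezv.ezv_state_lock, NULL);
	example_update_zvol_state(&ezv);
	pthread_mutex_destroy(&ezv.ezv_state_lock);
	pthread_rwlock_destroy(&ezv.ezv_suspend_lock);
	return (0);
}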
73 #include <sys/dataset_kstats.h>
75 #include <sys/dmu_traverse.h>
76 #include <sys/dsl_dataset.h>
77 #include <sys/dsl_prop.h>
78 #include <sys/dsl_dir.h>
80 #include <sys/zfeature.h>
81 #include <sys/zil_impl.h>
82 #include <sys/dmu_tx.h>
84 #include <sys/zfs_rlock.h>
85 #include <sys/spa_impl.h>
87 #include <sys/zvol_impl.h>
89 unsigned int zvol_inhibit_dev = 0;
90 unsigned int zvol_volmode = ZFS_VOLMODE_GEOM;
92 struct hlist_head *zvol_htable;
93 static list_t zvol_state_list;
94 krwlock_t zvol_state_lock;
97 ZVOL_ASYNC_REMOVE_MINORS,
98 ZVOL_ASYNC_RENAME_MINORS,
99 ZVOL_ASYNC_SET_SNAPDEV,
100 ZVOL_ASYNC_SET_VOLMODE,
106 char name1[MAXNAMELEN];
107 char name2[MAXNAMELEN];
112 zvol_name_hash(const char *name)
115 uint64_t crc = -1ULL;
116 const uint8_t *p = (const uint8_t *)name;
117 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
118 for (i = 0; i < MAXNAMELEN - 1 && *p; i++, p++) {
119 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (*p)) & 0xFF];
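/*
 * Illustrative sketch (not part of this driver): a self-contained user-space
 * version of the table-driven CRC-64 name hash used by zvol_name_hash().
 * EX_CRC64_POLY is the reflected ECMA-182 polynomial, which is assumed to
 * match ZFS_CRC64_POLY, and EX_MAXNAMELEN is assumed to equal MAXNAMELEN;
 * treat both values as assumptions of this sketch.
 */
#include <stdint.h>
#include <stdio.h>

#define	EX_CRC64_POLY	0xC96C5795D7870F42ULL	/* assumed == ZFS_CRC64_POLY */
#define	EX_MAXNAMELEN	256			/* assumed == MAXNAMELEN */

static uint64_t ex_crc64_table[256];

static void
ex_crc64_init(void)
{
	for (int i = 0; i < 256; i++) {
		uint64_t c = (uint64_t)i;
		for (int j = 8; j > 0; j--)
			c = (c >> 1) ^ ((c & 1) ? EX_CRC64_POLY : 0);
		ex_crc64_table[i] = c;
	}
}

/* mirrors zvol_name_hash(): CRC-64 over at most EX_MAXNAMELEN - 1 bytes */
static uint64_t
ex_name_hash(const char *name)
{
	uint64_t crc = -1ULL;
	const uint8_t *p = (const uint8_t *)name;

	for (int i = 0; i < EX_MAXNAMELEN - 1 && *p; i++, p++)
		crc = (crc >> 8) ^ ex_crc64_table[(crc ^ *p) & 0xFF];
	return (crc);
}

int
main(void)
{
	ex_crc64_init();
	printf("hash(\"pool/vol0\") = 0x%016llx\n",
	    (unsigned long long)ex_name_hash("pool/vol0"));
	return (0);
}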
125 * Find a zvol_state_t given the name and hash generated by zvol_name_hash.
126 * If found, return with zv_suspend_lock and zv_state_lock taken; otherwise,
127 * return (NULL) without taking any locks. The zv_suspend_lock is always taken
128 * before zv_state_lock. The mode argument indicates the mode (including
129 * RW_NONE) in which zv_suspend_lock should be taken.
132 zvol_find_by_name_hash(const char *name, uint64_t hash, int mode)
135 struct hlist_node *p = NULL;
137 rw_enter(&zvol_state_lock, RW_READER);
138 hlist_for_each(p, ZVOL_HT_HEAD(hash)) {
139 zv = hlist_entry(p, zvol_state_t, zv_hlink);
140 mutex_enter(&zv->zv_state_lock);
141 if (zv->zv_hash == hash &&
142 strncmp(zv->zv_name, name, MAXNAMELEN) == 0) {
144 * this is the right zvol, take the locks in the
147 if (mode != RW_NONE &&
148 !rw_tryenter(&zv->zv_suspend_lock, mode)) {
149 mutex_exit(&zv->zv_state_lock);
150 rw_enter(&zv->zv_suspend_lock, mode);
151 mutex_enter(&zv->zv_state_lock);
153 * zvol cannot be renamed as we continue
154 * to hold zvol_state_lock
156 ASSERT(zv->zv_hash == hash &&
157 strncmp(zv->zv_name, name, MAXNAMELEN)
160 rw_exit(&zvol_state_lock);
163 mutex_exit(&zv->zv_state_lock);
165 rw_exit(&zvol_state_lock);
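/*
 * Illustrative sketch (not part of this driver): the "try the outer lock, and
 * on contention drop the inner lock, block, retake, and re-validate" pattern
 * that zvol_find_by_name_hash() uses above, restated with pthreads. All names
 * here are hypothetical. In the driver the re-check cannot fail because
 * zvol_state_lock is still held across the dance; this standalone sketch has
 * no such outer lock, so the re-check may legitimately fail.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

typedef struct ex_node {
	pthread_rwlock_t	en_suspend_lock;
	pthread_mutex_t		en_state_lock;
	char			en_name[64];
} ex_node_t;

static bool
ex_lock_both(ex_node_t *en, const char *name)
{
	pthread_mutex_lock(&en->en_state_lock);
	if (pthread_rwlock_tryrdlock(&en->en_suspend_lock) != 0) {
		/* contended: drop the mutex, block on the rwlock, retake */
		pthread_mutex_unlock(&en->en_state_lock);
		pthread_rwlock_rdlock(&en->en_suspend_lock);
		pthread_mutex_lock(&en->en_state_lock);
		/* re-validate what the mutex protects (here: the name) */
		if (strcmp(en->en_name, name) != 0) {
			pthread_mutex_unlock(&en->en_state_lock);
			pthread_rwlock_unlock(&en->en_suspend_lock);
			return (false);
		}
	}
	return (true);		/* both locks held on success */
}

int
main(void)
{
	ex_node_t en;

	pthread_rwlock_init(&en.en_suspend_lock, NULL);
	pthread_mutex_init(&en.en_state_lock, NULL);
	(void) snprintf(en.en_name, sizeof (en.en_name), "pool/vol0");

	if (ex_lock_both(&en, "pool/vol0")) {
		printf("holding both locks for %s\n", en.en_name);
		pthread_mutex_unlock(&en.en_state_lock);
		pthread_rwlock_unlock(&en.en_suspend_lock);
	}
	return (0);
}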
171 * Find a zvol_state_t given the name.
172 * If found, return with zv_suspend_lock and zv_state_lock taken; otherwise,
173 * return (NULL) without taking any locks. The zv_suspend_lock is always taken
174 * before zv_state_lock. The mode argument indicates the mode (including
175 * RW_NONE) in which zv_suspend_lock should be taken.
177 static zvol_state_t *
178 zvol_find_by_name(const char *name, int mode)
180 return (zvol_find_by_name_hash(name, zvol_name_hash(name), mode));
184 * ZFS_IOC_CREATE callback handles dmu zvol and zap object creation.
187 zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
189 zfs_creat_t *zct = arg;
190 nvlist_t *nvprops = zct->zct_props;
192 uint64_t volblocksize, volsize;
194 VERIFY(nvlist_lookup_uint64(nvprops,
195 zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
196 if (nvlist_lookup_uint64(nvprops,
197 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
198 volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
201 * These properties must be removed from the list so the generic
202 * property setting step won't apply to them.
204 VERIFY(nvlist_remove_all(nvprops,
205 zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
206 (void) nvlist_remove_all(nvprops,
207 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));
209 error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
213 error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
217 error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
222 * ZFS_IOC_OBJSET_STATS entry point.
225 zvol_get_stats(objset_t *os, nvlist_t *nv)
228 dmu_object_info_t *doi;
231 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
233 return (SET_ERROR(error));
235 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
236 doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
237 error = dmu_object_info(os, ZVOL_OBJ, doi);
240 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
241 doi->doi_data_block_size);
244 kmem_free(doi, sizeof (dmu_object_info_t));
246 return (SET_ERROR(error));
250 * Sanity check volume size.
253 zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
256 return (SET_ERROR(EINVAL));
258 if (volsize % blocksize != 0)
259 return (SET_ERROR(EINVAL));
262 if (volsize - 1 > SPEC_MAXOFFSET_T)
263 return (SET_ERROR(EOVERFLOW));
269 * Ensure the zap is flushed then inform the VFS of the capacity change.
272 zvol_update_volsize(uint64_t volsize, objset_t *os)
278 tx = dmu_tx_create(os);
279 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
280 dmu_tx_mark_netfree(tx);
281 error = dmu_tx_assign(tx, TXG_WAIT);
284 return (SET_ERROR(error));
286 txg = dmu_tx_get_txg(tx);
288 error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
292 txg_wait_synced(dmu_objset_pool(os), txg);
295 error = dmu_free_long_range(os,
296 ZVOL_OBJ, volsize, DMU_OBJECT_END);
302 * ZFS_PROP_VOLSIZE set entry point. Note that modifying the volume
303 * size will result in a udev "change" event being generated.
306 zvol_set_volsize(const char *name, uint64_t volsize)
311 boolean_t owned = B_FALSE;
313 error = dsl_prop_get_integer(name,
314 zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
316 return (SET_ERROR(error));
318 return (SET_ERROR(EROFS));
320 zvol_state_t *zv = zvol_find_by_name(name, RW_READER);
322 ASSERT(zv == NULL || (MUTEX_HELD(&zv->zv_state_lock) &&
323 RW_READ_HELD(&zv->zv_suspend_lock)));
325 if (zv == NULL || zv->zv_objset == NULL) {
327 rw_exit(&zv->zv_suspend_lock);
328 if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE, B_TRUE,
331 mutex_exit(&zv->zv_state_lock);
332 return (SET_ERROR(error));
341 dmu_object_info_t *doi = kmem_alloc(sizeof (*doi), KM_SLEEP);
343 if ((error = dmu_object_info(os, ZVOL_OBJ, doi)) ||
344 (error = zvol_check_volsize(volsize, doi->doi_data_block_size)))
347 error = zvol_update_volsize(volsize, os);
348 if (error == 0 && zv != NULL) {
349 zv->zv_volsize = volsize;
353 kmem_free(doi, sizeof (dmu_object_info_t));
356 dmu_objset_disown(os, B_TRUE, FTAG);
358 zv->zv_objset = NULL;
360 rw_exit(&zv->zv_suspend_lock);
364 mutex_exit(&zv->zv_state_lock);
366 if (error == 0 && zv != NULL)
367 zvol_os_update_volsize(zv, volsize);
369 return (SET_ERROR(error));
373 * Sanity check volume block size.
376 zvol_check_volblocksize(const char *name, uint64_t volblocksize)
378 /* Record sizes above 128k need the feature to be enabled */
379 if (volblocksize > SPA_OLD_MAXBLOCKSIZE) {
383 if ((error = spa_open(name, &spa, FTAG)) != 0)
386 if (!spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
387 spa_close(spa, FTAG);
388 return (SET_ERROR(ENOTSUP));
392 * We don't allow setting the property above 1MB,
393 * unless the tunable has been changed.
395 if (volblocksize > zfs_max_recordsize)
396 return (SET_ERROR(EDOM));
398 spa_close(spa, FTAG);
401 if (volblocksize < SPA_MINBLOCKSIZE ||
402 volblocksize > SPA_MAXBLOCKSIZE ||
404 return (SET_ERROR(EDOM));
410 * Replay a TX_TRUNCATE ZIL transaction if asked. TX_TRUNCATE is how we
411 * implement DKIOCFREE/free-long-range.
414 zvol_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)
416 zvol_state_t *zv = arg1;
417 lr_truncate_t *lr = arg2;
418 uint64_t offset, length;
421 byteswap_uint64_array(lr, sizeof (*lr));
423 offset = lr->lr_offset;
424 length = lr->lr_length;
426 dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
427 dmu_tx_mark_netfree(tx);
428 int error = dmu_tx_assign(tx, TXG_WAIT);
432 (void) zil_replaying(zv->zv_zilog, tx);
434 error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset,
442 * Replay a TX_WRITE ZIL transaction that didn't get committed
443 * after a system failure
446 zvol_replay_write(void *arg1, void *arg2, boolean_t byteswap)
448 zvol_state_t *zv = arg1;
449 lr_write_t *lr = arg2;
450 objset_t *os = zv->zv_objset;
451 char *data = (char *)(lr + 1); /* data follows lr_write_t */
452 uint64_t offset, length;
457 byteswap_uint64_array(lr, sizeof (*lr));
459 offset = lr->lr_offset;
460 length = lr->lr_length;
462 /* If it's a dmu_sync() block, write the whole block */
463 if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
464 uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
465 if (length < blocksize) {
466 offset -= offset % blocksize;
471 tx = dmu_tx_create(os);
472 dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
473 error = dmu_tx_assign(tx, TXG_WAIT);
477 dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
478 (void) zil_replaying(zv->zv_zilog, tx);
486 zvol_replay_err(void *arg1, void *arg2, boolean_t byteswap)
488 (void) arg1, (void) arg2, (void) byteswap;
489 return (SET_ERROR(ENOTSUP));
493 * Callback vectors for replaying records.
494 * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
496 zil_replay_func_t *const zvol_replay_vector[TX_MAX_TYPE] = {
497 zvol_replay_err, /* no such transaction type */
498 zvol_replay_err, /* TX_CREATE */
499 zvol_replay_err, /* TX_MKDIR */
500 zvol_replay_err, /* TX_MKXATTR */
501 zvol_replay_err, /* TX_SYMLINK */
502 zvol_replay_err, /* TX_REMOVE */
503 zvol_replay_err, /* TX_RMDIR */
504 zvol_replay_err, /* TX_LINK */
505 zvol_replay_err, /* TX_RENAME */
506 zvol_replay_write, /* TX_WRITE */
507 zvol_replay_truncate, /* TX_TRUNCATE */
508 zvol_replay_err, /* TX_SETATTR */
509 zvol_replay_err, /* TX_ACL */
510 zvol_replay_err, /* TX_CREATE_ATTR */
511 zvol_replay_err, /* TX_CREATE_ACL_ATTR */
512 zvol_replay_err, /* TX_MKDIR_ACL */
513 zvol_replay_err, /* TX_MKDIR_ATTR */
514 zvol_replay_err, /* TX_MKDIR_ACL_ATTR */
515 zvol_replay_err, /* TX_WRITE2 */
516 zvol_replay_err, /* TX_SETSAXATTR */
517 zvol_replay_err, /* TX_RENAME_EXCHANGE */
518 zvol_replay_err, /* TX_RENAME_WHITEOUT */
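/*
 * Illustrative sketch (not part of this driver): the dispatch-table pattern
 * used by zvol_replay_vector above, as a self-contained user-space program.
 * Every slot is populated, and record types the consumer does not support
 * fall through to a shared error handler, just as the table above routes
 * everything except TX_WRITE and TX_TRUNCATE to zvol_replay_err(). All names
 * here are hypothetical.
 */
#include <errno.h>
#include <stdio.h>

typedef int (*ex_replay_func_t)(void *arg, int byteswap);

enum { EX_TX_ERR = 0, EX_TX_WRITE, EX_TX_TRUNCATE, EX_TX_MAX_TYPE };

static int
ex_replay_err(void *arg, int byteswap)
{
	(void) arg, (void) byteswap;
	return (ENOTSUP);
}

static int
ex_replay_write(void *arg, int byteswap)
{
	(void) arg, (void) byteswap;
	printf("replaying a write record\n");
	return (0);
}

static int
ex_replay_truncate(void *arg, int byteswap)
{
	(void) arg, (void) byteswap;
	printf("replaying a truncate record\n");
	return (0);
}

static ex_replay_func_t const ex_replay_vector[EX_TX_MAX_TYPE] = {
	ex_replay_err,		/* no such transaction type */
	ex_replay_write,	/* EX_TX_WRITE */
	ex_replay_truncate,	/* EX_TX_TRUNCATE */
};

int
main(void)
{
	int types[] = { EX_TX_WRITE, EX_TX_TRUNCATE, EX_TX_ERR };

	for (unsigned i = 0; i < sizeof (types) / sizeof (types[0]); i++)
		printf("type %d -> %d\n", types[i],
		    ex_replay_vector[types[i]](NULL, 0));
	return (0);
}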
522 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
524 * We store data in the log buffers if it's small enough.
525 * Otherwise we will later flush the data out via dmu_sync().
527 static const ssize_t zvol_immediate_write_sz = 32768;
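/*
 * Illustrative sketch (not part of this driver): a condensed, user-space
 * restatement of the write-state selection performed by zvol_log_write()
 * below, for readability only. The first two branches mirror the visible
 * code; treating the remaining choice as "copy the data now when the write is
 * synchronous, defer the copy otherwise" reflects the usual upstream behavior
 * and is an assumption here, since those lines are elided in this excerpt.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef enum { EX_WR_COPIED, EX_WR_INDIRECT, EX_WR_NEED_COPY } ex_wr_state_t;

static const uint64_t ex_immediate_write_sz = 32768;	/* mirrors zvol_immediate_write_sz */

static ex_wr_state_t
ex_pick_write_state(bool throughput_bias, bool has_slog, uint64_t size,
    uint64_t blocksize, bool sync)
{
	if (throughput_bias)
		return (EX_WR_INDIRECT);
	if (!has_slog && size >= blocksize && blocksize > ex_immediate_write_sz)
		return (EX_WR_INDIRECT);
	return (sync ? EX_WR_COPIED : EX_WR_NEED_COPY);
}

int
main(void)
{
	/* large write to a large block, no slog: indirect (prints 1) */
	printf("%d\n", ex_pick_write_state(false, false, 131072, 131072, true));
	/* small sync write with a slog: copied (prints 0) */
	printf("%d\n", ex_pick_write_state(false, true, 4096, 8192, true));
	/* small async write with a slog: copy deferred (prints 2) */
	printf("%d\n", ex_pick_write_state(false, true, 4096, 8192, false));
	return (0);
}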
530 zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, uint64_t offset,
531 uint64_t size, int sync)
533 uint32_t blocksize = zv->zv_volblocksize;
534 zilog_t *zilog = zv->zv_zilog;
535 itx_wr_state_t write_state;
538 if (zil_replaying(zilog, tx))
541 if (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
542 write_state = WR_INDIRECT;
543 else if (!spa_has_slogs(zilog->zl_spa) &&
544 size >= blocksize && blocksize > zvol_immediate_write_sz)
545 write_state = WR_INDIRECT;
547 write_state = WR_COPIED;
549 write_state = WR_NEED_COPY;
554 itx_wr_state_t wr_state = write_state;
557 if (wr_state == WR_COPIED && size > zil_max_copied_data(zilog))
558 wr_state = WR_NEED_COPY;
559 else if (wr_state == WR_INDIRECT)
560 len = MIN(blocksize - P2PHASE(offset, blocksize), size);
562 itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
563 (wr_state == WR_COPIED ? len : 0));
564 lr = (lr_write_t *)&itx->itx_lr;
565 if (wr_state == WR_COPIED && dmu_read_by_dnode(zv->zv_dn,
566 offset, len, lr+1, DMU_READ_NO_PREFETCH) != 0) {
567 zil_itx_destroy(itx);
568 itx = zil_itx_create(TX_WRITE, sizeof (*lr));
569 lr = (lr_write_t *)&itx->itx_lr;
570 wr_state = WR_NEED_COPY;
573 itx->itx_wr_state = wr_state;
574 lr->lr_foid = ZVOL_OBJ;
575 lr->lr_offset = offset;
578 BP_ZERO(&lr->lr_blkptr);
580 itx->itx_private = zv;
581 itx->itx_sync = sync;
583 (void) zil_itx_assign(zilog, itx, tx);
589 if (write_state == WR_COPIED || write_state == WR_NEED_COPY) {
590 dsl_pool_wrlog_count(zilog->zl_dmu_pool, sz, tx->tx_txg);
595 * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
598 zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
603 zilog_t *zilog = zv->zv_zilog;
605 if (zil_replaying(zilog, tx))
608 itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
609 lr = (lr_truncate_t *)&itx->itx_lr;
610 lr->lr_foid = ZVOL_OBJ;
614 itx->itx_sync = sync;
615 zil_itx_assign(zilog, itx, tx);
620 zvol_get_done(zgd_t *zgd, int error)
624 dmu_buf_rele(zgd->zgd_db, zgd);
626 zfs_rangelock_exit(zgd->zgd_lr);
628 kmem_free(zgd, sizeof (zgd_t));
632 * Get data to generate a TX_WRITE intent log record.
635 zvol_get_data(void *arg, uint64_t arg2, lr_write_t *lr, char *buf,
636 struct lwb *lwb, zio_t *zio)
638 zvol_state_t *zv = arg;
639 uint64_t offset = lr->lr_offset;
640 uint64_t size = lr->lr_length;
645 ASSERT3P(lwb, !=, NULL);
646 ASSERT3P(zio, !=, NULL);
647 ASSERT3U(size, !=, 0);
649 zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
653 * Write records come in two flavors: immediate and indirect.
654 * For small writes it's cheaper to store the data with the
655 * log record (immediate); for large writes it's cheaper to
656 * sync the data and get a pointer to it (indirect) so that
657 * we don't have to write the data twice.
659 if (buf != NULL) { /* immediate write */
660 zgd->zgd_lr = zfs_rangelock_enter(&zv->zv_rangelock, offset,
662 error = dmu_read_by_dnode(zv->zv_dn, offset, size, buf,
663 DMU_READ_NO_PREFETCH);
664 } else { /* indirect write */
666 * We have to lock the whole block to ensure that no one can change
667 * the data while it is being written out and its checksum is being
668 * calculated. Unlike zfs_get_data(), we need not re-check the
669 * blocksize after we get the lock, because it cannot be changed.
671 size = zv->zv_volblocksize;
672 offset = P2ALIGN_TYPED(offset, size, uint64_t);
673 zgd->zgd_lr = zfs_rangelock_enter(&zv->zv_rangelock, offset,
675 error = dmu_buf_hold_by_dnode(zv->zv_dn, offset, zgd, &db,
676 DMU_READ_NO_PREFETCH);
678 blkptr_t *bp = &lr->lr_blkptr;
684 ASSERT(db->db_offset == offset);
685 ASSERT(db->db_size == size);
687 error = dmu_sync(zio, lr->lr_common.lrc_txg,
695 zvol_get_done(zgd, error);
697 return (SET_ERROR(error));
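/*
 * Illustrative sketch (not part of this driver): the alignment arithmetic
 * behind the P2ALIGN_TYPED() call in the indirect-write path above. For a
 * power-of-two blocksize, aligning an offset down to its containing block is
 * just masking off the low bits; the helper name here is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t
ex_p2align(uint64_t offset, uint64_t blocksize)
{
	return (offset & ~(blocksize - 1));
}

int
main(void)
{
	/* a 3000-byte offset into 8K blocks falls in the block starting at 0 */
	printf("%llu\n", (unsigned long long)ex_p2align(3000, 8192));
	/* 70000 falls in the 8K block starting at 65536 */
	printf("%llu\n", (unsigned long long)ex_p2align(70000, 8192));
	return (0);
}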
701 * The zvol_state_t's are inserted into zvol_state_list and zvol_htable.
705 zvol_insert(zvol_state_t *zv)
707 ASSERT(RW_WRITE_HELD(&zvol_state_lock));
708 list_insert_head(&zvol_state_list, zv);
709 hlist_add_head(&zv->zv_hlink, ZVOL_HT_HEAD(zv->zv_hash));
713 * Simply remove the zvol from the list of zvols.
716 zvol_remove(zvol_state_t *zv)
718 ASSERT(RW_WRITE_HELD(&zvol_state_lock));
719 list_remove(&zvol_state_list, zv);
720 hlist_del(&zv->zv_hlink);
724 * Set up zv after we have just taken ownership of zv->zv_objset.
727 zvol_setup_zv(zvol_state_t *zv)
732 objset_t *os = zv->zv_objset;
734 ASSERT(MUTEX_HELD(&zv->zv_state_lock));
735 ASSERT(RW_LOCK_HELD(&zv->zv_suspend_lock));
738 zv->zv_flags &= ~ZVOL_WRITTEN_TO;
740 error = dsl_prop_get_integer(zv->zv_name, "readonly", &ro, NULL);
742 return (SET_ERROR(error));
744 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
746 return (SET_ERROR(error));
748 error = dnode_hold(os, ZVOL_OBJ, zv, &zv->zv_dn);
750 return (SET_ERROR(error));
752 zvol_os_set_capacity(zv, volsize >> 9);
753 zv->zv_volsize = volsize;
755 if (ro || dmu_objset_is_snapshot(os) ||
756 !spa_writeable(dmu_objset_spa(os))) {
757 zvol_os_set_disk_ro(zv, 1);
758 zv->zv_flags |= ZVOL_RDONLY;
760 zvol_os_set_disk_ro(zv, 0);
761 zv->zv_flags &= ~ZVOL_RDONLY;
767 * Shut down everything related to zv_objset except zv_objset itself.
768 * This is the reverse of zvol_setup_zv.
771 zvol_shutdown_zv(zvol_state_t *zv)
773 ASSERT(MUTEX_HELD(&zv->zv_state_lock) &&
774 RW_LOCK_HELD(&zv->zv_suspend_lock));
776 if (zv->zv_flags & ZVOL_WRITTEN_TO) {
777 ASSERT(zv->zv_zilog != NULL);
778 zil_close(zv->zv_zilog);
783 dnode_rele(zv->zv_dn, zv);
787 * Evict cached data. We must write out any dirty data before
788 * disowning the dataset.
790 if (zv->zv_flags & ZVOL_WRITTEN_TO)
791 txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
792 (void) dmu_objset_evict_dbufs(zv->zv_objset);
796 * return the proper tag for rollback and recv
799 zvol_tag(zvol_state_t *zv)
801 ASSERT(RW_WRITE_HELD(&zv->zv_suspend_lock));
802 return (zv->zv_open_count > 0 ? zv : NULL);
806 * Suspend the zvol for recv and rollback.
809 zvol_suspend(const char *name)
813 zv = zvol_find_by_name(name, RW_WRITER);
818 /* block all I/O, release in zvol_resume. */
819 ASSERT(MUTEX_HELD(&zv->zv_state_lock));
820 ASSERT(RW_WRITE_HELD(&zv->zv_suspend_lock));
822 atomic_inc(&zv->zv_suspend_ref);
824 if (zv->zv_open_count > 0)
825 zvol_shutdown_zv(zv);
828 * do not hold zv_state_lock across suspend/resume to
829 * avoid locking up zvol lookups
831 mutex_exit(&zv->zv_state_lock);
833 /* zv_suspend_lock is released in zvol_resume() */
838 zvol_resume(zvol_state_t *zv)
842 ASSERT(RW_WRITE_HELD(&zv->zv_suspend_lock));
844 mutex_enter(&zv->zv_state_lock);
846 if (zv->zv_open_count > 0) {
847 VERIFY0(dmu_objset_hold(zv->zv_name, zv, &zv->zv_objset));
848 VERIFY3P(zv->zv_objset->os_dsl_dataset->ds_owner, ==, zv);
849 VERIFY(dsl_dataset_long_held(zv->zv_objset->os_dsl_dataset));
850 dmu_objset_rele(zv->zv_objset, zv);
852 error = zvol_setup_zv(zv);
855 mutex_exit(&zv->zv_state_lock);
857 rw_exit(&zv->zv_suspend_lock);
859 * We need this because we don't hold zvol_state_lock while releasing
860 * zv_suspend_lock. zvol_remove_minors_impl thus cannot check
861 * zv_suspend_lock to determine whether it is safe to free, because an
862 * rwlock is not inherently atomic.
864 atomic_dec(&zv->zv_suspend_ref);
866 return (SET_ERROR(error));
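/*
 * Hypothetical caller sketch (not part of this file), showing the intended
 * pairing of zvol_suspend() and zvol_resume() around an operation such as a
 * receive or rollback. It assumes zvol_suspend() returns the suspended
 * zvol_state_t (or NULL if no such zvol exists); the return type is elided
 * in this excerpt, so treat the signature as an assumption. do_dataset_op()
 * is a placeholder for the actual receive/rollback work.
 */
static int do_dataset_op(const char *name);	/* placeholder, not defined */

static int
example_op_with_zvol_suspended(const char *name)
{
	zvol_state_t *zv = zvol_suspend(name);	/* takes zv_suspend_lock as writer */
	int error = do_dataset_op(name);	/* placeholder for recv/rollback */

	if (zv != NULL) {
		int rerr = zvol_resume(zv);	/* reopens state, drops the lock */
		if (error == 0)
			error = rerr;
	}
	return (error);
}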
870 zvol_first_open(zvol_state_t *zv, boolean_t readonly)
875 ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
876 ASSERT(MUTEX_HELD(&zv->zv_state_lock));
877 ASSERT(mutex_owned(&spa_namespace_lock));
879 boolean_t ro = (readonly || (strchr(zv->zv_name, '@') != NULL));
880 error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, ro, B_TRUE, zv, &os);
882 return (SET_ERROR(error));
886 error = zvol_setup_zv(zv);
888 dmu_objset_disown(os, 1, zv);
889 zv->zv_objset = NULL;
896 zvol_last_close(zvol_state_t *zv)
898 ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
899 ASSERT(MUTEX_HELD(&zv->zv_state_lock));
901 zvol_shutdown_zv(zv);
903 dmu_objset_disown(zv->zv_objset, 1, zv);
904 zv->zv_objset = NULL;
907 typedef struct minors_job {
917 * Prefetch zvol dnodes for the minors_job
920 zvol_prefetch_minors_impl(void *arg)
922 minors_job_t *job = arg;
923 char *dsname = job->name;
926 job->error = dmu_objset_own(dsname, DMU_OST_ZVOL, B_TRUE, B_TRUE,
928 if (job->error == 0) {
929 dmu_prefetch(os, ZVOL_OBJ, 0, 0, 0, ZIO_PRIORITY_SYNC_READ);
930 dmu_objset_disown(os, B_TRUE, FTAG);
935 * Mask errors to continue dmu_objset_find() traversal
938 zvol_create_snap_minor_cb(const char *dsname, void *arg)
940 minors_job_t *j = arg;
941 list_t *minors_list = j->list;
942 const char *name = j->name;
944 ASSERT0(MUTEX_HELD(&spa_namespace_lock));
946 /* skip the designated dataset */
947 if (name && strcmp(dsname, name) == 0)
950 /* at this point, the dsname should name a snapshot */
951 if (strchr(dsname, '@') == 0) {
952 dprintf("zvol_create_snap_minor_cb(): "
953 "%s is not a snapshot name\n", dsname);
956 char *n = kmem_strdup(dsname);
960 job = kmem_alloc(sizeof (minors_job_t), KM_SLEEP);
962 job->list = minors_list;
964 list_insert_tail(minors_list, job);
965 /* don't care if dispatch fails, because job->error is 0 */
966 taskq_dispatch(system_taskq, zvol_prefetch_minors_impl, job,
974 * If spa_keystore_load_wkey() is called for an encrypted zvol,
975 * we need to look for any clones also using the key. This function
976 * is "best effort": if anything fails, we simply skip it and continue.
979 zvol_add_clones(const char *dsname, list_t *minors_list)
981 /* Also check if it has clones */
982 dsl_dir_t *dd = NULL;
983 dsl_pool_t *dp = NULL;
985 if (dsl_pool_hold(dsname, FTAG, &dp) != 0)
988 if (!spa_feature_is_enabled(dp->dp_spa,
989 SPA_FEATURE_ENCRYPTION))
992 if (dsl_dir_hold(dp, dsname, FTAG, &dd, NULL) != 0)
995 if (dsl_dir_phys(dd)->dd_clones == 0)
998 zap_cursor_t *zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
999 zap_attribute_t *za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
1000 objset_t *mos = dd->dd_pool->dp_meta_objset;
1002 for (zap_cursor_init(zc, mos, dsl_dir_phys(dd)->dd_clones);
1003 zap_cursor_retrieve(zc, za) == 0;
1004 zap_cursor_advance(zc)) {
1005 dsl_dataset_t *clone;
1008 if (dsl_dataset_hold_obj(dd->dd_pool,
1009 za->za_first_integer, FTAG, &clone) == 0) {
1011 char name[ZFS_MAX_DATASET_NAME_LEN];
1012 dsl_dataset_name(clone, name);
1014 char *n = kmem_strdup(name);
1015 job = kmem_alloc(sizeof (minors_job_t), KM_SLEEP);
1017 job->list = minors_list;
1019 list_insert_tail(minors_list, job);
1021 dsl_dataset_rele(clone, FTAG);
1024 zap_cursor_fini(zc);
1025 kmem_free(za, sizeof (zap_attribute_t));
1026 kmem_free(zc, sizeof (zap_cursor_t));
1030 dsl_dir_rele(dd, FTAG);
1031 dsl_pool_rele(dp, FTAG);
1035 * Mask errors to continue dmu_objset_find() traversal
1038 zvol_create_minors_cb(const char *dsname, void *arg)
1042 list_t *minors_list = arg;
1044 ASSERT0(MUTEX_HELD(&spa_namespace_lock));
1046 error = dsl_prop_get_integer(dsname, "snapdev", &snapdev, NULL);
1051 * Given the name and the 'snapdev' property, create device minor nodes
1052 * with the linkages to zvols/snapshots as needed.
1053 * If the name represents a zvol, create a minor node for the zvol, then
1054 * check if its snapshots are 'visible', and if so, iterate over the
1055 * snapshots and create device minor nodes for those.
1057 if (strchr(dsname, '@') == 0) {
1059 char *n = kmem_strdup(dsname);
1063 job = kmem_alloc(sizeof (minors_job_t), KM_SLEEP);
1065 job->list = minors_list;
1067 list_insert_tail(minors_list, job);
1068 /* don't care if dispatch fails, because job->error is 0 */
1069 taskq_dispatch(system_taskq, zvol_prefetch_minors_impl, job,
1072 zvol_add_clones(dsname, minors_list);
1074 if (snapdev == ZFS_SNAPDEV_VISIBLE) {
1076 * traverse snapshots only, do not traverse children,
1077 * and skip the 'dsname'
1079 (void) dmu_objset_find(dsname,
1080 zvol_create_snap_minor_cb, (void *)job,
1084 dprintf("zvol_create_minors_cb(): %s is not a zvol name\n",
1092 * Create minors for the specified dataset, including children and snapshots.
1093 * Pay attention to the 'snapdev' property and iterate over the snapshots
1094 * only if they are 'visible'. This approach ensures that the
1095 * snapshot metadata is read from disk only if it is needed.
1097 * The name can represent a dataset to be recursively scanned for zvols and
1098 * their snapshots, or a single zvol snapshot. If the name represents a
1099 * dataset, the scan is performed in two nested stages:
1100 * - scan the dataset for zvols, and
1101 * - for each zvol, create a minor node, then check if the zvol's snapshots
1102 * are 'visible', and only then iterate over the snapshots if needed
1104 * If the name represents a snapshot, a check is performed to see whether the
1105 * snapshot is 'visible' (which also verifies that the parent is a zvol), and
1106 * if so, a minor node for that snapshot is created.
1109 zvol_create_minors_recursive(const char *name)
1114 if (zvol_inhibit_dev)
1118 * This is the list of prefetch jobs. Whenever we find a match during
1119 * dmu_objset_find, we insert a minors_job into the list and dispatch a task
1120 * to prefetch the zvol dnodes in parallel. Note that we don't need any
1121 * lock, because all list operations are done on the current thread.
1123 * We will use this list to call zvol_os_create_minor after the prefetch,
1124 * so we don't have to traverse using dmu_objset_find again.
1126 list_create(&minors_list, sizeof (minors_job_t),
1127 offsetof(minors_job_t, link));
1130 if (strchr(name, '@') != NULL) {
1133 int error = dsl_prop_get_integer(name, "snapdev",
1136 if (error == 0 && snapdev == ZFS_SNAPDEV_VISIBLE)
1137 (void) zvol_os_create_minor(name);
1139 fstrans_cookie_t cookie = spl_fstrans_mark();
1140 (void) dmu_objset_find(name, zvol_create_minors_cb,
1141 &minors_list, DS_FIND_CHILDREN);
1142 spl_fstrans_unmark(cookie);
1145 taskq_wait_outstanding(system_taskq, 0);
1148 * Prefetch is complete; we can now call zvol_os_create_minor
1151 while ((job = list_head(&minors_list)) != NULL) {
1152 list_remove(&minors_list, job);
1154 (void) zvol_os_create_minor(job->name);
1155 kmem_strfree(job->name);
1156 kmem_free(job, sizeof (minors_job_t));
1159 list_destroy(&minors_list);
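/*
 * Illustrative sketch (not part of this driver): the dispatch-then-drain
 * pattern used by zvol_create_minors_recursive() above, restated in user
 * space with pthreads instead of the taskq. Prefetch work runs in parallel,
 * the caller waits for all of it (the taskq_wait_outstanding() analogue), and
 * only then are the minors "created" serially. All names are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define	EX_NJOBS	3

typedef struct ex_job {
	char		ej_name[64];
	int		ej_error;
	pthread_t	ej_thread;
} ex_job_t;

/* stands in for zvol_prefetch_minors_impl(): warm up state for one name */
static void *
ex_prefetch(void *arg)
{
	ex_job_t *job = arg;

	printf("prefetching %s\n", job->ej_name);
	job->ej_error = 0;
	return (NULL);
}

int
main(void)
{
	ex_job_t jobs[EX_NJOBS];
	const char *names[EX_NJOBS] = { "pool/vol0", "pool/vol1", "pool/vol2" };

	/* phase 1: dispatch one prefetch job per matching dataset */
	for (int i = 0; i < EX_NJOBS; i++) {
		(void) snprintf(jobs[i].ej_name, sizeof (jobs[i].ej_name),
		    "%s", names[i]);
		pthread_create(&jobs[i].ej_thread, NULL, ex_prefetch, &jobs[i]);
	}

	/* phase 2: wait for all prefetch work to drain */
	for (int i = 0; i < EX_NJOBS; i++)
		pthread_join(jobs[i].ej_thread, NULL);

	/* phase 3: create minors serially, using the already-warmed state */
	for (int i = 0; i < EX_NJOBS; i++)
		printf("creating minor for %s (error %d)\n",
		    jobs[i].ej_name, jobs[i].ej_error);
	return (0);
}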
1163 zvol_create_minor(const char *name)
1166 * Note: the dsl_pool_config_lock must not be held.
1167 * Minor node creation needs to obtain the zvol_state_lock.
1168 * zvol_open() obtains the zvol_state_lock and then the dsl pool
1169 * config lock. Therefore, we can't have the config lock now if
1170 * we are going to wait for the zvol_state_lock, because it
1171 * would be a lock order inversion which could lead to deadlock.
1174 if (zvol_inhibit_dev)
1177 if (strchr(name, '@') != NULL) {
1180 int error = dsl_prop_get_integer(name,
1181 "snapdev", &snapdev, NULL);
1183 if (error == 0 && snapdev == ZFS_SNAPDEV_VISIBLE)
1184 (void) zvol_os_create_minor(name);
1186 (void) zvol_os_create_minor(name);
1191 * Remove minors for the specified dataset, including children and snapshots.
1195 zvol_free_task(void *arg)
1201 zvol_remove_minors_impl(const char *name)
1203 zvol_state_t *zv, *zv_next;
1204 int namelen = ((name) ? strlen(name) : 0);
1208 if (zvol_inhibit_dev)
1211 list_create(&free_list, sizeof (zvol_state_t),
1212 offsetof(zvol_state_t, zv_next));
1214 rw_enter(&zvol_state_lock, RW_WRITER);
1216 for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
1217 zv_next = list_next(&zvol_state_list, zv);
1219 mutex_enter(&zv->zv_state_lock);
1220 if (name == NULL || strcmp(zv->zv_name, name) == 0 ||
1221 (strncmp(zv->zv_name, name, namelen) == 0 &&
1222 (zv->zv_name[namelen] == '/' ||
1223 zv->zv_name[namelen] == '@'))) {
1225 * By holding zv_state_lock here, we guarantee that no
1226 * one is currently using this zv
1229 /* If in use, leave alone */
1230 if (zv->zv_open_count > 0 ||
1231 atomic_read(&zv->zv_suspend_ref)) {
1232 mutex_exit(&zv->zv_state_lock);
1239 * Cleared while holding zvol_state_lock as a writer
1240 * which will prevent zvol_open() from opening it.
1242 zvol_os_clear_private(zv);
1244 /* Drop zv_state_lock before zvol_free() */
1245 mutex_exit(&zv->zv_state_lock);
1247 /* Try to free the zv in parallel; if dispatch fails, do it in place */
1248 t = taskq_dispatch(system_taskq, zvol_free_task, zv,
1250 if (t == TASKQID_INVALID)
1251 list_insert_head(&free_list, zv);
1253 mutex_exit(&zv->zv_state_lock);
1256 rw_exit(&zvol_state_lock);
1258 /* Drop zvol_state_lock before calling zvol_free() */
1259 while ((zv = list_head(&free_list)) != NULL) {
1260 list_remove(&free_list, zv);
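/*
 * Illustrative sketch (not part of this driver): the match test used by
 * zvol_remove_minors_impl() above, extracted into a standalone helper so the
 * three cases are easy to see. A zvol is affected when no name is given, when
 * it is the named dataset itself, or when it is a child ('/') or snapshot
 * ('@') of the named dataset. The helper name is hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool
ex_name_matches(const char *zv_name, const char *name)
{
	size_t namelen = (name != NULL) ? strlen(name) : 0;

	return (name == NULL || strcmp(zv_name, name) == 0 ||
	    (strncmp(zv_name, name, namelen) == 0 &&
	    (zv_name[namelen] == '/' || zv_name[namelen] == '@')));
}

int
main(void)
{
	printf("%d\n", ex_name_matches("pool/vol", "pool/vol"));	/* 1: exact    */
	printf("%d\n", ex_name_matches("pool/vol@snap", "pool/vol"));	/* 1: snapshot */
	printf("%d\n", ex_name_matches("pool/vol/child", "pool/vol"));	/* 1: child    */
	printf("%d\n", ex_name_matches("pool/volume", "pool/vol"));	/* 0: sibling  */
	return (0);
}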
1265 /* Remove minor for this specific volume only */
1267 zvol_remove_minor_impl(const char *name)
1269 zvol_state_t *zv = NULL, *zv_next;
1271 if (zvol_inhibit_dev)
1274 rw_enter(&zvol_state_lock, RW_WRITER);
1276 for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
1277 zv_next = list_next(&zvol_state_list, zv);
1279 mutex_enter(&zv->zv_state_lock);
1280 if (strcmp(zv->zv_name, name) == 0) {
1282 * By holding zv_state_lock here, we guarantee that no
1283 * one is currently using this zv
1286 /* If in use, leave alone */
1287 if (zv->zv_open_count > 0 ||
1288 atomic_read(&zv->zv_suspend_ref)) {
1289 mutex_exit(&zv->zv_state_lock);
1294 zvol_os_clear_private(zv);
1295 mutex_exit(&zv->zv_state_lock);
1298 mutex_exit(&zv->zv_state_lock);
1302 /* Drop zvol_state_lock before calling zvol_free() */
1303 rw_exit(&zvol_state_lock);
1310 * Rename minors for the specified dataset, including children and snapshots.
1313 zvol_rename_minors_impl(const char *oldname, const char *newname)
1315 zvol_state_t *zv, *zv_next;
1318 if (zvol_inhibit_dev)
1321 oldnamelen = strlen(oldname);
1323 rw_enter(&zvol_state_lock, RW_READER);
1325 for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
1326 zv_next = list_next(&zvol_state_list, zv);
1328 mutex_enter(&zv->zv_state_lock);
1330 if (strcmp(zv->zv_name, oldname) == 0) {
1331 zvol_os_rename_minor(zv, newname);
1332 } else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 &&
1333 (zv->zv_name[oldnamelen] == '/' ||
1334 zv->zv_name[oldnamelen] == '@')) {
1335 char *name = kmem_asprintf("%s%c%s", newname,
1336 zv->zv_name[oldnamelen],
1337 zv->zv_name + oldnamelen + 1);
1338 zvol_os_rename_minor(zv, name);
1342 mutex_exit(&zv->zv_state_lock);
1345 rw_exit(&zvol_state_lock);
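/*
 * Illustrative sketch (not part of this driver): the child-name rewrite done
 * by zvol_rename_minors_impl() above with kmem_asprintf("%s%c%s", ...). The
 * separator character ('/' or '@') and the suffix are kept while the prefix
 * is swapped. The helper name is hypothetical; snprintf stands in for
 * kmem_asprintf.
 */
#include <stdio.h>
#include <string.h>

static void
ex_rename_child(const char *zv_name, const char *oldname, const char *newname,
    char *out, size_t outlen)
{
	size_t oldnamelen = strlen(oldname);

	(void) snprintf(out, outlen, "%s%c%s", newname,
	    zv_name[oldnamelen], zv_name + oldnamelen + 1);
}

int
main(void)
{
	char buf[256];

	ex_rename_child("pool/old@snap1", "pool/old", "pool/new",
	    buf, sizeof (buf));
	printf("%s\n", buf);		/* pool/new@snap1 */

	ex_rename_child("pool/old/child", "pool/old", "pool/new",
	    buf, sizeof (buf));
	printf("%s\n", buf);		/* pool/new/child */
	return (0);
}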
1348 typedef struct zvol_snapdev_cb_arg {
1350 } zvol_snapdev_cb_arg_t;
1353 zvol_set_snapdev_cb(const char *dsname, void *param)
1355 zvol_snapdev_cb_arg_t *arg = param;
1357 if (strchr(dsname, '@') == NULL)
1360 switch (arg->snapdev) {
1361 case ZFS_SNAPDEV_VISIBLE:
1362 (void) zvol_os_create_minor(dsname);
1364 case ZFS_SNAPDEV_HIDDEN:
1365 (void) zvol_remove_minor_impl(dsname);
1373 zvol_set_snapdev_impl(char *name, uint64_t snapdev)
1375 zvol_snapdev_cb_arg_t arg = {snapdev};
1376 fstrans_cookie_t cookie = spl_fstrans_mark();
1378 * The zvol_set_snapdev_sync() sets snapdev appropriately
1379 * in the dataset hierarchy. Here, we only scan snapshots.
1381 dmu_objset_find(name, zvol_set_snapdev_cb, &arg, DS_FIND_SNAPSHOTS);
1382 spl_fstrans_unmark(cookie);
1386 zvol_set_volmode_impl(char *name, uint64_t volmode)
1388 fstrans_cookie_t cookie;
1389 uint64_t old_volmode;
1392 if (strchr(name, '@') != NULL)
1396 * It's unfortunate we need to remove minors before we create new ones:
1397 * this is necessary because our backing gendisk (zvol_state->zv_disk)
1398 * could be different when we set, for instance, volmode from "geom"
1399 * to "dev" (or vice versa).
1401 zv = zvol_find_by_name(name, RW_NONE);
1402 if (zv == NULL && volmode == ZFS_VOLMODE_NONE)
1405 old_volmode = zv->zv_volmode;
1406 mutex_exit(&zv->zv_state_lock);
1407 if (old_volmode == volmode)
1409 zvol_wait_close(zv);
1411 cookie = spl_fstrans_mark();
1413 case ZFS_VOLMODE_NONE:
1414 (void) zvol_remove_minor_impl(name);
1416 case ZFS_VOLMODE_GEOM:
1417 case ZFS_VOLMODE_DEV:
1418 (void) zvol_remove_minor_impl(name);
1419 (void) zvol_os_create_minor(name);
1421 case ZFS_VOLMODE_DEFAULT:
1422 (void) zvol_remove_minor_impl(name);
1423 if (zvol_volmode == ZFS_VOLMODE_NONE)
1425 else /* if zvol_volmode is invalid defaults to "geom" */
1426 (void) zvol_os_create_minor(name);
1429 spl_fstrans_unmark(cookie);
1432 static zvol_task_t *
1433 zvol_task_alloc(zvol_async_op_t op, const char *name1, const char *name2,
1438 /* Never allow tasks on hidden names. */
1439 if (name1[0] == '$')
1442 task = kmem_zalloc(sizeof (zvol_task_t), KM_SLEEP);
1444 task->value = value;
1446 strlcpy(task->name1, name1, MAXNAMELEN);
1448 strlcpy(task->name2, name2, MAXNAMELEN);
1454 zvol_task_free(zvol_task_t *task)
1456 kmem_free(task, sizeof (zvol_task_t));
1460 * The worker function, executed asynchronously on the pool's spa_zvol_taskq.
1463 zvol_task_cb(void *arg)
1465 zvol_task_t *task = arg;
1468 case ZVOL_ASYNC_REMOVE_MINORS:
1469 zvol_remove_minors_impl(task->name1);
1471 case ZVOL_ASYNC_RENAME_MINORS:
1472 zvol_rename_minors_impl(task->name1, task->name2);
1474 case ZVOL_ASYNC_SET_SNAPDEV:
1475 zvol_set_snapdev_impl(task->name1, task->value);
1477 case ZVOL_ASYNC_SET_VOLMODE:
1478 zvol_set_volmode_impl(task->name1, task->value);
1485 zvol_task_free(task);
1488 typedef struct zvol_set_prop_int_arg {
1489 const char *zsda_name;
1490 uint64_t zsda_value;
1491 zprop_source_t zsda_source;
1493 } zvol_set_prop_int_arg_t;
1496 * Sanity check the dataset for safe use by the sync task. No additional
1497 * conditions are imposed.
1500 zvol_set_snapdev_check(void *arg, dmu_tx_t *tx)
1502 zvol_set_prop_int_arg_t *zsda = arg;
1503 dsl_pool_t *dp = dmu_tx_pool(tx);
1507 error = dsl_dir_hold(dp, zsda->zsda_name, FTAG, &dd, NULL);
1511 dsl_dir_rele(dd, FTAG);
1517 zvol_set_snapdev_sync_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
1520 char dsname[MAXNAMELEN];
1524 dsl_dataset_name(ds, dsname);
1525 if (dsl_prop_get_int_ds(ds, "snapdev", &snapdev) != 0)
1527 task = zvol_task_alloc(ZVOL_ASYNC_SET_SNAPDEV, dsname, NULL, snapdev);
1531 (void) taskq_dispatch(dp->dp_spa->spa_zvol_taskq, zvol_task_cb,
1537 * Traverse all child datasets and apply snapdev appropriately.
1538 * We call dsl_prop_set_sync_impl() here to set the value only on the toplevel
1539 * dataset and read the effective "snapdev" on every child in the callback
1540 * function: this is because the value is not guaranteed to be the same in the
1541 * whole dataset hierarchy.
1544 zvol_set_snapdev_sync(void *arg, dmu_tx_t *tx)
1546 zvol_set_prop_int_arg_t *zsda = arg;
1547 dsl_pool_t *dp = dmu_tx_pool(tx);
1552 VERIFY0(dsl_dir_hold(dp, zsda->zsda_name, FTAG, &dd, NULL));
1555 error = dsl_dataset_hold(dp, zsda->zsda_name, FTAG, &ds);
1557 dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_SNAPDEV),
1558 zsda->zsda_source, sizeof (zsda->zsda_value), 1,
1559 &zsda->zsda_value, zsda->zsda_tx);
1560 dsl_dataset_rele(ds, FTAG);
1562 dmu_objset_find_dp(dp, dd->dd_object, zvol_set_snapdev_sync_cb,
1563 zsda, DS_FIND_CHILDREN);
1565 dsl_dir_rele(dd, FTAG);
1569 zvol_set_snapdev(const char *ddname, zprop_source_t source, uint64_t snapdev)
1571 zvol_set_prop_int_arg_t zsda;
1573 zsda.zsda_name = ddname;
1574 zsda.zsda_source = source;
1575 zsda.zsda_value = snapdev;
1577 return (dsl_sync_task(ddname, zvol_set_snapdev_check,
1578 zvol_set_snapdev_sync, &zsda, 0, ZFS_SPACE_CHECK_NONE));
1582 * Sanity check the dataset for safe use by the sync task. No additional
1583 * conditions are imposed.
1586 zvol_set_volmode_check(void *arg, dmu_tx_t *tx)
1588 zvol_set_prop_int_arg_t *zsda = arg;
1589 dsl_pool_t *dp = dmu_tx_pool(tx);
1593 error = dsl_dir_hold(dp, zsda->zsda_name, FTAG, &dd, NULL);
1597 dsl_dir_rele(dd, FTAG);
1603 zvol_set_volmode_sync_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
1606 char dsname[MAXNAMELEN];
1610 dsl_dataset_name(ds, dsname);
1611 if (dsl_prop_get_int_ds(ds, "volmode", &volmode) != 0)
1613 task = zvol_task_alloc(ZVOL_ASYNC_SET_VOLMODE, dsname, NULL, volmode);
1617 (void) taskq_dispatch(dp->dp_spa->spa_zvol_taskq, zvol_task_cb,
1623 * Traverse all child datasets and apply volmode appropriately.
1624 * We call dsl_prop_set_sync_impl() here to set the value only on the toplevel
1625 * dataset and read the effective "volmode" on every child in the callback
1626 * function: this is because the value is not guaranteed to be the same in the
1627 * whole dataset hierarchy.
1630 zvol_set_volmode_sync(void *arg, dmu_tx_t *tx)
1632 zvol_set_prop_int_arg_t *zsda = arg;
1633 dsl_pool_t *dp = dmu_tx_pool(tx);
1638 VERIFY0(dsl_dir_hold(dp, zsda->zsda_name, FTAG, &dd, NULL));
1641 error = dsl_dataset_hold(dp, zsda->zsda_name, FTAG, &ds);
1643 dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_VOLMODE),
1644 zsda->zsda_source, sizeof (zsda->zsda_value), 1,
1645 &zsda->zsda_value, zsda->zsda_tx);
1646 dsl_dataset_rele(ds, FTAG);
1649 dmu_objset_find_dp(dp, dd->dd_object, zvol_set_volmode_sync_cb,
1650 zsda, DS_FIND_CHILDREN);
1652 dsl_dir_rele(dd, FTAG);
1656 zvol_set_volmode(const char *ddname, zprop_source_t source, uint64_t volmode)
1658 zvol_set_prop_int_arg_t zsda;
1660 zsda.zsda_name = ddname;
1661 zsda.zsda_source = source;
1662 zsda.zsda_value = volmode;
1664 return (dsl_sync_task(ddname, zvol_set_volmode_check,
1665 zvol_set_volmode_sync, &zsda, 0, ZFS_SPACE_CHECK_NONE));
1669 zvol_remove_minors(spa_t *spa, const char *name, boolean_t async)
1674 task = zvol_task_alloc(ZVOL_ASYNC_REMOVE_MINORS, name, NULL, ~0ULL);
1678 id = taskq_dispatch(spa->spa_zvol_taskq, zvol_task_cb, task, TQ_SLEEP);
1679 if ((async == B_FALSE) && (id != TASKQID_INVALID))
1680 taskq_wait_id(spa->spa_zvol_taskq, id);
1684 zvol_rename_minors(spa_t *spa, const char *name1, const char *name2,
1690 task = zvol_task_alloc(ZVOL_ASYNC_RENAME_MINORS, name1, name2, ~0ULL);
1694 id = taskq_dispatch(spa->spa_zvol_taskq, zvol_task_cb, task, TQ_SLEEP);
1695 if ((async == B_FALSE) && (id != TASKQID_INVALID))
1696 taskq_wait_id(spa->spa_zvol_taskq, id);
1700 zvol_is_zvol(const char *name)
1703 return (zvol_os_is_zvol(name));
1707 zvol_init_impl(void)
1711 list_create(&zvol_state_list, sizeof (zvol_state_t),
1712 offsetof(zvol_state_t, zv_next));
1713 rw_init(&zvol_state_lock, NULL, RW_DEFAULT, NULL);
1715 zvol_htable = kmem_alloc(ZVOL_HT_SIZE * sizeof (struct hlist_head),
1717 for (i = 0; i < ZVOL_HT_SIZE; i++)
1718 INIT_HLIST_HEAD(&zvol_htable[i]);
1724 zvol_fini_impl(void)
1726 zvol_remove_minors_impl(NULL);
1729 * The call to "zvol_remove_minors_impl" may dispatch entries to
1730 * the system_taskq, but it doesn't wait for those entries to
1731 * complete before it returns. Thus, we must wait for all of the
1732 * removals to finish before we can continue.
1734 taskq_wait_outstanding(system_taskq, 0);
1736 kmem_free(zvol_htable, ZVOL_HT_SIZE * sizeof (struct hlist_head));
1737 list_destroy(&zvol_state_list);
1738 rw_destroy(&zvol_state_lock);