/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
 */
#include <sys/dataset_kstats.h>
#include <sys/dbuf.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dmu_tx.h>
#include <sys/zio.h>
#include <sys/zfs_rlock.h>
#include <sys/spa_impl.h>
#include <sys/zvol.h>
#include <sys/zvol_impl.h>

#include <linux/blkdev_compat.h>
#include <linux/task_io_accounting_ops.h>
unsigned int zvol_major = ZVOL_MAJOR;
unsigned int zvol_request_sync = 0;
unsigned int zvol_prefetch_bytes = (128 * 1024);
unsigned long zvol_max_discard_blocks = 16384;
unsigned int zvol_threads = 32;
unsigned int zvol_open_timeout_ms = 1000;
struct zvol_state_os {
	struct gendisk		*zvo_disk;	/* generic disk */
	struct request_queue	*zvo_queue;	/* request queue */
	dev_t			zvo_dev;	/* device id */
};

taskq_t *zvol_taskq;
static struct ida zvol_ida;
typedef struct zv_request_stack {
	zvol_state_t	*zv;
	struct bio	*bio;
} zv_request_t;

typedef struct zv_request_task {
	zv_request_t	zvr;
	taskq_ent_t	ent;
} zv_request_task_t;

static zv_request_task_t *
zv_request_task_create(zv_request_t zvr)
{
	zv_request_task_t *task;
	task = kmem_alloc(sizeof (zv_request_task_t), KM_SLEEP);
	taskq_init_ent(&task->ent);
	task->zvr = zvr;
	return (task);
}

static void
zv_request_task_free(zv_request_task_t *task)
{
	kmem_free(task, sizeof (*task));
}
/*
 * Given a path, return TRUE if path is a ZVOL.
 */
static boolean_t
zvol_is_zvol_impl(const char *path)
{
	dev_t dev = 0;

	if (vdev_lookup_bdev(path, &dev) != 0)
		return (B_FALSE);

	if (MAJOR(dev) == zvol_major)
		return (B_TRUE);

	return (B_FALSE);
}
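
/*
 * Process a write bio: flush the ZIL first if the bio requests it, then
 * copy the data into the DMU in bounded chunks under the zvol's range
 * lock, logging each chunk to the ZIL and committing synchronously when
 * FUA or sync=always semantics demand it.
 */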
static void
zvol_write(zv_request_t *zvr)
{
	struct bio *bio = zvr->bio;
	int error = 0;
	zfs_uio_t uio;

	zfs_uio_bvec_init(&uio, bio);

	zvol_state_t *zv = zvr->zv;
	ASSERT3P(zv, !=, NULL);
	ASSERT3U(zv->zv_open_count, >, 0);
	ASSERT3P(zv->zv_zilog, !=, NULL);

	/* A bio marked as FLUSH needs to flush before the write. */
	if (bio_is_flush(bio))
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

	/* Some requests are just for flush and nothing else. */
	if (uio.uio_resid == 0) {
		rw_exit(&zv->zv_suspend_lock);
		BIO_END_IO(bio, 0);
		return;
	}

	struct request_queue *q = zv->zv_zso->zvo_queue;
	struct gendisk *disk = zv->zv_zso->zvo_disk;
	ssize_t start_resid = uio.uio_resid;
	unsigned long start_time;

	boolean_t acct = blk_queue_io_stat(q);
	if (acct)
		start_time = blk_generic_start_io_acct(q, disk, WRITE, bio);

	boolean_t sync =
	    bio_is_fua(bio) || zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;

	zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
	    uio.uio_loffset, uio.uio_resid, RL_WRITER);

	uint64_t volsize = zv->zv_volsize;
	while (uio.uio_resid > 0 && uio.uio_loffset < volsize) {
		uint64_t bytes = MIN(uio.uio_resid, DMU_MAX_ACCESS >> 1);
		uint64_t off = uio.uio_loffset;
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);

		if (bytes > volsize - off)	/* don't write past the end */
			bytes = volsize - off;

		dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, bytes);

		/* This will only fail for ENOSPC */
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			break;
		}
		error = dmu_write_uio_dnode(zv->zv_dn, &uio, bytes, tx);
		if (error == 0)
			zvol_log_write(zv, tx, off, bytes, sync);
		dmu_tx_commit(tx);

		if (error)
			break;
	}
	zfs_rangelock_exit(lr);

	int64_t nwritten = start_resid - uio.uio_resid;
	dataset_kstats_update_write_kstats(&zv->zv_kstat, nwritten);
	task_io_account_write(nwritten);

	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

	rw_exit(&zv->zv_suspend_lock);

	if (acct)
		blk_generic_end_io_acct(q, disk, WRITE, bio, start_time);

	BIO_END_IO(bio, -error);
}
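
/*
 * Taskq callback: run the write handler from a zvol_taskq thread and
 * free the dispatched task.  The discard and read wrappers below follow
 * the same pattern.
 */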
static void
zvol_write_task(void *arg)
{
	zv_request_task_t *task = arg;
	zvol_write(&task->zvr);
	zv_request_task_free(task);
}
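
/*
 * Process a discard or secure-erase bio by freeing the corresponding
 * range in the DMU.  Unless a secure erase was requested, the range is
 * first trimmed to volume block boundaries since freeing partial blocks
 * would not reclaim any space.
 */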
static void
zvol_discard(zv_request_t *zvr)
{
	struct bio *bio = zvr->bio;
	zvol_state_t *zv = zvr->zv;
	uint64_t start = BIO_BI_SECTOR(bio) << 9;
	uint64_t size = BIO_BI_SIZE(bio);
	uint64_t end = start + size;
	boolean_t sync;
	int error = 0;
	dmu_tx_t *tx;

	ASSERT3P(zv, !=, NULL);
	ASSERT3U(zv->zv_open_count, >, 0);
	ASSERT3P(zv->zv_zilog, !=, NULL);

	struct request_queue *q = zv->zv_zso->zvo_queue;
	struct gendisk *disk = zv->zv_zso->zvo_disk;
	unsigned long start_time;

	boolean_t acct = blk_queue_io_stat(q);
	if (acct)
		start_time = blk_generic_start_io_acct(q, disk, WRITE, bio);

	sync = bio_is_fua(bio) || zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;

	if (end > zv->zv_volsize) {
		error = SET_ERROR(EIO);
		goto unlock;
	}

	/*
	 * Align the request to volume block boundaries when a secure erase is
	 * not required.  This will prevent dnode_free_range() from zeroing out
	 * the unaligned parts which is slow (read-modify-write) and useless
	 * since we are not freeing any space by doing so.
	 */
	if (!bio_is_secure_erase(bio)) {
		start = P2ROUNDUP(start, zv->zv_volblocksize);
		end = P2ALIGN(end, zv->zv_volblocksize);
		size = end - start;
	}

	if (start >= end)
		goto unlock;

	zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
	    start, size, RL_WRITER);

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error != 0) {
		dmu_tx_abort(tx);
	} else {
		zvol_log_truncate(zv, tx, start, size, B_TRUE);
		dmu_tx_commit(tx);
		error = dmu_free_long_range(zv->zv_objset,
		    ZVOL_OBJ, start, size);
	}
	zfs_rangelock_exit(lr);

	if (error == 0 && sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

unlock:
	rw_exit(&zv->zv_suspend_lock);

	if (acct)
		blk_generic_end_io_acct(q, disk, WRITE, bio, start_time);

	BIO_END_IO(bio, -error);
}
static void
zvol_discard_task(void *arg)
{
	zv_request_task_t *task = arg;
	zvol_discard(&task->zvr);
	zv_request_task_free(task);
}
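
/*
 * Process a read bio: copy data out of the DMU in bounded chunks while
 * holding the zvol's range lock as a reader.
 */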
static void
zvol_read(zv_request_t *zvr)
{
	struct bio *bio = zvr->bio;
	int error = 0;
	zfs_uio_t uio;

	zfs_uio_bvec_init(&uio, bio);

	zvol_state_t *zv = zvr->zv;
	ASSERT3P(zv, !=, NULL);
	ASSERT3U(zv->zv_open_count, >, 0);

	struct request_queue *q = zv->zv_zso->zvo_queue;
	struct gendisk *disk = zv->zv_zso->zvo_disk;
	ssize_t start_resid = uio.uio_resid;
	unsigned long start_time;

	boolean_t acct = blk_queue_io_stat(q);
	if (acct)
		start_time = blk_generic_start_io_acct(q, disk, READ, bio);

	zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
	    uio.uio_loffset, uio.uio_resid, RL_READER);

	uint64_t volsize = zv->zv_volsize;
	while (uio.uio_resid > 0 && uio.uio_loffset < volsize) {
		uint64_t bytes = MIN(uio.uio_resid, DMU_MAX_ACCESS >> 1);

		/* don't read past the end */
		if (bytes > volsize - uio.uio_loffset)
			bytes = volsize - uio.uio_loffset;

		error = dmu_read_uio_dnode(zv->zv_dn, &uio, bytes);
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
	}
	zfs_rangelock_exit(lr);

	int64_t nread = start_resid - uio.uio_resid;
	dataset_kstats_update_read_kstats(&zv->zv_kstat, nread);
	task_io_account_read(nread);

	rw_exit(&zv->zv_suspend_lock);

	if (acct)
		blk_generic_end_io_acct(q, disk, READ, bio, start_time);

	BIO_END_IO(bio, -error);
}
static void
zvol_read_task(void *arg)
{
	zv_request_task_t *task = arg;
	zvol_read(&task->zvr);
	zv_request_task_free(task);
}
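
/*
 * Entry point for bios submitted to the zvol, exposed either as the
 * submit_bio block_device_operations method or as a make_request
 * function depending on the kernel API available.  Validates the
 * request and dispatches it to the zvol taskq, or handles it inline
 * when zvol_request_sync is set.
 */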
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
#ifdef HAVE_BDEV_SUBMIT_BIO_RETURNS_VOID
static void
zvol_submit_bio(struct bio *bio)
#else
static blk_qc_t
zvol_submit_bio(struct bio *bio)
#endif
#else
static MAKE_REQUEST_FN_RET
zvol_request(struct request_queue *q, struct bio *bio)
#endif
{
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
#if defined(HAVE_BIO_BDEV_DISK)
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
#else
	struct request_queue *q = bio->bi_disk->queue;
#endif
#endif
	zvol_state_t *zv = q->queuedata;
	fstrans_cookie_t cookie = spl_fstrans_mark();
	uint64_t offset = BIO_BI_SECTOR(bio) << 9;
	uint64_t size = BIO_BI_SIZE(bio);
	int rw = bio_data_dir(bio);

	if (bio_has_data(bio) && offset + size > zv->zv_volsize) {
		printk(KERN_INFO
		    "%s: bad access: offset=%llu, size=%lu\n",
		    zv->zv_zso->zvo_disk->disk_name,
		    (long long unsigned)offset,
		    (long unsigned)size);

		BIO_END_IO(bio, -SET_ERROR(EIO));
		goto out;
	}

	zv_request_t zvr = {
		.zv = zv,
		.bio = bio,
	};
	zv_request_task_t *task;

	if (rw == WRITE) {
		if (unlikely(zv->zv_flags & ZVOL_RDONLY)) {
			BIO_END_IO(bio, -SET_ERROR(EROFS));
			goto out;
		}

		/*
		 * Prevents the zvol from being suspended, or the ZIL being
		 * concurrently opened.  Will be released after the i/o
		 * completes.
		 */
		rw_enter(&zv->zv_suspend_lock, RW_READER);

		/*
		 * Open a ZIL if this is the first time we have written to this
		 * zvol.  We protect zv->zv_zilog with zv_suspend_lock rather
		 * than zv_state_lock so that we don't need to acquire an
		 * additional lock in this path.
		 */
		if (zv->zv_zilog == NULL) {
			rw_exit(&zv->zv_suspend_lock);
			rw_enter(&zv->zv_suspend_lock, RW_WRITER);
			if (zv->zv_zilog == NULL) {
				zv->zv_zilog = zil_open(zv->zv_objset,
				    zvol_get_data);
				zv->zv_flags |= ZVOL_WRITTEN_TO;
				/* replay / destroy done in zvol_create_minor */
				VERIFY0((zv->zv_zilog->zl_header->zh_flags &
				    ZIL_REPLAY_NEEDED));
			}
			rw_downgrade(&zv->zv_suspend_lock);
		}

		/*
		 * We don't want this thread to be blocked waiting for i/o to
		 * complete, so we instead wait from a taskq callback.  The
		 * i/o may be a ZIL write (via zil_commit()), or a read of an
		 * indirect block, or a read of a data block (if this is a
		 * partial-block write).  We will indicate that the i/o is
		 * complete by calling BIO_END_IO() from the taskq callback.
		 *
		 * This design allows the calling thread to continue and
		 * initiate more concurrent operations by calling
		 * zvol_request() again.  There are typically only a small
		 * number of threads available to call zvol_request() (e.g.
		 * one per iSCSI target), so keeping the latency of
		 * zvol_request() low is important for performance.
		 *
		 * The zvol_request_sync module parameter allows this
		 * behavior to be altered, for performance evaluation
		 * purposes.  If the callback blocks, setting
		 * zvol_request_sync=1 will result in much worse performance.
		 *
		 * We can have up to zvol_threads concurrent i/o's being
		 * processed for all zvols on the system.  This is typically
		 * a vast improvement over the zvol_request_sync=1 behavior
		 * of one i/o at a time per zvol.  However, an even better
		 * design would be for zvol_request() to initiate the zio
		 * directly, and then be notified by the zio_done callback,
		 * which would call BIO_END_IO().  Unfortunately, the DMU/ZIL
		 * interfaces lack this functionality (they block waiting for
		 * the i/o to complete).
		 */
		if (bio_is_discard(bio) || bio_is_secure_erase(bio)) {
			if (zvol_request_sync) {
				zvol_discard(&zvr);
			} else {
				task = zv_request_task_create(zvr);
				taskq_dispatch_ent(zvol_taskq,
				    zvol_discard_task, task, 0, &task->ent);
			}
		} else {
			if (zvol_request_sync) {
				zvol_write(&zvr);
			} else {
				task = zv_request_task_create(zvr);
				taskq_dispatch_ent(zvol_taskq,
				    zvol_write_task, task, 0, &task->ent);
			}
		}
	} else {
		/*
		 * The SCST driver, and possibly others, may issue READ I/Os
		 * with a length of zero bytes.  These empty I/Os contain no
		 * data and require no additional handling.
		 */
		if (size == 0) {
			BIO_END_IO(bio, 0);
			goto out;
		}

		rw_enter(&zv->zv_suspend_lock, RW_READER);

		/* See comment in WRITE case above. */
		if (zvol_request_sync) {
			zvol_read(&zvr);
		} else {
			task = zv_request_task_create(zvr);
			taskq_dispatch_ent(zvol_taskq,
			    zvol_read_task, task, 0, &task->ent);
		}
	}

out:
	spl_fstrans_unmark(cookie);
#if (defined(HAVE_MAKE_REQUEST_FN_RET_QC) || \
    defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS)) && \
    !defined(HAVE_BDEV_SUBMIT_BIO_RETURNS_VOID)
	return (BLK_QC_T_NONE);
#endif
}
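
/*
 * block_device_operations open() handler.  Orders zv_suspend_lock,
 * zv_state_lock, and (on first open) the spa_namespace_lock so the
 * zvol cannot be suspended or freed while its open count is raised.
 */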
static int
zvol_open(struct block_device *bdev, fmode_t flag)
{
	zvol_state_t *zv;
	int error = 0;
	boolean_t drop_suspend = B_TRUE;
	boolean_t drop_namespace = B_FALSE;
#ifndef HAVE_BLKDEV_GET_ERESTARTSYS
	hrtime_t timeout = MSEC2NSEC(zvol_open_timeout_ms);
	hrtime_t start = gethrtime();

retry:
#endif
	rw_enter(&zvol_state_lock, RW_READER);
	/*
	 * Obtain a copy of private_data under the zvol_state_lock to make
	 * sure that either the result of zvol free code path setting
	 * bdev->bd_disk->private_data to NULL is observed, or zvol_free()
	 * is not called on this zv because of the positive zv_open_count.
	 */
	zv = bdev->bd_disk->private_data;
	if (zv == NULL) {
		rw_exit(&zvol_state_lock);
		return (SET_ERROR(-ENXIO));
	}

	if (zv->zv_open_count == 0 && !mutex_owned(&spa_namespace_lock)) {
		/*
		 * In all other call paths the spa_namespace_lock is taken
		 * before the bdev->bd_mutex lock.  However, on open(2)
		 * the __blkdev_get() function calls fops->open() with the
		 * bdev->bd_mutex lock held.  This can result in a deadlock
		 * when zvols from one pool are used as vdevs in another.
		 *
		 * To prevent a lock inversion deadlock we preemptively
		 * take the spa_namespace_lock.  Normally the lock will not
		 * be contended and this is safe because spa_open_common()
		 * handles the case where the caller already holds the
		 * spa_namespace_lock.
		 *
		 * When the lock cannot be acquired after multiple retries
		 * this must be the vdev on zvol deadlock case and we have
		 * no choice but to return an error.  For 5.12 and older
		 * kernels returning -ERESTARTSYS will result in the
		 * bdev->bd_mutex being dropped, then reacquired, and
		 * fops->open() being called again.  This process can be
		 * repeated safely until both locks are acquired.  For 5.13
		 * and newer the -ERESTARTSYS retry logic was removed from
		 * the kernel so the only option is to return the error for
		 * the caller to handle it.
		 */
		if (!mutex_tryenter(&spa_namespace_lock)) {
			rw_exit(&zvol_state_lock);

#ifdef HAVE_BLKDEV_GET_ERESTARTSYS
			schedule();
			return (SET_ERROR(-ERESTARTSYS));
#else
			if ((gethrtime() - start) > timeout)
				return (SET_ERROR(-ERESTARTSYS));

			schedule_timeout(MSEC_TO_TICK(10));
			goto retry;
#endif
		} else {
			drop_namespace = B_TRUE;
		}
	}

	mutex_enter(&zv->zv_state_lock);
	/*
	 * Make sure the zvol is not suspended during first open
	 * (hold zv_suspend_lock) and respect proper lock acquisition
	 * ordering - zv_suspend_lock before zv_state_lock.
	 */
	if (zv->zv_open_count == 0) {
		if (!rw_tryenter(&zv->zv_suspend_lock, RW_READER)) {
			mutex_exit(&zv->zv_state_lock);
			rw_enter(&zv->zv_suspend_lock, RW_READER);
			mutex_enter(&zv->zv_state_lock);
			/* check to see if zv_suspend_lock is needed */
			if (zv->zv_open_count != 0) {
				rw_exit(&zv->zv_suspend_lock);
				drop_suspend = B_FALSE;
			}
		}
	} else {
		drop_suspend = B_FALSE;
	}
	rw_exit(&zvol_state_lock);

	ASSERT(MUTEX_HELD(&zv->zv_state_lock));

	if (zv->zv_open_count == 0) {
		ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
		error = -zvol_first_open(zv, !(flag & FMODE_WRITE));
		if (error)
			goto out_mutex;
	}

	if ((flag & FMODE_WRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
		error = -EROFS;
		goto out_open_count;
	}

	zv->zv_open_count++;

	mutex_exit(&zv->zv_state_lock);
	if (drop_namespace)
		mutex_exit(&spa_namespace_lock);
	if (drop_suspend)
		rw_exit(&zv->zv_suspend_lock);

	zfs_check_media_change(bdev);

	return (0);

out_open_count:
	if (zv->zv_open_count == 0)
		zvol_last_close(zv);

out_mutex:
	mutex_exit(&zv->zv_state_lock);
	if (drop_namespace)
		mutex_exit(&spa_namespace_lock);
	if (drop_suspend)
		rw_exit(&zv->zv_suspend_lock);

	return (SET_ERROR(error));
}
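
/*
 * block_device_operations release() handler.  Drops the open count and,
 * on last close, tears down the per-open state via zvol_last_close().
 */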
static void
zvol_release(struct gendisk *disk, fmode_t mode)
{
	zvol_state_t *zv;
	boolean_t drop_suspend = B_TRUE;

	rw_enter(&zvol_state_lock, RW_READER);
	zv = disk->private_data;

	mutex_enter(&zv->zv_state_lock);
	ASSERT3U(zv->zv_open_count, >, 0);
	/*
	 * Make sure the zvol is not suspended during last close
	 * (hold zv_suspend_lock) and respect proper lock acquisition
	 * ordering - zv_suspend_lock before zv_state_lock.
	 */
	if (zv->zv_open_count == 1) {
		if (!rw_tryenter(&zv->zv_suspend_lock, RW_READER)) {
			mutex_exit(&zv->zv_state_lock);
			rw_enter(&zv->zv_suspend_lock, RW_READER);
			mutex_enter(&zv->zv_state_lock);
			/* check to see if zv_suspend_lock is needed */
			if (zv->zv_open_count != 1) {
				rw_exit(&zv->zv_suspend_lock);
				drop_suspend = B_FALSE;
			}
		}
	} else {
		drop_suspend = B_FALSE;
	}
	rw_exit(&zvol_state_lock);

	ASSERT(MUTEX_HELD(&zv->zv_state_lock));

	zv->zv_open_count--;
	if (zv->zv_open_count == 0) {
		ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
		zvol_last_close(zv);
	}

	mutex_exit(&zv->zv_state_lock);

	if (drop_suspend)
		rw_exit(&zv->zv_suspend_lock);
}
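
/*
 * block_device_operations ioctl() handler.  BLKFLSBUF flushes dirty
 * data out to the pool and BLKZNAME reports the dataset name backing
 * the device; everything else is rejected with ENOTTY.
 */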
static int
zvol_ioctl(struct block_device *bdev, fmode_t mode,
    unsigned int cmd, unsigned long arg)
{
	zvol_state_t *zv = bdev->bd_disk->private_data;
	int error = 0;

	ASSERT3U(zv->zv_open_count, >, 0);

	switch (cmd) {
	case BLKFLSBUF:
		fsync_bdev(bdev);
		invalidate_bdev(bdev);
		rw_enter(&zv->zv_suspend_lock, RW_READER);

		if (!(zv->zv_flags & ZVOL_RDONLY))
			txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);

		rw_exit(&zv->zv_suspend_lock);
		break;

	case BLKZNAME:
		mutex_enter(&zv->zv_state_lock);
		error = copy_to_user((void *)arg, zv->zv_name, MAXNAMELEN);
		mutex_exit(&zv->zv_state_lock);
		break;

	default:
		error = -ENOTTY;
		break;
	}

	return (SET_ERROR(error));
}

#ifdef CONFIG_COMPAT
static int
zvol_compat_ioctl(struct block_device *bdev, fmode_t mode,
    unsigned cmd, unsigned long arg)
{
	return (zvol_ioctl(bdev, mode, cmd, arg));
}
#else
#define	zvol_compat_ioctl	NULL
#endif
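
/*
 * Report (and clear) a pending media-change event so udev notices
 * capacity and rename changes.
 */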
static unsigned int
zvol_check_events(struct gendisk *disk, unsigned int clearing)
{
	unsigned int mask = 0;

	rw_enter(&zvol_state_lock, RW_READER);

	zvol_state_t *zv = disk->private_data;
	if (zv != NULL) {
		mutex_enter(&zv->zv_state_lock);
		mask = zv->zv_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
		zv->zv_changed = 0;
		mutex_exit(&zv->zv_state_lock);
	}

	rw_exit(&zvol_state_lock);

	return (mask);
}
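
/*
 * Refresh the block device capacity from the current volume size.
 */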
static int
zvol_revalidate_disk(struct gendisk *disk)
{
	rw_enter(&zvol_state_lock, RW_READER);

	zvol_state_t *zv = disk->private_data;
	if (zv != NULL) {
		mutex_enter(&zv->zv_state_lock);
		set_capacity(zv->zv_zso->zvo_disk,
		    zv->zv_volsize >> SECTOR_BITS);
		mutex_exit(&zv->zv_state_lock);
	}

	rw_exit(&zvol_state_lock);

	return (0);
}
static int
zvol_update_volsize(zvol_state_t *zv, uint64_t volsize)
{
	struct gendisk *disk = zv->zv_zso->zvo_disk;

#if defined(HAVE_REVALIDATE_DISK_SIZE)
	revalidate_disk_size(disk, zvol_revalidate_disk(disk) == 0);
#elif defined(HAVE_REVALIDATE_DISK)
	revalidate_disk(disk);
#else
	zvol_revalidate_disk(disk);
#endif
	return (0);
}
static void
zvol_clear_private(zvol_state_t *zv)
{
	/*
	 * Cleared while holding zvol_state_lock as a writer
	 * which will prevent zvol_open() from opening it.
	 */
	zv->zv_zso->zvo_disk->private_data = NULL;
}
/*
 * Provide a simple virtual geometry for legacy compatibility.  For devices
 * smaller than 1 MiB a small head and sector count is used to allow very
 * tiny devices.  For devices over 1 MiB a standard head and sector count
 * is used to keep the cylinders count reasonable.
 */
static int
zvol_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	zvol_state_t *zv = bdev->bd_disk->private_data;
	sector_t sectors;

	ASSERT3U(zv->zv_open_count, >, 0);

	sectors = get_capacity(zv->zv_zso->zvo_disk);

	if (sectors > 2048) {
		geo->heads = 16;
		geo->sectors = 63;
	} else {
		geo->heads = 2;
		geo->sectors = 4;
	}

	geo->start = 0;
	geo->cylinders = sectors / (geo->heads * geo->sectors);

	return (0);
}
static struct block_device_operations zvol_ops = {
	.open			= zvol_open,
	.release		= zvol_release,
	.ioctl			= zvol_ioctl,
	.compat_ioctl		= zvol_compat_ioctl,
	.check_events		= zvol_check_events,
#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
	.revalidate_disk	= zvol_revalidate_disk,
#endif
	.getgeo			= zvol_getgeo,
	.owner			= THIS_MODULE,
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
	.submit_bio		= zvol_submit_bio,
#endif
};
/*
 * Allocate memory for a new zvol_state_t and setup the required
 * request queue and generic disk structures for the block device.
 */
static zvol_state_t *
zvol_alloc(dev_t dev, const char *name)
{
	zvol_state_t *zv;
	struct zvol_state_os *zso;
	uint64_t volmode;

	if (dsl_prop_get_integer(name, "volmode", &volmode, NULL) != 0)
		return (NULL);

	if (volmode == ZFS_VOLMODE_DEFAULT)
		volmode = zvol_volmode;

	if (volmode == ZFS_VOLMODE_NONE)
		return (NULL);

	zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
	zso = kmem_zalloc(sizeof (struct zvol_state_os), KM_SLEEP);
	zv->zv_zso = zso;
	zv->zv_volmode = volmode;

	list_link_init(&zv->zv_next);
	mutex_init(&zv->zv_state_lock, NULL, MUTEX_DEFAULT, NULL);

#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
#ifdef HAVE_BLK_ALLOC_DISK
	zso->zvo_disk = blk_alloc_disk(NUMA_NO_NODE);
	if (zso->zvo_disk == NULL)
		goto out_kmem;

	zso->zvo_disk->minors = ZVOL_MINORS;
	zso->zvo_queue = zso->zvo_disk->queue;
#else
	zso->zvo_queue = blk_alloc_queue(NUMA_NO_NODE);
	if (zso->zvo_queue == NULL)
		goto out_kmem;

	zso->zvo_disk = alloc_disk(ZVOL_MINORS);
	if (zso->zvo_disk == NULL) {
		blk_cleanup_queue(zso->zvo_queue);
		goto out_kmem;
	}

	zso->zvo_disk->queue = zso->zvo_queue;
#endif /* HAVE_BLK_ALLOC_DISK */
#else
	zso->zvo_queue = blk_generic_alloc_queue(zvol_request, NUMA_NO_NODE);
	if (zso->zvo_queue == NULL)
		goto out_kmem;

	zso->zvo_disk = alloc_disk(ZVOL_MINORS);
	if (zso->zvo_disk == NULL) {
		blk_cleanup_queue(zso->zvo_queue);
		goto out_kmem;
	}

	zso->zvo_disk->queue = zso->zvo_queue;
#endif /* HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */

	blk_queue_set_write_cache(zso->zvo_queue, B_TRUE, B_TRUE);

	/* Limit read-ahead to a single page to prevent over-prefetching. */
	blk_queue_set_read_ahead(zso->zvo_queue, 1);

	/* Disable write merging in favor of the ZIO pipeline. */
	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, zso->zvo_queue);

	/* Enable /proc/diskstats */
	blk_queue_flag_set(QUEUE_FLAG_IO_STAT, zso->zvo_queue);

	zso->zvo_queue->queuedata = zv;
	zso->zvo_dev = dev;
	zv->zv_open_count = 0;
	strlcpy(zv->zv_name, name, MAXNAMELEN);

	zfs_rangelock_init(&zv->zv_rangelock, NULL, NULL);
	rw_init(&zv->zv_suspend_lock, NULL, RW_DEFAULT, NULL);

	zso->zvo_disk->major = zvol_major;
	zso->zvo_disk->events = DISK_EVENT_MEDIA_CHANGE;

	if (volmode == ZFS_VOLMODE_DEV) {
		/*
		 * ZFS_VOLMODE_DEV disables partitioning on ZVOL devices: set
		 * gendisk->minors = 1 as noted in include/linux/genhd.h.
		 * Also disable extended partition numbers (GENHD_FL_EXT_DEVT)
		 * and suppress partition scanning (GENHD_FL_NO_PART_SCAN) by
		 * setting gendisk->flags accordingly.
		 */
		zso->zvo_disk->minors = 1;
#if defined(GENHD_FL_EXT_DEVT)
		zso->zvo_disk->flags &= ~GENHD_FL_EXT_DEVT;
#endif
#if defined(GENHD_FL_NO_PART_SCAN)
		zso->zvo_disk->flags |= GENHD_FL_NO_PART_SCAN;
#endif
	}
	zso->zvo_disk->first_minor = (dev & MINORMASK);
	zso->zvo_disk->fops = &zvol_ops;
	zso->zvo_disk->private_data = zv;

	snprintf(zso->zvo_disk->disk_name, DISK_NAME_LEN, "%s%d",
	    ZVOL_DEV_NAME, (dev & MINORMASK));

	return (zv);

out_kmem:
	kmem_free(zso, sizeof (struct zvol_state_os));
	kmem_free(zv, sizeof (zvol_state_t));
	return (NULL);
}
/*
 * Cleanup then free a zvol_state_t which was created by zvol_alloc().
 * At this time, the structure is not opened by anyone, is taken off
 * the zvol_state_list, and has its private data set to NULL.
 * The zvol_state_lock is dropped.
 *
 * This function may take many milliseconds to complete (e.g. we've seen
 * it take over 256ms), due to the calls to "blk_cleanup_queue" and
 * "del_gendisk".  Thus, consumers need to be careful to account for this
 * latency when calling this function.
 */
static void
zvol_free(zvol_state_t *zv)
{
	ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
	ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
	ASSERT0(zv->zv_open_count);
	ASSERT3P(zv->zv_zso->zvo_disk->private_data, ==, NULL);

	rw_destroy(&zv->zv_suspend_lock);
	zfs_rangelock_fini(&zv->zv_rangelock);

	del_gendisk(zv->zv_zso->zvo_disk);
#if defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS) && \
	defined(HAVE_BLK_ALLOC_DISK)
	blk_cleanup_disk(zv->zv_zso->zvo_disk);
#else
	blk_cleanup_queue(zv->zv_zso->zvo_queue);
	put_disk(zv->zv_zso->zvo_disk);
#endif

	ida_simple_remove(&zvol_ida,
	    MINOR(zv->zv_zso->zvo_dev) >> ZVOL_MINOR_BITS);

	mutex_destroy(&zv->zv_state_lock);
	dataset_kstats_destroy(&zv->zv_kstat);

	kmem_free(zv->zv_zso, sizeof (struct zvol_state_os));
	kmem_free(zv, sizeof (zvol_state_t));
}
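
/*
 * No-op on Linux: there is nothing to wait for here beyond what the
 * common zvol code already tracks.
 */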
void
zvol_wait_close(zvol_state_t *zv)
{
}
/*
 * Create a block device minor node and setup the linkage between it
 * and the specified volume.  Once this function returns the block
 * device is live and ready for use.
 */
static int
zvol_os_create_minor(const char *name)
{
	zvol_state_t *zv;
	objset_t *os;
	dmu_object_info_t *doi;
	uint64_t volsize;
	uint64_t len;
	unsigned minor = 0;
	int error = 0;
	int idx;
	uint64_t hash = zvol_name_hash(name);

	if (zvol_inhibit_dev)
		return (0);

	idx = ida_simple_get(&zvol_ida, 0, 0, kmem_flags_convert(KM_SLEEP));
	if (idx < 0)
		return (SET_ERROR(-idx));
	minor = idx << ZVOL_MINOR_BITS;

	zv = zvol_find_by_name_hash(name, hash, RW_NONE);
	if (zv) {
		ASSERT(MUTEX_HELD(&zv->zv_state_lock));
		mutex_exit(&zv->zv_state_lock);
		ida_simple_remove(&zvol_ida, idx);
		return (SET_ERROR(EEXIST));
	}

	doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);

	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, B_TRUE, FTAG, &os);
	if (error)
		goto out_doi;

	error = dmu_object_info(os, ZVOL_OBJ, doi);
	if (error)
		goto out_dmu_objset_disown;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error)
		goto out_dmu_objset_disown;

	zv = zvol_alloc(MKDEV(zvol_major, minor), name);
	if (zv == NULL) {
		error = SET_ERROR(EAGAIN);
		goto out_dmu_objset_disown;
	}
	zv->zv_hash = hash;

	if (dmu_objset_is_snapshot(os))
		zv->zv_flags |= ZVOL_RDONLY;

	zv->zv_volblocksize = doi->doi_data_block_size;
	zv->zv_volsize = volsize;
	zv->zv_objset = os;

	set_capacity(zv->zv_zso->zvo_disk, zv->zv_volsize >> 9);

	blk_queue_max_hw_sectors(zv->zv_zso->zvo_queue,
	    (DMU_MAX_ACCESS / 4) >> 9);
	blk_queue_max_segments(zv->zv_zso->zvo_queue, UINT16_MAX);
	blk_queue_max_segment_size(zv->zv_zso->zvo_queue, UINT_MAX);
	blk_queue_physical_block_size(zv->zv_zso->zvo_queue,
	    zv->zv_volblocksize);
	blk_queue_io_opt(zv->zv_zso->zvo_queue, zv->zv_volblocksize);
	blk_queue_max_discard_sectors(zv->zv_zso->zvo_queue,
	    (zvol_max_discard_blocks * zv->zv_volblocksize) >> 9);
	blk_queue_discard_granularity(zv->zv_zso->zvo_queue,
	    zv->zv_volblocksize);
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, zv->zv_zso->zvo_queue);
#ifdef QUEUE_FLAG_NONROT
	blk_queue_flag_set(QUEUE_FLAG_NONROT, zv->zv_zso->zvo_queue);
#endif
#ifdef QUEUE_FLAG_ADD_RANDOM
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zv->zv_zso->zvo_queue);
#endif
	/* This flag was introduced in kernel version 4.12. */
#ifdef QUEUE_FLAG_SCSI_PASSTHROUGH
	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, zv->zv_zso->zvo_queue);
#endif

	ASSERT3P(zv->zv_zilog, ==, NULL);
	zv->zv_zilog = zil_open(os, zvol_get_data);
	if (spa_writeable(dmu_objset_spa(os))) {
		if (zil_replay_disable)
			zil_destroy(zv->zv_zilog, B_FALSE);
		else
			zil_replay(os, zv, zvol_replay_vector);
	}
	zil_close(zv->zv_zilog);
	zv->zv_zilog = NULL;
	ASSERT3P(zv->zv_kstat.dk_kstats, ==, NULL);
	dataset_kstats_create(&zv->zv_kstat, zv->zv_objset);

	/*
	 * When udev detects the addition of the device it will immediately
	 * invoke blkid(8) to determine the type of content on the device.
	 * Prefetching the blocks commonly scanned by blkid(8) will speed
	 * up this process.
	 */
	len = MIN(MAX(zvol_prefetch_bytes, 0), SPA_MAXBLOCKSIZE);
	if (len > 0) {
		dmu_prefetch(os, ZVOL_OBJ, 0, 0, len, ZIO_PRIORITY_SYNC_READ);
		dmu_prefetch(os, ZVOL_OBJ, 0, volsize - len, len,
		    ZIO_PRIORITY_SYNC_READ);
	}

	zv->zv_objset = NULL;
out_dmu_objset_disown:
	dmu_objset_disown(os, B_TRUE, FTAG);
out_doi:
	kmem_free(doi, sizeof (dmu_object_info_t));

	/*
	 * Keep in mind that once add_disk() is called, the zvol is
	 * announced to the world, and zvol_open()/zvol_release() can
	 * be called at any time.  Incidentally, add_disk() itself calls
	 * zvol_open()->zvol_first_open() and zvol_release()->zvol_last_close()
	 * directly as well.
	 */
	if (error == 0) {
		rw_enter(&zvol_state_lock, RW_WRITER);
		zvol_insert(zv);
		rw_exit(&zvol_state_lock);
		add_disk(zv->zv_zso->zvo_disk);
	} else {
		ida_simple_remove(&zvol_ida, idx);
	}

	return (error);
}
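
/*
 * Rename a block device minor to follow a dataset rename: rehash the
 * zvol and nudge udev (via a read-only state toggle) so the /dev/zvol
 * symlinks are recreated under the new name.
 */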
static void
zvol_rename_minor(zvol_state_t *zv, const char *newname)
{
	int readonly = get_disk_ro(zv->zv_zso->zvo_disk);

	ASSERT(RW_LOCK_HELD(&zvol_state_lock));
	ASSERT(MUTEX_HELD(&zv->zv_state_lock));

	strlcpy(zv->zv_name, newname, sizeof (zv->zv_name));

	/* move to new hashtable entry */
	zv->zv_hash = zvol_name_hash(zv->zv_name);
	hlist_del(&zv->zv_hlink);
	hlist_add_head(&zv->zv_hlink, ZVOL_HT_HEAD(zv->zv_hash));

	/*
	 * The block device's read-only state is briefly changed causing
	 * a KOBJ_CHANGE uevent to be issued.  This ensures udev detects
	 * the name change and fixes the symlinks.  This does not change
	 * ZVOL_RDONLY in zv->zv_flags so the actual read-only state never
	 * changes.  This would normally be done using kobject_uevent() but
	 * that is a GPL-only symbol which is why we need this workaround.
	 */
	set_disk_ro(zv->zv_zso->zvo_disk, !readonly);
	set_disk_ro(zv->zv_zso->zvo_disk, readonly);
}
static void
zvol_set_disk_ro_impl(zvol_state_t *zv, int flags)
{
	set_disk_ro(zv->zv_zso->zvo_disk, flags);
}

static void
zvol_set_capacity_impl(zvol_state_t *zv, uint64_t capacity)
{
	set_capacity(zv->zv_zso->zvo_disk, capacity);
}
static const zvol_platform_ops_t zvol_linux_ops = {
	.zv_free = zvol_free,
	.zv_rename_minor = zvol_rename_minor,
	.zv_create_minor = zvol_os_create_minor,
	.zv_update_volsize = zvol_update_volsize,
	.zv_clear_private = zvol_clear_private,
	.zv_is_zvol = zvol_is_zvol_impl,
	.zv_set_disk_ro = zvol_set_disk_ro_impl,
	.zv_set_capacity = zvol_set_capacity_impl,
};
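
/*
 * Module load/unload hooks: register the zvol block driver and its
 * taskq on load, and tear them down again on unload.
 */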
int
zvol_init(void)
{
	int error;
	int threads = MIN(MAX(zvol_threads, 1), 1024);

	error = register_blkdev(zvol_major, ZVOL_DRIVER);
	if (error) {
		printk(KERN_INFO "ZFS: register_blkdev() failed %d\n", error);
		return (error);
	}
	zvol_taskq = taskq_create(ZVOL_DRIVER, threads, maxclsyspri,
	    threads * 2, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
	if (zvol_taskq == NULL) {
		unregister_blkdev(zvol_major, ZVOL_DRIVER);
		return (-ENOMEM);
	}
	zvol_init_impl();
	ida_init(&zvol_ida);
	zvol_register_ops(&zvol_linux_ops);
	return (0);
}

void
zvol_fini(void)
{
	zvol_fini_impl();
	unregister_blkdev(zvol_major, ZVOL_DRIVER);
	taskq_destroy(zvol_taskq);
	ida_destroy(&zvol_ida);
}
module_param(zvol_inhibit_dev, uint, 0644);
MODULE_PARM_DESC(zvol_inhibit_dev, "Do not create zvol device nodes");

module_param(zvol_major, uint, 0444);
MODULE_PARM_DESC(zvol_major, "Major number for zvol device");

module_param(zvol_threads, uint, 0444);
MODULE_PARM_DESC(zvol_threads, "Max number of threads to handle I/O requests");

module_param(zvol_request_sync, uint, 0644);
MODULE_PARM_DESC(zvol_request_sync, "Synchronously handle bio requests");

module_param(zvol_max_discard_blocks, ulong, 0444);
MODULE_PARM_DESC(zvol_max_discard_blocks, "Max number of blocks to discard");

module_param(zvol_prefetch_bytes, uint, 0644);
MODULE_PARM_DESC(zvol_prefetch_bytes, "Prefetch N bytes at zvol start+end");

module_param(zvol_volmode, uint, 0644);
MODULE_PARM_DESC(zvol_volmode, "Default volmode property value");
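
/*
 * Illustrative usage (not part of this file): parameters registered with
 * 0644 permissions above, such as zvol_request_sync, can typically be
 * changed at runtime through sysfs, e.g.:
 *
 *	echo 1 > /sys/module/zfs/parameters/zvol_request_sync
 *
 * while 0444 parameters like zvol_threads must be set at load time, e.g.:
 *
 *	modprobe zfs zvol_threads=64
 */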