/*-
 * Copyright (c) 2003 Silicon Graphics International Corp.
 * Copyright (c) 2009-2011 Spectra Logic Corporation
 * Copyright (c) 2012 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_block.c#5 $
 */
/*
 * CAM Target Layer driver backend for block devices.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_kdtrace.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/bio.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/endian.h>
#include <sys/uio.h>
#include <sys/taskqueue.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/disk.h>
#include <sys/filedesc.h>
#include <sys/proc.h>
#include <sys/module.h>
#include <sys/sdt.h>
#include <sys/devicestat.h>
#include <sys/sysctl.h>

#include <geom/geom.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_frontend_internal.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>
/*
 * The idea here is that we'll allocate enough S/G space to hold a 16MB
 * I/O.  If we get an I/O larger than that, we'll reject it.
 */
#define	CTLBLK_MAX_IO_SIZE	(16 * 1024 * 1024)
#define	CTLBLK_MAX_SEGS		((CTLBLK_MAX_IO_SIZE / MAXPHYS) + 1)
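
/*
 * Worked example (illustrative, assuming the common MAXPHYS of 128KB):
 * a maximal 16MB I/O needs 16MB / 128KB = 128 segments, and the "+ 1"
 * leaves room for a trailing partial segment, so CTLBLK_MAX_SEGS comes
 * out to 129.
 */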
#ifdef CTLBLK_DEBUG
#define DPRINTF(fmt, args...) \
	printf("cbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
#else
#define DPRINTF(fmt, args...) do {} while(0)
#endif
SDT_PROVIDER_DEFINE(cbb);
typedef enum {
	CTL_BE_BLOCK_LUN_UNCONFIGURED	= 0x01,
	CTL_BE_BLOCK_LUN_CONFIG_ERR	= 0x02,
	CTL_BE_BLOCK_LUN_WAITING	= 0x04,
	CTL_BE_BLOCK_LUN_MULTI_THREAD	= 0x08
} ctl_be_block_lun_flags;
typedef enum {
	CTL_BE_BLOCK_NONE,
	CTL_BE_BLOCK_DEV,
	CTL_BE_BLOCK_FILE
} ctl_be_block_type;

struct ctl_be_block_devdata {
	struct cdev *cdev;
	struct cdevsw *csw;
	int dev_ref;
};

struct ctl_be_block_filedata {
	struct ucred *cred;
};

union ctl_be_block_bedata {
	struct ctl_be_block_devdata dev;
	struct ctl_be_block_filedata file;
};
struct ctl_be_block_io;
struct ctl_be_block_lun;

typedef void (*cbb_dispatch_t)(struct ctl_be_block_lun *be_lun,
			       struct ctl_be_block_io *beio);
/*
 * Backend LUN structure.  There is a 1:1 mapping between a block device
 * and a backend block LUN, and between a backend block LUN and a CTL LUN.
 */
struct ctl_be_block_lun {
	struct ctl_block_disk *disk;
	char lunname[32];
	char *dev_path;
	ctl_be_block_type dev_type;
	struct vnode *vn;
	union ctl_be_block_bedata backend;
	cbb_dispatch_t dispatch;
	cbb_dispatch_t lun_flush;
	struct mtx lock;
	uma_zone_t lun_zone;
	uint64_t size_blocks;
	uint64_t size_bytes;
	uint32_t blocksize;
	int blocksize_shift;
	uint16_t pblockexp;
	uint16_t pblockoff;
	struct ctl_be_block_softc *softc;
	struct devstat *disk_stats;
	ctl_be_block_lun_flags flags;
	STAILQ_ENTRY(ctl_be_block_lun) links;
	struct ctl_be_lun ctl_be_lun;
	struct taskqueue *io_taskqueue;
	struct task io_task;
	int num_threads;
	STAILQ_HEAD(, ctl_io_hdr) input_queue;
	STAILQ_HEAD(, ctl_io_hdr) config_write_queue;
	STAILQ_HEAD(, ctl_io_hdr) datamove_queue;
};
/*
 * Overall softc structure for the block backend module.
 */
struct ctl_be_block_softc {
	struct mtx lock;
	int num_luns;
	STAILQ_HEAD(, ctl_block_disk) disk_list;
	STAILQ_HEAD(, ctl_be_block_lun) lun_list;
};

static struct ctl_be_block_softc backend_block_softc;
/*
 * Per-I/O information.
 */
struct ctl_be_block_io {
	union ctl_io *io;
	struct ctl_sg_entry sg_segs[CTLBLK_MAX_SEGS];
	struct iovec xiovecs[CTLBLK_MAX_SEGS];
	int bio_cmd;
	int bio_flags;
	int num_segs;
	int num_bios_sent;
	int num_bios_done;
	int send_complete;
	int num_errors;
	struct bintime ds_t0;
	devstat_tag_type ds_tag_type;
	devstat_trans_flags ds_trans_type;
	uint64_t io_len;
	uint64_t io_offset;
	struct ctl_be_block_softc *softc;
	struct ctl_be_block_lun *lun;
};
static int cbb_num_threads = 14;
TUNABLE_INT("kern.cam.ctl.block.num_threads", &cbb_num_threads);
SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD, 0,
	    "CAM Target Layer Block Backend");
SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RW,
	   &cbb_num_threads, 0, "Number of threads per backing file");
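
/*
 * Usage sketch for the knob above (illustrative, not from this file):
 * the default of 14 can be overridden at boot via loader.conf:
 *
 *	kern.cam.ctl.block.num_threads="8"
 *
 * or adjusted at runtime with sysctl(8):
 *
 *	sysctl kern.cam.ctl.block.num_threads=8
 */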
static struct ctl_be_block_io *ctl_alloc_beio(struct ctl_be_block_softc *softc);
static void ctl_free_beio(struct ctl_be_block_io *beio);
static void ctl_complete_beio(struct ctl_be_block_io *beio);
static int ctl_be_block_move_done(union ctl_io *io);
static void ctl_be_block_biodone(struct bio *bio);
static void ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
				    struct ctl_be_block_io *beio);
static void ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
				       struct ctl_be_block_io *beio);
static void ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
				   struct ctl_be_block_io *beio);
static void ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
				      struct ctl_be_block_io *beio);
static void ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
				     union ctl_io *io);
static void ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
				  union ctl_io *io);
static void ctl_be_block_worker(void *context, int pending);
static int ctl_be_block_submit(union ctl_io *io);
static int ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
			      int flag, struct thread *td);
static int ctl_be_block_open_file(struct ctl_be_block_lun *be_lun,
				  struct ctl_lun_req *req);
static int ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun,
				 struct ctl_lun_req *req);
static int ctl_be_block_close(struct ctl_be_block_lun *be_lun);
static int ctl_be_block_open(struct ctl_be_block_softc *softc,
			     struct ctl_be_block_lun *be_lun,
			     struct ctl_lun_req *req);
static int ctl_be_block_create(struct ctl_be_block_softc *softc,
			       struct ctl_lun_req *req);
static int ctl_be_block_rm(struct ctl_be_block_softc *softc,
			   struct ctl_lun_req *req);
static int ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun,
				    struct ctl_lun_req *req);
static int ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun,
				   struct ctl_lun_req *req);
static int ctl_be_block_modify(struct ctl_be_block_softc *softc,
			       struct ctl_lun_req *req);
static void ctl_be_block_lun_shutdown(void *be_lun);
static void ctl_be_block_lun_config_status(void *be_lun,
					   ctl_lun_config_status status);
static int ctl_be_block_config_write(union ctl_io *io);
static int ctl_be_block_config_read(union ctl_io *io);
static int ctl_be_block_lun_info(void *be_lun, struct sbuf *sb);
int ctl_be_block_init(void);
static struct ctl_backend_driver ctl_be_block_driver =
{
	.name = "block",
	.flags = CTL_BE_FLAG_HAS_CONFIG,
	.init = ctl_be_block_init,
	.data_submit = ctl_be_block_submit,
	.data_move_done = ctl_be_block_move_done,
	.config_read = ctl_be_block_config_read,
	.config_write = ctl_be_block_config_write,
	.ioctl = ctl_be_block_ioctl,
	.lun_info = ctl_be_block_lun_info
};
MALLOC_DEFINE(M_CTLBLK, "ctlblk", "Memory used for CTL block backend");
CTL_BACKEND_DECLARE(cbb, ctl_be_block_driver);

static uma_zone_t beio_zone;
static struct ctl_be_block_io *
ctl_alloc_beio(struct ctl_be_block_softc *softc)
{
	struct ctl_be_block_io *beio;

	beio = uma_zalloc(beio_zone, M_WAITOK | M_ZERO);
	beio->softc = softc;

	return (beio);
}
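
/*
 * Note: because the allocation above uses M_WAITOK, it can sleep but
 * cannot fail, so callers do not need a NULL check; the KASSERTs at the
 * call sites below are belt-and-suspenders only.
 */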
static void
ctl_free_beio(struct ctl_be_block_io *beio)
{
	int duplicate_free;
	int i;

	duplicate_free = 0;

	for (i = 0; i < beio->num_segs; i++) {
		if (beio->sg_segs[i].addr == NULL)
			duplicate_free++;

		uma_zfree(beio->lun->lun_zone, beio->sg_segs[i].addr);
		beio->sg_segs[i].addr = NULL;
	}

	if (duplicate_free > 0) {
		printf("%s: %d duplicate frees out of %d segments\n", __func__,
		       duplicate_free, beio->num_segs);
	}

	uma_zfree(beio_zone, beio);
}
static void
ctl_complete_beio(struct ctl_be_block_io *beio)
{
	union ctl_io *io;
	int io_len;

	io = beio->io;

	if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)
		io_len = beio->io_len;
	else
		io_len = 0;

	devstat_end_transaction(beio->lun->disk_stats,
				/*bytes*/ io_len,
				beio->ds_tag_type,
				beio->ds_trans_type,
				/*now*/ NULL,
				/*then*/&beio->ds_t0);

	ctl_free_beio(beio);
	ctl_done(io);
}
static int
ctl_be_block_move_done(union ctl_io *io)
{
	struct ctl_be_block_io *beio;
	struct ctl_be_block_lun *be_lun;
#ifdef CTL_TIME_IO
	struct bintime cur_bt;
#endif

	beio = (struct ctl_be_block_io *)
		io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr;

	be_lun = beio->lun;

	DPRINTF("entered\n");

#ifdef CTL_TIME_IO
	getbintime(&cur_bt);
	bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
	bintime_add(&io->io_hdr.dma_bt, &cur_bt);
	io->io_hdr.num_dmas++;
#endif

	/*
	 * We set status at this point for read commands, and write
	 * commands with errors.
	 */
	if ((beio->bio_cmd == BIO_READ)
	 && (io->io_hdr.port_status == 0)
	 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
	 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE))
		ctl_set_success(&io->scsiio);
	else if ((io->io_hdr.port_status != 0)
	      && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
	      && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
		/*
		 * For hardware error sense keys, the sense key
		 * specific value is defined to be a retry count,
		 * but we use it to pass back an internal FETD
		 * error code.  XXX KDM  Hopefully the FETD is only
		 * using 16 bits for an error code, since that's
		 * all the space we have in the sks field.
		 */
		ctl_set_internal_failure(&io->scsiio,
					 /*sks_valid*/ 1,
					 /*retry_count*/
					 io->io_hdr.port_status);
	}

	/*
	 * If this is a read, or a write with errors, it is done.
	 */
	if ((beio->bio_cmd == BIO_READ)
	 || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)
	 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) {
		ctl_complete_beio(beio);
		return (0);
	}

	/*
	 * At this point, we have a write and the DMA completed
	 * successfully.  We now have to queue it to the task queue to
	 * execute the backend I/O.  That is because we do blocking
	 * memory allocations, and in the file backing case, blocking I/O.
	 * This move done routine is generally called in the SIM's
	 * interrupt context, and therefore we cannot block.
	 */
	mtx_lock(&be_lun->lock);
	/*
	 * XXX KDM make sure that links is okay to use at this point.
	 * Otherwise, we either need to add another field to ctl_io_hdr,
	 * or deal with resource allocation here.
	 */
	STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links);
	mtx_unlock(&be_lun->lock);

	taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);

	return (0);
}
static void
ctl_be_block_biodone(struct bio *bio)
{
	struct ctl_be_block_io *beio;
	struct ctl_be_block_lun *be_lun;
	union ctl_io *io;
	int error;

	beio = bio->bio_caller1;
	be_lun = beio->lun;
	io = beio->io;

	DPRINTF("entered\n");

	error = bio->bio_error;
	mtx_lock(&be_lun->lock);
	if (error != 0)
		beio->num_errors++;

	beio->num_bios_done++;

	/*
	 * XXX KDM will this cause WITNESS to complain?  Holding a lock
	 * during the free might cause it to complain.
	 */
	g_destroy_bio(bio);

	/*
	 * If the send complete bit isn't set, or we aren't the last I/O to
	 * complete, then we're done.
	 */
	if ((beio->send_complete == 0)
	 || (beio->num_bios_done < beio->num_bios_sent)) {
		mtx_unlock(&be_lun->lock);
		return;
	}

	/*
	 * At this point, we've verified that we are the last I/O to
	 * complete, so it's safe to drop the lock.
	 */
	mtx_unlock(&be_lun->lock);

	/*
	 * If there are any errors from the backing device, we fail the
	 * entire I/O with a medium error.
	 */
	if (beio->num_errors > 0) {
		if (error == EOPNOTSUPP) {
			ctl_set_invalid_opcode(&io->scsiio);
		} else if (beio->bio_cmd == BIO_FLUSH) {
			/* XXX KDM is there a better error here? */
			ctl_set_internal_failure(&io->scsiio,
						 /*sks_valid*/ 1,
						 /*retry_count*/ 0xbad2);
		} else
			ctl_set_medium_error(&io->scsiio);
		ctl_complete_beio(beio);
		return;
	}

	/*
	 * If this is a write or a flush, we're all done.
	 * If this is a read, we can now send the data to the user.
	 */
	if ((beio->bio_cmd == BIO_WRITE)
	 || (beio->bio_cmd == BIO_FLUSH)) {
		ctl_set_success(&io->scsiio);
		ctl_complete_beio(beio);
	} else {
		io->scsiio.be_move_done = ctl_be_block_move_done;
		io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
		io->scsiio.kern_data_len = beio->io_len;
		io->scsiio.kern_total_len = beio->io_len;
		io->scsiio.kern_rel_offset = 0;
		io->scsiio.kern_data_resid = 0;
		io->scsiio.kern_sg_entries = beio->num_segs;
		io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
#ifdef CTL_TIME_IO
		getbintime(&io->io_hdr.dma_start_bt);
#endif
		ctl_datamove(io);
	}
}
static void
ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
			struct ctl_be_block_io *beio)
{
	union ctl_io *io;
	struct mount *mountpoint;
	int vfs_is_locked, error, lock_flags;

	DPRINTF("entered\n");

	io = beio->io;

	vfs_is_locked = VFS_LOCK_GIANT(be_lun->vn->v_mount);

	(void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT);

	if (MNT_SHARED_WRITES(mountpoint)
	 || ((mountpoint == NULL)
	  && MNT_SHARED_WRITES(be_lun->vn->v_mount)))
		lock_flags = LK_SHARED;
	else
		lock_flags = LK_EXCLUSIVE;

	vn_lock(be_lun->vn, lock_flags | LK_RETRY);

	binuptime(&beio->ds_t0);
	devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);

	error = VOP_FSYNC(be_lun->vn, MNT_WAIT, curthread);
	VOP_UNLOCK(be_lun->vn, 0);

	vn_finished_write(mountpoint);

	VFS_UNLOCK_GIANT(vfs_is_locked);

	if (error == 0)
		ctl_set_success(&io->scsiio);
	else {
		/* XXX KDM is there a better error here? */
		ctl_set_internal_failure(&io->scsiio,
					 /*sks_valid*/ 1,
					 /*retry_count*/ 0xbad1);
	}

	ctl_complete_beio(beio);
}
SDT_PROBE_DEFINE1(cbb, kernel, read, file_start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, write, file_start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, read, file_done, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, write, file_done, "uint64_t");
static void
ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
			   struct ctl_be_block_io *beio)
{
	struct ctl_be_block_filedata *file_data;
	union ctl_io *io;
	struct uio xuio;
	struct iovec *xiovec;
	int vfs_is_locked, flags;
	int error, i;

	DPRINTF("entered\n");

	file_data = &be_lun->backend.file;
	io = beio->io;
	flags = beio->bio_flags;

	if (beio->bio_cmd == BIO_READ) {
		SDT_PROBE(cbb, kernel, read, file_start, 0, 0, 0, 0, 0);
	} else {
		SDT_PROBE(cbb, kernel, write, file_start, 0, 0, 0, 0, 0);
	}

	bzero(&xuio, sizeof(xuio));
	if (beio->bio_cmd == BIO_READ)
		xuio.uio_rw = UIO_READ;
	else
		xuio.uio_rw = UIO_WRITE;

	xuio.uio_offset = beio->io_offset;
	xuio.uio_resid = beio->io_len;
	xuio.uio_segflg = UIO_SYSSPACE;
	xuio.uio_iov = beio->xiovecs;
	xuio.uio_iovcnt = beio->num_segs;
	xuio.uio_td = curthread;

	for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) {
		xiovec->iov_base = beio->sg_segs[i].addr;
		xiovec->iov_len = beio->sg_segs[i].len;
	}

	vfs_is_locked = VFS_LOCK_GIANT(be_lun->vn->v_mount);
	if (beio->bio_cmd == BIO_READ) {
		vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);

		binuptime(&beio->ds_t0);
		devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);

		/*
		 * UFS pays attention to IO_DIRECT for reads.  If the
		 * DIRECTIO option is configured into the kernel, it calls
		 * ffs_rawread().  But that only works for single-segment
		 * uios with user space addresses.  In our case, with a
		 * kernel uio, it still reads into the buffer cache, but it
		 * will just try to release the buffer from the cache later
		 * on in ffs_read().
		 *
		 * ZFS does not pay attention to IO_DIRECT for reads.
		 *
		 * UFS does not pay attention to IO_SYNC for reads.
		 *
		 * ZFS pays attention to IO_SYNC (which translates into the
		 * Solaris define FRSYNC for zfs_read()) for reads.  It
		 * attempts to sync the file before reading.
		 *
		 * So, to attempt to provide some barrier semantics in the
		 * BIO_ORDERED case, set both IO_DIRECT and IO_SYNC.
		 */
		error = VOP_READ(be_lun->vn, &xuio, (flags & BIO_ORDERED) ?
				 (IO_DIRECT|IO_SYNC) : 0, file_data->cred);

		VOP_UNLOCK(be_lun->vn, 0);
	} else {
		struct mount *mountpoint;
		int lock_flags;

		(void)vn_start_write(be_lun->vn, &mountpoint, V_WAIT);

		if (MNT_SHARED_WRITES(mountpoint)
		 || ((mountpoint == NULL)
		  && MNT_SHARED_WRITES(be_lun->vn->v_mount)))
			lock_flags = LK_SHARED;
		else
			lock_flags = LK_EXCLUSIVE;

		vn_lock(be_lun->vn, lock_flags | LK_RETRY);

		binuptime(&beio->ds_t0);
		devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);

		/*
		 * UFS pays attention to IO_DIRECT for writes.  The write
		 * is done asynchronously.  (Normally the write would just
		 * get put into cache.)
		 *
		 * UFS pays attention to IO_SYNC for writes.  It will
		 * attempt to write the buffer out synchronously if that
		 * flag is set.
		 *
		 * ZFS does not pay attention to IO_DIRECT for writes.
		 *
		 * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC)
		 * for writes.  It will flush the transaction from the
		 * cache before returning.
		 *
		 * So if we've got the BIO_ORDERED flag set, we want
		 * IO_SYNC in either the UFS or ZFS case.
		 */
		error = VOP_WRITE(be_lun->vn, &xuio, (flags & BIO_ORDERED) ?
				  IO_SYNC : 0, file_data->cred);
		VOP_UNLOCK(be_lun->vn, 0);

		vn_finished_write(mountpoint);
	}
	VFS_UNLOCK_GIANT(vfs_is_locked);

	/*
	 * If we got an error, set the sense data to "MEDIUM ERROR" and
	 * return the I/O to the user.
	 */
	if (error != 0) {
		char path_str[32];

		ctl_scsi_path_string(io, path_str, sizeof(path_str));
		/*
		 * XXX KDM ZFS returns ENOSPC when the underlying
		 * filesystem fills up.  What kind of SCSI error should we
		 * return for that?
		 */
		printf("%s%s command returned errno %d\n", path_str,
		       (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", error);
		ctl_set_medium_error(&io->scsiio);
		ctl_complete_beio(beio);
		return;
	}

	/*
	 * If this is a write, we're all done.
	 * If this is a read, we can now send the data to the user.
	 */
	if (beio->bio_cmd == BIO_WRITE) {
		ctl_set_success(&io->scsiio);
		SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0);
		ctl_complete_beio(beio);
	} else {
		SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0);
		io->scsiio.be_move_done = ctl_be_block_move_done;
		io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
		io->scsiio.kern_data_len = beio->io_len;
		io->scsiio.kern_total_len = beio->io_len;
		io->scsiio.kern_rel_offset = 0;
		io->scsiio.kern_data_resid = 0;
		io->scsiio.kern_sg_entries = beio->num_segs;
		io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
#ifdef CTL_TIME_IO
		getbintime(&io->io_hdr.dma_start_bt);
#endif
		ctl_datamove(io);
	}
}
static void
ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
		       struct ctl_be_block_io *beio)
{
	struct bio *bio;
	union ctl_io *io;
	struct ctl_be_block_devdata *dev_data;

	dev_data = &be_lun->backend.dev;
	io = beio->io;

	DPRINTF("entered\n");

	/* This can't fail, it's a blocking allocation. */
	bio = g_alloc_bio();

	bio->bio_cmd = BIO_FLUSH;
	bio->bio_flags |= BIO_ORDERED;
	bio->bio_dev = dev_data->cdev;
	bio->bio_offset = 0;
	bio->bio_data = 0;
	bio->bio_done = ctl_be_block_biodone;
	bio->bio_caller1 = beio;
	bio->bio_pblkno = 0;

	/*
	 * We don't need to acquire the LUN lock here, because we are only
	 * sending one bio, and so there is no other context to synchronize
	 * with.
	 */
	beio->num_bios_sent = 1;
	beio->send_complete = 1;

	binuptime(&beio->ds_t0);
	devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);

	(*dev_data->csw->d_strategy)(bio);
}
static void
ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
			  struct ctl_be_block_io *beio)
{
	int i;
	struct bio *bio;
	struct ctl_be_block_devdata *dev_data;
	off_t cur_offset;
	int max_iosize;

	DPRINTF("entered\n");

	dev_data = &be_lun->backend.dev;

	/*
	 * We have to limit our I/O size to the maximum supported by the
	 * backend device.  Hopefully it is MAXPHYS.  If the driver doesn't
	 * set it properly, use DFLTPHYS.
	 */
	max_iosize = dev_data->cdev->si_iosize_max;
	if (max_iosize < PAGE_SIZE)
		max_iosize = DFLTPHYS;

	cur_offset = beio->io_offset;

	/*
	 * XXX KDM need to accurately reflect the number of I/Os outstanding
	 * to a device.
	 */
	binuptime(&beio->ds_t0);
	devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);

	for (i = 0; i < beio->num_segs; i++) {
		size_t cur_size;
		uint8_t *cur_ptr;

		cur_size = beio->sg_segs[i].len;
		cur_ptr = beio->sg_segs[i].addr;

		while (cur_size > 0) {
			/* This can't fail, it's a blocking allocation. */
			bio = g_alloc_bio();

			KASSERT(bio != NULL, ("g_alloc_bio() failed!\n"));

			bio->bio_cmd = beio->bio_cmd;
			bio->bio_flags |= beio->bio_flags;
			bio->bio_dev = dev_data->cdev;
			bio->bio_caller1 = beio;
			bio->bio_length = min(cur_size, max_iosize);
			bio->bio_offset = cur_offset;
			bio->bio_data = cur_ptr;
			bio->bio_done = ctl_be_block_biodone;
			bio->bio_pblkno = cur_offset / be_lun->blocksize;

			cur_offset += bio->bio_length;
			cur_ptr += bio->bio_length;
			cur_size -= bio->bio_length;

			/*
			 * Make sure we set the complete bit just before we
			 * issue the last bio so we don't wind up with a
			 * race.
			 *
			 * Use the LUN mutex here instead of a combination
			 * of atomic variables for simplicity.
			 *
			 * XXX KDM we could have a per-IO lock, but that
			 * would cause additional per-IO setup and teardown
			 * overhead.  Hopefully there won't be too much
			 * contention on the LUN lock.
			 */
			mtx_lock(&be_lun->lock);

			beio->num_bios_sent++;

			if ((i == beio->num_segs - 1)
			 && (cur_size == 0))
				beio->send_complete = 1;

			mtx_unlock(&be_lun->lock);

			(*dev_data->csw->d_strategy)(bio);
		}
	}
}
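
/*
 * Worked example of the chunking above (illustrative numbers): a 1MB
 * write whose S/G list holds eight 128KB segments, sent to a device
 * whose si_iosize_max is 64KB, is split into two 64KB bios per segment,
 * 16 bios total; send_complete is only set once the final bio has been
 * handed to d_strategy().
 */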
static void
ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
			 union ctl_io *io)
{
	struct ctl_be_block_io *beio;
	struct ctl_be_block_softc *softc;

	DPRINTF("entered\n");

	softc = be_lun->softc;
	beio = ctl_alloc_beio(softc);
	KASSERT(beio != NULL, ("ctl_alloc_beio() failed"));

	beio->io = io;
	beio->softc = softc;
	beio->lun = be_lun;
	io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr = beio;

	switch (io->scsiio.cdb[0]) {
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		beio->bio_cmd = BIO_FLUSH;
		beio->ds_trans_type = DEVSTAT_NO_DATA;
		beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
		beio->io_len = 0;
		be_lun->lun_flush(be_lun, beio);
		break;
	default:
		panic("Unhandled CDB type %#x", io->scsiio.cdb[0]);
		break;
	}
}
SDT_PROBE_DEFINE1(cbb, kernel, read, start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, write, start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, read, alloc_done, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, write, alloc_done, "uint64_t");
static void
ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
		      union ctl_io *io)
{
	struct ctl_be_block_io *beio;
	struct ctl_be_block_softc *softc;
	struct ctl_lba_len lbalen;
	uint64_t len_left, io_size_bytes;
	int i;

	softc = be_lun->softc;

	DPRINTF("entered\n");

	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) {
		SDT_PROBE(cbb, kernel, read, start, 0, 0, 0, 0, 0);
	} else {
		SDT_PROBE(cbb, kernel, write, start, 0, 0, 0, 0, 0);
	}

	memcpy(&lbalen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
	       sizeof(lbalen));

	io_size_bytes = lbalen.len * be_lun->blocksize;
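
	/*
	 * For example (illustrative numbers), a READ for 2048 blocks on a
	 * 512-byte-block LUN works out to 2048 * 512 = 1MB, well under
	 * CTLBLK_MAX_IO_SIZE.
	 */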
	/*
	 * XXX KDM this is temporary, until we implement chaining of beio
	 * structures and multiple datamove calls to move all the data in
	 * or out.
	 */
	if (io_size_bytes > CTLBLK_MAX_IO_SIZE) {
		printf("%s: IO length %ju > max io size %u\n", __func__,
		       (uintmax_t)io_size_bytes, CTLBLK_MAX_IO_SIZE);
		ctl_set_invalid_field(&io->scsiio,
				      /*sks_valid*/ 0,
				      /*command*/ 1,
				      /*field*/ 0,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_done(io);
		return;
	}

	beio = ctl_alloc_beio(softc);
	KASSERT(beio != NULL, ("ctl_alloc_beio() failed"));

	beio->io = io;
	beio->softc = softc;
	beio->lun = be_lun;
	io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr = beio;

	/*
	 * If the I/O came down with an ordered or head of queue tag, set
	 * the BIO_ORDERED attribute.  For head of queue tags, that's
	 * pretty much the best we can do.
	 *
	 * XXX KDM we don't have a great way to easily know about the FUA
	 * bit right now (it is decoded in ctl_read_write(), but we don't
	 * pass that knowledge to the backend), and in any case we would
	 * need to determine how to handle it.
	 */
	if ((io->scsiio.tag_type == CTL_TAG_ORDERED)
	 || (io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE))
		beio->bio_flags = BIO_ORDERED;

	switch (io->scsiio.tag_type) {
	case CTL_TAG_ORDERED:
		beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
		break;
	case CTL_TAG_HEAD_OF_QUEUE:
		beio->ds_tag_type = DEVSTAT_TAG_HEAD;
		break;
	case CTL_TAG_UNTAGGED:
	case CTL_TAG_SIMPLE:
	default:
		beio->ds_tag_type = DEVSTAT_TAG_SIMPLE;
		break;
	}

	/*
	 * This path handles read and write only.  The config write path
	 * handles flush operations.
	 */
	if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) {
		beio->bio_cmd = BIO_READ;
		beio->ds_trans_type = DEVSTAT_READ;
	} else {
		beio->bio_cmd = BIO_WRITE;
		beio->ds_trans_type = DEVSTAT_WRITE;
	}

	beio->io_len = lbalen.len * be_lun->blocksize;
	beio->io_offset = lbalen.lba * be_lun->blocksize;

	DPRINTF("%s at LBA %jx len %u\n",
		(beio->bio_cmd == BIO_READ) ? "READ" : "WRITE",
		(uintmax_t)lbalen.lba, lbalen.len);

	for (i = 0, len_left = io_size_bytes; i < CTLBLK_MAX_SEGS &&
	     len_left > 0; i++) {

		/*
		 * Setup the S/G entry for this chunk.
		 */
		beio->sg_segs[i].len = min(MAXPHYS, len_left);
		beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK);

		DPRINTF("segment %d addr %p len %zd\n", i,
			beio->sg_segs[i].addr, beio->sg_segs[i].len);

		beio->num_segs++;
		len_left -= beio->sg_segs[i].len;
	}
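
	/*
	 * Continuing the example above: a 1MB transfer is carved into
	 * eight MAXPHYS-sized segments (128KB each on a typical
	 * configuration), each backed by a fresh buffer from the per-LUN
	 * UMA zone.
	 */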
	/*
	 * For the read case, we need to read the data into our buffers and
	 * then we can send it back to the user.  For the write case, we
	 * need to get the data from the user first.
	 */
	if (beio->bio_cmd == BIO_READ) {
		SDT_PROBE(cbb, kernel, read, alloc_done, 0, 0, 0, 0, 0);
		be_lun->dispatch(be_lun, beio);
	} else {
		SDT_PROBE(cbb, kernel, write, alloc_done, 0, 0, 0, 0, 0);
		io->scsiio.be_move_done = ctl_be_block_move_done;
		io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
		io->scsiio.kern_data_len = beio->io_len;
		io->scsiio.kern_total_len = beio->io_len;
		io->scsiio.kern_rel_offset = 0;
		io->scsiio.kern_data_resid = 0;
		io->scsiio.kern_sg_entries = beio->num_segs;
		io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
#ifdef CTL_TIME_IO
		getbintime(&io->io_hdr.dma_start_bt);
#endif
		ctl_datamove(io);
	}
}
static void
ctl_be_block_worker(void *context, int pending)
{
	struct ctl_be_block_lun *be_lun;
	struct ctl_be_block_softc *softc;
	union ctl_io *io;

	be_lun = (struct ctl_be_block_lun *)context;
	softc = be_lun->softc;

	DPRINTF("entered\n");

	mtx_lock(&be_lun->lock);
	for (;;) {
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->datamove_queue);
		if (io != NULL) {
			struct ctl_be_block_io *beio;

			DPRINTF("datamove queue\n");

			STAILQ_REMOVE(&be_lun->datamove_queue, &io->io_hdr,
				      ctl_io_hdr, links);

			mtx_unlock(&be_lun->lock);

			beio = (struct ctl_be_block_io *)
			    io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr;

			be_lun->dispatch(be_lun, beio);

			mtx_lock(&be_lun->lock);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_write_queue);
		if (io != NULL) {

			DPRINTF("config write queue\n");

			STAILQ_REMOVE(&be_lun->config_write_queue, &io->io_hdr,
				      ctl_io_hdr, links);

			mtx_unlock(&be_lun->lock);

			ctl_be_block_cw_dispatch(be_lun, io);

			mtx_lock(&be_lun->lock);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->input_queue);
		if (io != NULL) {
			DPRINTF("input queue\n");

			STAILQ_REMOVE(&be_lun->input_queue, &io->io_hdr,
				      ctl_io_hdr, links);
			mtx_unlock(&be_lun->lock);

			/*
			 * We must drop the lock, since this routine and
			 * its children may sleep.
			 */
			ctl_be_block_dispatch(be_lun, io);

			mtx_lock(&be_lun->lock);
			continue;
		}

		/*
		 * If we get here, there is no work left in the queues, so
		 * just break out and let the task queue go to sleep.
		 */
		break;
	}
	mtx_unlock(&be_lun->lock);
}
/*
 * Entry point from CTL to the backend for I/O.  We queue everything to a
 * work thread, so this just puts the I/O on a queue and wakes up the
 * thread.
 */
static int
ctl_be_block_submit(union ctl_io *io)
{
	struct ctl_be_block_lun *be_lun;
	struct ctl_be_lun *ctl_be_lun;
	int retval;

	DPRINTF("entered\n");

	retval = CTL_RETVAL_COMPLETE;

	ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
		CTL_PRIV_BACKEND_LUN].ptr;
	be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun;

	/*
	 * Make sure we only get SCSI I/O.
	 */
	KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Non-SCSI I/O (type "
		"%#x) encountered", io->io_hdr.io_type));

	mtx_lock(&be_lun->lock);
	/*
	 * XXX KDM make sure that links is okay to use at this point.
	 * Otherwise, we either need to add another field to ctl_io_hdr,
	 * or deal with resource allocation here.
	 */
	STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links);
	mtx_unlock(&be_lun->lock);

	taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);

	return (retval);
}
static int
ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
		   int flag, struct thread *td)
{
	struct ctl_be_block_softc *softc;
	int error;

	error = 0;

	softc = &backend_block_softc;

	switch (cmd) {
	case CTL_LUN_REQ: {
		struct ctl_lun_req *lun_req;

		lun_req = (struct ctl_lun_req *)addr;

		switch (lun_req->reqtype) {
		case CTL_LUNREQ_CREATE:
			error = ctl_be_block_create(softc, lun_req);
			break;
		case CTL_LUNREQ_RM:
			error = ctl_be_block_rm(softc, lun_req);
			break;
		case CTL_LUNREQ_MODIFY:
			error = ctl_be_block_modify(softc, lun_req);
			break;
		default:
			lun_req->status = CTL_LUN_ERROR;
			snprintf(lun_req->error_str, sizeof(lun_req->error_str),
				 "%s: invalid LUN request type %d", __func__,
				 lun_req->reqtype);
			break;
		}
		break;
	}
	default:
		error = ENOTTY;
		break;
	}

	return (error);
}
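
/*
 * Sketch of how a userland tool might exercise the handler above
 * (hypothetical code, error handling omitted): fill out a ctl_lun_req
 * and push it through the CTL_LUN_REQ ioctl on the CTL device node:
 *
 *	struct ctl_lun_req req;
 *
 *	bzero(&req, sizeof(req));
 *	strlcpy(req.backend, "block", sizeof(req.backend));
 *	req.reqtype = CTL_LUNREQ_CREATE;
 *	ioctl(ctl_fd, CTL_LUN_REQ, &req);
 *
 * On failure, req.status is CTL_LUN_ERROR and req.error_str holds the
 * message formatted by the request handlers.
 */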
static int
ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
{
	struct ctl_be_block_filedata *file_data;
	struct ctl_lun_create_params *params;
	struct vattr vattr;
	int error;

	error = 0;
	file_data = &be_lun->backend.file;
	params = &req->reqdata.create;

	be_lun->dev_type = CTL_BE_BLOCK_FILE;
	be_lun->dispatch = ctl_be_block_dispatch_file;
	be_lun->lun_flush = ctl_be_block_flush_file;

	error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred);
	if (error != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "error calling VOP_GETATTR() for file %s",
			 be_lun->dev_path);
		return (error);
	}

	/*
	 * Verify that we have the ability to upgrade to exclusive
	 * access on this file so we can trap errors at open instead
	 * of reporting them during first access.
	 */
	if (VOP_ISLOCKED(be_lun->vn) != LK_EXCLUSIVE) {
		vn_lock(be_lun->vn, LK_UPGRADE | LK_RETRY);
		if (be_lun->vn->v_iflag & VI_DOOMED) {
			error = EBADF;
			snprintf(req->error_str, sizeof(req->error_str),
				 "error locking file %s", be_lun->dev_path);
			return (error);
		}
	}

	file_data->cred = crhold(curthread->td_ucred);
	if (params->lun_size_bytes != 0)
		be_lun->size_bytes = params->lun_size_bytes;
	else
		be_lun->size_bytes = vattr.va_size;
	/*
	 * We set the multi thread flag for file operations because all
	 * filesystems (in theory) are capable of allowing multiple readers
	 * of a file at once.  So we want to get the maximum possible
	 * concurrency.
	 */
	be_lun->flags |= CTL_BE_BLOCK_LUN_MULTI_THREAD;

	/*
	 * XXX KDM vattr.va_blocksize may be larger than 512 bytes here.
	 * With ZFS, it is 131072 bytes.  Block sizes that large don't work
	 * with disklabel and UFS on FreeBSD at least.  Large block sizes
	 * may not work with other OSes as well.  So just export a sector
	 * size of 512 bytes, which should work with any OS or
	 * application.  Since our backing is a file, any block size will
	 * work fine for the backing store.
	 */
#if 0
	be_lun->blocksize = vattr.va_blocksize;
#endif
	if (params->blocksize_bytes != 0)
		be_lun->blocksize = params->blocksize_bytes;
	else
		be_lun->blocksize = 512;

	/*
	 * Sanity check.  The media size has to be at least one
	 * sector long.
	 */
	if (be_lun->size_bytes < be_lun->blocksize) {
		error = EINVAL;
		snprintf(req->error_str, sizeof(req->error_str),
			 "file %s size %ju < block size %u", be_lun->dev_path,
			 (uintmax_t)be_lun->size_bytes, be_lun->blocksize);
	}
	return (error);
}
static int
ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
{
	struct ctl_lun_create_params *params;
	struct vattr vattr;
	struct cdev *dev;
	struct cdevsw *devsw;
	int error;
	off_t ps, pss, po, pos;

	params = &req->reqdata.create;

	be_lun->dev_type = CTL_BE_BLOCK_DEV;
	be_lun->dispatch = ctl_be_block_dispatch_dev;
	be_lun->lun_flush = ctl_be_block_flush_dev;
	be_lun->backend.dev.cdev = be_lun->vn->v_rdev;
	be_lun->backend.dev.csw = dev_refthread(be_lun->backend.dev.cdev,
					       &be_lun->backend.dev.dev_ref);
	if (be_lun->backend.dev.csw == NULL)
		panic("Unable to retrieve device switch");

	error = VOP_GETATTR(be_lun->vn, &vattr, NOCRED);
	if (error) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error getting vnode attributes for device %s",
			 __func__, be_lun->dev_path);
		return (error);
	}

	dev = be_lun->vn->v_rdev;
	devsw = dev->si_devsw;
	if (!devsw->d_ioctl) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: no d_ioctl for device %s!", __func__,
			 be_lun->dev_path);
		return (ENODEV);
	}

	error = devsw->d_ioctl(dev, DIOCGSECTORSIZE,
			       (caddr_t)&be_lun->blocksize, FREAD,
			       curthread);
	if (error) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned for DIOCGSECTORSIZE ioctl "
			 "on %s!", __func__, error, be_lun->dev_path);
		return (error);
	}

	/*
	 * If the user has asked for a blocksize that is greater than the
	 * backing device's blocksize, we can do it only if the blocksize
	 * the user is asking for is an even multiple of the underlying
	 * device's blocksize.
	 */
	if ((params->blocksize_bytes != 0)
	 && (params->blocksize_bytes > be_lun->blocksize)) {
		uint32_t bs_multiple, tmp_blocksize;

		bs_multiple = params->blocksize_bytes / be_lun->blocksize;

		tmp_blocksize = bs_multiple * be_lun->blocksize;

		if (tmp_blocksize == params->blocksize_bytes) {
			be_lun->blocksize = params->blocksize_bytes;
		} else {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: requested blocksize %u is not an even "
				 "multiple of backing device blocksize %u",
				 __func__, params->blocksize_bytes,
				 be_lun->blocksize);
			return (EINVAL);
		}
	} else if ((params->blocksize_bytes != 0)
		&& (params->blocksize_bytes != be_lun->blocksize)) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: requested blocksize %u < backing device "
			 "blocksize %u", __func__, params->blocksize_bytes,
			 be_lun->blocksize);
		return (EINVAL);
	}
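
	/*
	 * For instance (illustrative numbers), a requested blocksize of
	 * 4096 bytes over a 512-byte native sector is accepted above
	 * (4096 = 8 * 512), while a request for 4100 bytes fails the
	 * even-multiple check.
	 */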
	error = devsw->d_ioctl(dev, DIOCGMEDIASIZE,
			       (caddr_t)&be_lun->size_bytes, FREAD,
			       curthread);
	if (error) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned for DIOCGMEDIASIZE "
			 "ioctl on %s!", __func__, error,
			 be_lun->dev_path);
		return (error);
	}

	if (params->lun_size_bytes != 0) {
		if (params->lun_size_bytes > be_lun->size_bytes) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: requested LUN size %ju > backing device "
				 "size %ju", __func__,
				 (uintmax_t)params->lun_size_bytes,
				 (uintmax_t)be_lun->size_bytes);
			return (EINVAL);
		}

		be_lun->size_bytes = params->lun_size_bytes;
	}

	error = devsw->d_ioctl(dev, DIOCGSTRIPESIZE,
			       (caddr_t)&ps, FREAD, curthread);
	if (error)
		ps = po = 0;
	else {
		error = devsw->d_ioctl(dev, DIOCGSTRIPEOFFSET,
				       (caddr_t)&po, FREAD, curthread);
		if (error)
			po = 0;
	}
	pss = ps / be_lun->blocksize;
	pos = po / be_lun->blocksize;
	if ((pss > 0) && (pss * be_lun->blocksize == ps) && (pss >= pos) &&
	    ((pss & (pss - 1)) == 0) && (pos * be_lun->blocksize == po)) {
		be_lun->pblockexp = fls(pss) - 1;
		be_lun->pblockoff = (pss - pos) % pss;
	}

	return (0);
}
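
/*
 * Worked example for the stripe probing above (illustrative numbers): a
 * RAID volume reporting a 65536-byte stripe size and zero stripe offset
 * on a 512-byte-block LUN gives pss = 128, which is a power of two, so
 * pblockexp becomes fls(128) - 1 = 7 and pblockoff 0; the LUN then
 * advertises 2^7 logical blocks per physical block.
 */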
static int
ctl_be_block_close(struct ctl_be_block_lun *be_lun)
{
	if (be_lun->vn) {
		int flags = FREAD | FWRITE;
		int vfs_is_locked = 0;

		switch (be_lun->dev_type) {
		case CTL_BE_BLOCK_DEV:
			if (be_lun->backend.dev.csw) {
				dev_relthread(be_lun->backend.dev.cdev,
					      be_lun->backend.dev.dev_ref);
				be_lun->backend.dev.csw = NULL;
				be_lun->backend.dev.cdev = NULL;
			}
			break;
		case CTL_BE_BLOCK_FILE:
			vfs_is_locked = VFS_LOCK_GIANT(be_lun->vn->v_mount);
			break;
		case CTL_BE_BLOCK_NONE:
			break;
		default:
			panic("Unexpected backend type.");
			break;
		}

		(void)vn_close(be_lun->vn, flags, NOCRED, curthread);
		be_lun->vn = NULL;

		switch (be_lun->dev_type) {
		case CTL_BE_BLOCK_DEV:
			break;
		case CTL_BE_BLOCK_FILE:
			VFS_UNLOCK_GIANT(vfs_is_locked);
			if (be_lun->backend.file.cred != NULL) {
				crfree(be_lun->backend.file.cred);
				be_lun->backend.file.cred = NULL;
			}
			break;
		case CTL_BE_BLOCK_NONE:
			break;
		default:
			panic("Unexpected backend type.");
			break;
		}
	}

	return (0);
}
static int
ctl_be_block_open(struct ctl_be_block_softc *softc,
		  struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
{
	struct nameidata nd;
	int flags;
	int error;
	int vfs_is_locked;

	/*
	 * XXX KDM allow a read-only option?
	 */
	flags = FREAD | FWRITE;
	error = 0;

	if (rootvnode == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: Root filesystem is not mounted", __func__);
		return (1);
	}

	if (!curthread->td_proc->p_fd->fd_cdir) {
		curthread->td_proc->p_fd->fd_cdir = rootvnode;
		VREF(rootvnode);
	}
	if (!curthread->td_proc->p_fd->fd_rdir) {
		curthread->td_proc->p_fd->fd_rdir = rootvnode;
		VREF(rootvnode);
	}
	if (!curthread->td_proc->p_fd->fd_jdir) {
		curthread->td_proc->p_fd->fd_jdir = rootvnode;
		VREF(rootvnode);
	}

again:
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, be_lun->dev_path, curthread);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error != 0) {
		/*
		 * This is the only reasonable guess we can make as far as
		 * path if the user doesn't give us a fully qualified path.
		 * If they want to specify a file, they need to specify the
		 * full path.
		 */
		if (be_lun->dev_path[0] != '/') {
			char *dev_path = "/dev/";
			char *dev_name;

			/* Try adding device path at beginning of name */
			dev_name = malloc(strlen(be_lun->dev_path)
					+ strlen(dev_path) + 1,
					  M_CTLBLK, M_WAITOK);

			sprintf(dev_name, "%s%s", dev_path,
				be_lun->dev_path);
			free(be_lun->dev_path, M_CTLBLK);
			be_lun->dev_path = dev_name;
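			/*
			 * e.g. a bare "da0" from the user becomes
			 * "/dev/da0" before the open is retried below
			 * (illustrative device name).
			 */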
			goto again;
		}
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error opening %s", __func__, be_lun->dev_path);
		return (error);
	}

	vfs_is_locked = NDHASGIANT(&nd);

	NDFREE(&nd, NDF_ONLY_PNBUF);

	be_lun->vn = nd.ni_vp;

	/* We only support disks and files. */
	if (vn_isdisk(be_lun->vn, &error)) {
		error = ctl_be_block_open_dev(be_lun, req);
	} else if (be_lun->vn->v_type == VREG) {
		error = ctl_be_block_open_file(be_lun, req);
	} else {
		error = EINVAL;
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s is not a disk or plain file", be_lun->dev_path);
	}
	VOP_UNLOCK(be_lun->vn, 0);
	VFS_UNLOCK_GIANT(vfs_is_locked);

	if (error != 0) {
		ctl_be_block_close(be_lun);
		return (error);
	}

	be_lun->blocksize_shift = fls(be_lun->blocksize) - 1;
	be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift;

	return (0);
}
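
/*
 * Example of the final size computation above: with a 512-byte
 * blocksize, blocksize_shift is fls(512) - 1 = 9, so a 1GB backing
 * store yields 1073741824 >> 9 = 2097152 blocks.  Note the shift
 * assumes the blocksize is a power of two.
 */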
static int
ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
{
	struct ctl_be_block_lun *be_lun;
	struct ctl_lun_create_params *params;
	struct ctl_be_arg *file_arg;
	char tmpstr[32];
	int retval, num_threads;
	int i;

	params = &req->reqdata.create;
	retval = 0;

	num_threads = cbb_num_threads;

	file_arg = NULL;

	be_lun = malloc(sizeof(*be_lun), M_CTLBLK, M_ZERO | M_WAITOK);

	be_lun->softc = softc;
	STAILQ_INIT(&be_lun->input_queue);
	STAILQ_INIT(&be_lun->config_write_queue);
	STAILQ_INIT(&be_lun->datamove_queue);
	sprintf(be_lun->lunname, "cblk%d", softc->num_luns);
	mtx_init(&be_lun->lock, be_lun->lunname, NULL, MTX_DEF);

	be_lun->lun_zone = uma_zcreate(be_lun->lunname, MAXPHYS,
	    NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0);

	if (be_lun->lun_zone == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error allocating UMA zone", __func__);
		goto bailout_error;
	}

	if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
		be_lun->ctl_be_lun.lun_type = params->device_type;
	else
		be_lun->ctl_be_lun.lun_type = T_DIRECT;

	if (be_lun->ctl_be_lun.lun_type == T_DIRECT) {
		for (i = 0; i < req->num_be_args; i++) {
			if (strcmp(req->kern_be_args[i].kname, "file") == 0) {
				file_arg = &req->kern_be_args[i];
				break;
			}
		}

		if (file_arg == NULL) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: no file argument specified", __func__);
			goto bailout_error;
		}

		be_lun->dev_path = malloc(file_arg->vallen, M_CTLBLK,
					  M_WAITOK | M_ZERO);

		strlcpy(be_lun->dev_path, (char *)file_arg->kvalue,
			file_arg->vallen);

		retval = ctl_be_block_open(softc, be_lun, req);
		if (retval != 0) {
			retval = 0;
			goto bailout_error;
		}

		/*
		 * Tell the user the size of the file/device.
		 */
		params->lun_size_bytes = be_lun->size_bytes;

		/*
		 * The maximum LBA is the size - 1.
		 */
		be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
	} else {
		/*
		 * For processor devices, we don't have any size.
		 */
		be_lun->blocksize = 0;
		be_lun->pblockexp = 0;
		be_lun->pblockoff = 0;
		be_lun->size_blocks = 0;
		be_lun->size_bytes = 0;
		be_lun->ctl_be_lun.maxlba = 0;
		params->lun_size_bytes = 0;

		/*
		 * Default to just 1 thread for processor devices.
		 */
		num_threads = 1;
	}

	/*
	 * XXX This searching loop might be refactored to be combined with
	 * the loop above.
	 */
	for (i = 0; i < req->num_be_args; i++) {
		if (strcmp(req->kern_be_args[i].kname, "num_threads") == 0) {
			struct ctl_be_arg *thread_arg;
			char num_thread_str[16];
			int tmp_num_threads;

			thread_arg = &req->kern_be_args[i];

			strlcpy(num_thread_str, (char *)thread_arg->kvalue,
				min(thread_arg->vallen,
				sizeof(num_thread_str)));

			tmp_num_threads = strtol(num_thread_str, NULL, 0);

			/*
			 * We don't let the user specify less than one
			 * thread, but hope he's clueful enough not to
			 * specify 1000 threads.
			 */
			if (tmp_num_threads < 1) {
				snprintf(req->error_str,
					 sizeof(req->error_str),
					 "%s: invalid number of threads %s",
					 __func__, num_thread_str);
				goto bailout_error;
			}

			num_threads = tmp_num_threads;
		}
	}

	be_lun->flags = CTL_BE_BLOCK_LUN_UNCONFIGURED;
	be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY;
	be_lun->ctl_be_lun.be_lun = be_lun;
	be_lun->ctl_be_lun.blocksize = be_lun->blocksize;
	be_lun->ctl_be_lun.pblockexp = be_lun->pblockexp;
	be_lun->ctl_be_lun.pblockoff = be_lun->pblockoff;
	/* Tell the user the blocksize we ended up using */
	params->blocksize_bytes = be_lun->blocksize;
	if (params->flags & CTL_LUN_FLAG_ID_REQ) {
		be_lun->ctl_be_lun.req_lun_id = params->req_lun_id;
		be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ;
	} else
		be_lun->ctl_be_lun.req_lun_id = 0;

	be_lun->ctl_be_lun.lun_shutdown = ctl_be_block_lun_shutdown;
	be_lun->ctl_be_lun.lun_config_status =
		ctl_be_block_lun_config_status;
	be_lun->ctl_be_lun.be = &ctl_be_block_driver;

	if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
			 softc->num_luns);
		strncpy((char *)be_lun->ctl_be_lun.serial_num, tmpstr,
			ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
			sizeof(tmpstr)));

		/* Tell the user what we used for a serial number */
		strncpy((char *)params->serial_num, tmpstr,
			ctl_min(sizeof(params->serial_num), sizeof(tmpstr)));
	} else {
		strncpy((char *)be_lun->ctl_be_lun.serial_num,
			params->serial_num,
			ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
			sizeof(params->serial_num)));
	}
	if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
		strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr,
			ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
			sizeof(tmpstr)));

		/* Tell the user what we used for a device ID */
		strncpy((char *)params->device_id, tmpstr,
			ctl_min(sizeof(params->device_id), sizeof(tmpstr)));
	} else {
		strncpy((char *)be_lun->ctl_be_lun.device_id,
			params->device_id,
			ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
			sizeof(params->device_id)));
	}

	TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_be_block_worker, be_lun);

	be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
	    taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);

	if (be_lun->io_taskqueue == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: Unable to create taskqueue", __func__);
		goto bailout_error;
	}

	/*
	 * Note that we start the same number of threads by default for
	 * both the file case and the block device case.  For the file
	 * case, we need multiple threads to allow concurrency, because the
	 * vnode interface is designed to be a blocking interface.  For the
	 * block device case, ZFS zvols at least will block the caller's
	 * context in many instances, and so we need multiple threads to
	 * overcome that problem.  Other block devices don't need as many
	 * threads, but they shouldn't cause too many problems.
	 *
	 * If the user wants to just have a single thread for a block
	 * device, he can specify that when the LUN is created, or change
	 * the tunable/sysctl to alter the default number of threads.
	 */
	retval = taskqueue_start_threads(&be_lun->io_taskqueue,
					 /*num threads*/num_threads,
					 /*priority*/PWAIT,
					 /*thread name*/
					 "%s taskq", be_lun->lunname);

	if (retval != 0)
		goto bailout_error;

	be_lun->num_threads = num_threads;

	mtx_lock(&softc->lock);
	softc->num_luns++;
	STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);

	mtx_unlock(&softc->lock);

	retval = ctl_add_lun(&be_lun->ctl_be_lun);
	if (retval != 0) {
		mtx_lock(&softc->lock);
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun,
			      links);
		softc->num_luns--;
		mtx_unlock(&softc->lock);
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: ctl_add_lun() returned error %d, see dmesg for "
			 "details", __func__, retval);
		retval = 0;
		goto bailout_error;
	}

	mtx_lock(&softc->lock);

	/*
	 * Tell the config_status routine that we're waiting so it won't
	 * clean up the LUN in the event of an error.
	 */
	be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING;

	while (be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0);
		if (retval == EINTR)
			break;
	}
	be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING;

	if (be_lun->flags & CTL_BE_BLOCK_LUN_CONFIG_ERR) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN configuration error, see dmesg for details",
			 __func__);
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun,
			      links);
		softc->num_luns--;
		mtx_unlock(&softc->lock);
		goto bailout_error;
	} else {
		params->req_lun_id = be_lun->ctl_be_lun.lun_id;
	}

	mtx_unlock(&softc->lock);

	be_lun->disk_stats = devstat_new_entry("cbb", params->req_lun_id,
					       be_lun->blocksize,
					       DEVSTAT_ALL_SUPPORTED,
					       be_lun->ctl_be_lun.lun_type
					       | DEVSTAT_TYPE_IF_OTHER,
					       DEVSTAT_PRIORITY_OTHER);

	req->status = CTL_LUN_OK;

	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;

	ctl_be_block_close(be_lun);

	free(be_lun->dev_path, M_CTLBLK);
	free(be_lun, M_CTLBLK);

	return (retval);
}
static int
ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
{
	struct ctl_lun_rm_params *params;
	struct ctl_be_block_lun *be_lun;
	int retval;

	params = &req->reqdata.rm;

	mtx_lock(&softc->lock);

	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->ctl_be_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);

	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN %u is not managed by the block backend",
			 __func__, params->lun_id);
		goto bailout_error;
	}

	retval = ctl_disable_lun(&be_lun->ctl_be_lun);

	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned from ctl_disable_lun() for "
			 "LUN %d", __func__, retval, params->lun_id);
		goto bailout_error;
	}

	retval = ctl_invalidate_lun(&be_lun->ctl_be_lun);
	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned from ctl_invalidate_lun() for "
			 "LUN %d", __func__, retval, params->lun_id);
		goto bailout_error;
	}

	mtx_lock(&softc->lock);

	be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING;

	while ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0);
		if (retval == EINTR)
			break;
	}

	be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING;

	if ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: interrupted waiting for LUN to be freed",
			 __func__);
		mtx_unlock(&softc->lock);
		goto bailout_error;
	}

	STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, links);

	softc->num_luns--;
	mtx_unlock(&softc->lock);

	taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task);

	taskqueue_free(be_lun->io_taskqueue);

	ctl_be_block_close(be_lun);

	if (be_lun->disk_stats != NULL)
		devstat_remove_entry(be_lun->disk_stats);

	uma_zdestroy(be_lun->lun_zone);

	free(be_lun->dev_path, M_CTLBLK);

	free(be_lun, M_CTLBLK);

	req->status = CTL_LUN_OK;

	return (0);

bailout_error:

	req->status = CTL_LUN_ERROR;

	return (0);
}
static int
ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun,
			 struct ctl_lun_req *req)
{
	struct vattr vattr;
	int error;
	struct ctl_lun_modify_params *params;

	params = &req->reqdata.modify;

	if (params->lun_size_bytes != 0) {
		be_lun->size_bytes = params->lun_size_bytes;
	} else {
		error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred);
		if (error != 0) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "error calling VOP_GETATTR() for file %s",
				 be_lun->dev_path);
			return (error);
		}

		be_lun->size_bytes = vattr.va_size;
	}

	return (0);
}
static int
ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun,
			struct ctl_lun_req *req)
{
	struct cdev *dev;
	struct cdevsw *devsw;
	int error;
	struct ctl_lun_modify_params *params;
	uint64_t size_bytes;

	params = &req->reqdata.modify;

	dev = be_lun->vn->v_rdev;
	devsw = dev->si_devsw;
	if (!devsw->d_ioctl) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: no d_ioctl for device %s!", __func__,
			 be_lun->dev_path);
		return (ENODEV);
	}

	error = devsw->d_ioctl(dev, DIOCGMEDIASIZE,
			       (caddr_t)&size_bytes, FREAD,
			       curthread);
	if (error) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned for DIOCGMEDIASIZE ioctl "
			 "on %s!", __func__, error, be_lun->dev_path);
		return (error);
	}

	if (params->lun_size_bytes != 0) {
		if (params->lun_size_bytes > size_bytes) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: requested LUN size %ju > backing device "
				 "size %ju", __func__,
				 (uintmax_t)params->lun_size_bytes,
				 (uintmax_t)size_bytes);
			return (EINVAL);
		}

		be_lun->size_bytes = params->lun_size_bytes;
	} else {
		be_lun->size_bytes = size_bytes;
	}

	return (0);
}
static int
ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
{
	struct ctl_lun_modify_params *params;
	struct ctl_be_block_lun *be_lun;
	int vfs_is_locked, error;

	params = &req->reqdata.modify;

	mtx_lock(&softc->lock);

	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->ctl_be_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);

	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN %u is not managed by the block backend",
			 __func__, params->lun_id);
		goto bailout_error;
	}

	if (params->lun_size_bytes != 0) {
		if (params->lun_size_bytes < be_lun->blocksize) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: LUN size %ju < blocksize %u", __func__,
				 (uintmax_t)params->lun_size_bytes,
				 be_lun->blocksize);
			goto bailout_error;
		}
	}

	vfs_is_locked = VFS_LOCK_GIANT(be_lun->vn->v_mount);
	vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);

	if (be_lun->vn->v_type == VREG)
		error = ctl_be_block_modify_file(be_lun, req);
	else
		error = ctl_be_block_modify_dev(be_lun, req);

	VOP_UNLOCK(be_lun->vn, 0);
	VFS_UNLOCK_GIANT(vfs_is_locked);

	if (error != 0)
		goto bailout_error;

	be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift;

	/*
	 * The maximum LBA is the size - 1.
	 *
	 * XXX: Note that this field is being updated without locking,
	 *	which might cause problems on 32-bit architectures.
	 */
	be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
	ctl_lun_capacity_changed(&be_lun->ctl_be_lun);

	/* Tell the user the exact size we ended up using */
	params->lun_size_bytes = be_lun->size_bytes;

	req->status = CTL_LUN_OK;

	return (0);

bailout_error:
	req->status = CTL_LUN_ERROR;

	return (0);
}
static void
ctl_be_block_lun_shutdown(void *be_lun)
{
	struct ctl_be_block_lun *lun;
	struct ctl_be_block_softc *softc;

	lun = (struct ctl_be_block_lun *)be_lun;

	softc = lun->softc;

	mtx_lock(&softc->lock);
	lun->flags |= CTL_BE_BLOCK_LUN_UNCONFIGURED;
	if (lun->flags & CTL_BE_BLOCK_LUN_WAITING)
		wakeup(lun);
	mtx_unlock(&softc->lock);
}
static void
ctl_be_block_lun_config_status(void *be_lun, ctl_lun_config_status status)
{
	struct ctl_be_block_lun *lun;
	struct ctl_be_block_softc *softc;

	lun = (struct ctl_be_block_lun *)be_lun;
	softc = lun->softc;

	if (status == CTL_LUN_CONFIG_OK) {
		mtx_lock(&softc->lock);
		lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED;
		if (lun->flags & CTL_BE_BLOCK_LUN_WAITING)
			wakeup(lun);
		mtx_unlock(&softc->lock);

		/*
		 * We successfully added the LUN, attempt to enable it.
		 */
		if (ctl_enable_lun(&lun->ctl_be_lun) != 0) {
			printf("%s: ctl_enable_lun() failed!\n", __func__);
			if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) {
				printf("%s: ctl_invalidate_lun() failed!\n",
				       __func__);
			}
		}

		return;
	}

	mtx_lock(&softc->lock);
	lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED;
	lun->flags |= CTL_BE_BLOCK_LUN_CONFIG_ERR;
	wakeup(lun);
	mtx_unlock(&softc->lock);
}
static int
ctl_be_block_config_write(union ctl_io *io)
{
	struct ctl_be_block_lun *be_lun;
	struct ctl_be_lun *ctl_be_lun;
	int retval;

	retval = 0;

	DPRINTF("entered\n");

	ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
		CTL_PRIV_BACKEND_LUN].ptr;
	be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun;

	switch (io->scsiio.cdb[0]) {
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		/*
		 * The upper level CTL code will filter out any CDBs with
		 * the immediate bit set and return the proper error.
		 *
		 * We don't really need to worry about what LBA range the
		 * user asked to be synced out.  When they issue a sync
		 * cache command, we'll sync out the whole thing.
		 */
		mtx_lock(&be_lun->lock);
		STAILQ_INSERT_TAIL(&be_lun->config_write_queue, &io->io_hdr,
				   links);
		mtx_unlock(&be_lun->lock);
		taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
		break;
	case START_STOP_UNIT: {
		struct scsi_start_stop_unit *cdb;

		cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;

		if (cdb->how & SSS_START)
			retval = ctl_start_lun(ctl_be_lun);
		else {
			retval = ctl_stop_lun(ctl_be_lun);

			/*
			 * XXX KDM Copan-specific offline behavior.
			 * Figure out a reasonable way to port this?
			 */
#ifdef NEEDTOPORT
			if ((retval == 0)
			 && (cdb->byte2 & SSS_ONOFFLINE))
				retval = ctl_lun_offline(ctl_be_lun);
#endif
		}

		/*
		 * In general, the above routines should not fail.  They
		 * just set state for the LUN.  So we've got something
		 * pretty wrong here if we can't start or stop the LUN.
		 */
		if (retval != 0) {
			ctl_set_internal_failure(&io->scsiio,
						 /*sks_valid*/ 1,
						 /*retry_count*/ 0xf051);
			retval = CTL_RETVAL_COMPLETE;
		} else {
			ctl_set_success(&io->scsiio);
		}
		ctl_config_write_done(io);
		break;
	}
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_write_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}

	return (retval);
}
static int
ctl_be_block_config_read(union ctl_io *io)
{
	return (0);
}
static int
ctl_be_block_lun_info(void *be_lun, struct sbuf *sb)
{
	struct ctl_be_block_lun *lun;
	int retval;

	lun = (struct ctl_be_block_lun *)be_lun;
	retval = 0;

	retval = sbuf_printf(sb, "<num_threads>");
	if (retval != 0)
		goto bailout;

	retval = sbuf_printf(sb, "%d", lun->num_threads);
	if (retval != 0)
		goto bailout;

	retval = sbuf_printf(sb, "</num_threads>");
	if (retval != 0)
		goto bailout;

	/*
	 * For processor devices, we don't have a path variable.
	 */
	if ((lun->dev_type == CTL_BE_BLOCK_NONE)
	 || (lun->dev_path == NULL))
		goto bailout;

	retval = sbuf_printf(sb, "<file>");
	if (retval != 0)
		goto bailout;

	retval = ctl_sbuf_printf_esc(sb, lun->dev_path);
	if (retval != 0)
		goto bailout;

	retval = sbuf_printf(sb, "</file>\n");

bailout:
	return (retval);
}
int
ctl_be_block_init(void)
{
	struct ctl_be_block_softc *softc;
	int retval;

	softc = &backend_block_softc;
	retval = 0;

	mtx_init(&softc->lock, "ctlblk", NULL, MTX_DEF);
	beio_zone = uma_zcreate("beio", sizeof(struct ctl_be_block_io),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	STAILQ_INIT(&softc->disk_list);
	STAILQ_INIT(&softc->lun_list);

	return (retval);
}