2 * Copyright (c) 2003 Silicon Graphics International Corp.
3 * Copyright (c) 2009-2011 Spectra Logic Corporation
4 * Copyright (c) 2012 The FreeBSD Foundation
7 * Portions of this software were developed by Edward Tomasz Napierala
8 * under sponsorship from the FreeBSD Foundation.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions, and the following disclaimer,
15 * without modification.
16 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
17 * substantially similar to the "NO WARRANTY" disclaimer below
18 * ("Disclaimer") and any redistribution must be conditioned upon
19 * including a substantially similar Disclaimer requirement for further
20 * binary redistribution.
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
31 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
32 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 * POSSIBILITY OF SUCH DAMAGES.
35 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_block.c#5 $
38 * CAM Target Layer driver backend for block devices.
40 * Author: Ken Merry <ken@FreeBSD.org>
42 #include <sys/cdefs.h>
43 __FBSDID("$FreeBSD$");
#include "opt_kdtrace.h"
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/kernel.h>
50 #include <sys/types.h>
51 #include <sys/kthread.h>
53 #include <sys/fcntl.h>
55 #include <sys/mutex.h>
56 #include <sys/condvar.h>
57 #include <sys/malloc.h>
59 #include <sys/ioccom.h>
60 #include <sys/queue.h>
62 #include <sys/endian.h>
65 #include <sys/taskqueue.h>
66 #include <sys/vnode.h>
67 #include <sys/namei.h>
68 #include <sys/mount.h>
70 #include <sys/fcntl.h>
71 #include <sys/filedesc.h>
74 #include <sys/module.h>
76 #include <sys/devicestat.h>
77 #include <sys/sysctl.h>
79 #include <geom/geom.h>
82 #include <cam/scsi/scsi_all.h>
83 #include <cam/scsi/scsi_da.h>
84 #include <cam/ctl/ctl_io.h>
85 #include <cam/ctl/ctl.h>
86 #include <cam/ctl/ctl_backend.h>
87 #include <cam/ctl/ctl_frontend_internal.h>
88 #include <cam/ctl/ctl_ioctl.h>
89 #include <cam/ctl/ctl_scsi_all.h>
90 #include <cam/ctl/ctl_error.h>
93 * The idea here is that we'll allocate enough S/G space to hold a 16MB
94 * I/O. If we get an I/O larger than that, we'll reject it.
96 #define CTLBLK_MAX_IO_SIZE (16 * 1024 * 1024)
#define CTLBLK_MAX_SEGS ((CTLBLK_MAX_IO_SIZE / MAXPHYS) + 1)
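/*
 * Worked example (assuming the common MAXPHYS of 128KB): a 16MB maximum
 * I/O split into MAXPHYS-sized chunks needs 16MB / 128KB = 128 segments,
 * plus one for slack, so the per-I/O S/G and iovec arrays below hold 129
 * entries.
 */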
#ifdef CTLBLK_DEBUG
#define DPRINTF(fmt, args...) \
printf("cbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
#else
#define DPRINTF(fmt, args...) do {} while(0)
#endif
106 SDT_PROVIDER_DEFINE(cbb);
109 CTL_BE_BLOCK_LUN_UNCONFIGURED = 0x01,
110 CTL_BE_BLOCK_LUN_CONFIG_ERR = 0x02,
111 CTL_BE_BLOCK_LUN_WAITING = 0x04,
112 CTL_BE_BLOCK_LUN_MULTI_THREAD = 0x08
113 } ctl_be_block_lun_flags;
121 struct ctl_be_block_devdata {
127 struct ctl_be_block_filedata {
131 union ctl_be_block_bedata {
132 struct ctl_be_block_devdata dev;
133 struct ctl_be_block_filedata file;
136 struct ctl_be_block_io;
137 struct ctl_be_block_lun;
139 typedef void (*cbb_dispatch_t)(struct ctl_be_block_lun *be_lun,
140 struct ctl_be_block_io *beio);
143 * Backend LUN structure. There is a 1:1 mapping between a block device
144 * and a backend block LUN, and between a backend block LUN and a CTL LUN.
146 struct ctl_be_block_lun {
147 struct ctl_block_disk *disk;
150 ctl_be_block_type dev_type;
152 union ctl_be_block_bedata backend;
153 cbb_dispatch_t dispatch;
154 cbb_dispatch_t lun_flush;
157 uint64_t size_blocks;
161 struct ctl_be_block_softc *softc;
162 struct devstat *disk_stats;
163 ctl_be_block_lun_flags flags;
164 STAILQ_ENTRY(ctl_be_block_lun) links;
165 struct ctl_be_lun ctl_be_lun;
166 struct taskqueue *io_taskqueue;
169 STAILQ_HEAD(, ctl_io_hdr) input_queue;
170 STAILQ_HEAD(, ctl_io_hdr) config_write_queue;
171 STAILQ_HEAD(, ctl_io_hdr) datamove_queue;
175 * Overall softc structure for the block backend module.
177 struct ctl_be_block_softc {
178 STAILQ_HEAD(, ctl_be_block_io) beio_free_queue;
182 STAILQ_HEAD(, ctl_block_disk) disk_list;
184 STAILQ_HEAD(, ctl_be_block_lun) lun_list;
187 static struct ctl_be_block_softc backend_block_softc;
190 * Per-I/O information.
192 struct ctl_be_block_io {
194 struct ctl_sg_entry sg_segs[CTLBLK_MAX_SEGS];
195 struct iovec xiovecs[CTLBLK_MAX_SEGS];
203 struct bintime ds_t0;
204 devstat_tag_type ds_tag_type;
205 devstat_trans_flags ds_trans_type;
208 struct ctl_be_block_softc *softc;
209 struct ctl_be_block_lun *lun;
210 STAILQ_ENTRY(ctl_be_block_io) links;
213 static int cbb_num_threads = 14;
214 TUNABLE_INT("kern.cam.ctl.block.num_threads", &cbb_num_threads);
215 SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD, 0,
216 "CAM Target Layer Block Backend");
217 SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RW,
218 &cbb_num_threads, 0, "Number of threads per backing file");
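/*
 * Illustrative usage: the default of 14 worker threads per backing store
 * can be overridden at boot via the loader tunable, e.g. in
 * /boot/loader.conf:
 *     kern.cam.ctl.block.num_threads=8
 * or adjusted at runtime (the OID is CTLFLAG_RW) with:
 *     sysctl kern.cam.ctl.block.num_threads=8
 */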
220 static struct ctl_be_block_io *ctl_alloc_beio(struct ctl_be_block_softc *softc);
221 static void ctl_free_beio(struct ctl_be_block_io *beio);
222 static int ctl_grow_beio(struct ctl_be_block_softc *softc, int count);
224 static void ctl_shrink_beio(struct ctl_be_block_softc *softc);
226 static void ctl_complete_beio(struct ctl_be_block_io *beio);
227 static int ctl_be_block_move_done(union ctl_io *io);
228 static void ctl_be_block_biodone(struct bio *bio);
229 static void ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
230 struct ctl_be_block_io *beio);
231 static void ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
232 struct ctl_be_block_io *beio);
233 static void ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
234 struct ctl_be_block_io *beio);
235 static void ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
236 struct ctl_be_block_io *beio);
237 static void ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
239 static void ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
241 static void ctl_be_block_worker(void *context, int pending);
242 static int ctl_be_block_submit(union ctl_io *io);
243 static int ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
244 int flag, struct thread *td);
245 static int ctl_be_block_open_file(struct ctl_be_block_lun *be_lun,
246 struct ctl_lun_req *req);
247 static int ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun,
248 struct ctl_lun_req *req);
249 static int ctl_be_block_close(struct ctl_be_block_lun *be_lun);
250 static int ctl_be_block_open(struct ctl_be_block_softc *softc,
251 struct ctl_be_block_lun *be_lun,
252 struct ctl_lun_req *req);
253 static int ctl_be_block_create(struct ctl_be_block_softc *softc,
254 struct ctl_lun_req *req);
255 static int ctl_be_block_rm(struct ctl_be_block_softc *softc,
256 struct ctl_lun_req *req);
257 static int ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun,
258 struct ctl_lun_req *req);
259 static int ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun,
260 struct ctl_lun_req *req);
261 static int ctl_be_block_modify(struct ctl_be_block_softc *softc,
262 struct ctl_lun_req *req);
263 static void ctl_be_block_lun_shutdown(void *be_lun);
264 static void ctl_be_block_lun_config_status(void *be_lun,
265 ctl_lun_config_status status);
266 static int ctl_be_block_config_write(union ctl_io *io);
267 static int ctl_be_block_config_read(union ctl_io *io);
268 static int ctl_be_block_lun_info(void *be_lun, struct sbuf *sb);
269 int ctl_be_block_init(void);
271 static struct ctl_backend_driver ctl_be_block_driver =
274 .flags = CTL_BE_FLAG_HAS_CONFIG,
275 .init = ctl_be_block_init,
276 .data_submit = ctl_be_block_submit,
277 .data_move_done = ctl_be_block_move_done,
278 .config_read = ctl_be_block_config_read,
279 .config_write = ctl_be_block_config_write,
280 .ioctl = ctl_be_block_ioctl,
281 .lun_info = ctl_be_block_lun_info
284 MALLOC_DEFINE(M_CTLBLK, "ctlblk", "Memory used for CTL block backend");
285 CTL_BACKEND_DECLARE(cbb, ctl_be_block_driver);
287 static struct ctl_be_block_io *
288 ctl_alloc_beio(struct ctl_be_block_softc *softc)
290 struct ctl_be_block_io *beio;
293 mtx_lock(&softc->lock);
295 beio = STAILQ_FIRST(&softc->beio_free_queue);
297 STAILQ_REMOVE(&softc->beio_free_queue, beio,
298 ctl_be_block_io, links);
300 mtx_unlock(&softc->lock);
303 bzero(beio, sizeof(*beio));
310 count = ctl_grow_beio(softc, /*count*/ 10);
313 * This shouldn't be possible, since ctl_grow_beio() uses a
320 * Since we have to drop the lock when we're allocating beio
321 * structures, it's possible someone else can come along and
* allocate the beio structures we've just allocated.
324 mtx_lock(&softc->lock);
325 beio = STAILQ_FIRST(&softc->beio_free_queue);
327 STAILQ_REMOVE(&softc->beio_free_queue, beio,
328 ctl_be_block_io, links);
330 mtx_unlock(&softc->lock);
333 bzero(beio, sizeof(*beio));
342 ctl_free_beio(struct ctl_be_block_io *beio)
344 struct ctl_be_block_softc *softc;
351 for (i = 0; i < beio->num_segs; i++) {
if (beio->sg_segs[i].addr == NULL)
duplicate_free++;
355 uma_zfree(beio->lun->lun_zone, beio->sg_segs[i].addr);
356 beio->sg_segs[i].addr = NULL;
359 if (duplicate_free > 0) {
360 printf("%s: %d duplicate frees out of %d segments\n", __func__,
361 duplicate_free, beio->num_segs);
363 mtx_lock(&softc->lock);
364 STAILQ_INSERT_TAIL(&softc->beio_free_queue, beio, links);
365 mtx_unlock(&softc->lock);
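/*
 * Sketch of the beio pool life cycle: a dispatch routine obtains a
 * structure with ctl_alloc_beio(), fills in the request, and the
 * completion path (ctl_complete_beio()) eventually returns it to
 * beio_free_queue through ctl_free_beio() above.
 */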
369 ctl_grow_beio(struct ctl_be_block_softc *softc, int count)
373 for (i = 0; i < count; i++) {
374 struct ctl_be_block_io *beio;
376 beio = (struct ctl_be_block_io *)malloc(sizeof(*beio),
380 mtx_lock(&softc->lock);
381 STAILQ_INSERT_TAIL(&softc->beio_free_queue, beio, links);
382 mtx_unlock(&softc->lock);
390 ctl_shrink_beio(struct ctl_be_block_softc *softc)
392 struct ctl_be_block_io *beio, *beio_tmp;
394 mtx_lock(&softc->lock);
395 STAILQ_FOREACH_SAFE(beio, &softc->beio_free_queue, links, beio_tmp) {
396 STAILQ_REMOVE(&softc->beio_free_queue, beio,
397 ctl_be_block_io, links);
398 free(beio, M_CTLBLK);
400 mtx_unlock(&softc->lock);
405 ctl_complete_beio(struct ctl_be_block_io *beio)
412 if ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)
413 io_len = beio->io_len;
417 devstat_end_transaction(beio->lun->disk_stats,
422 /*then*/&beio->ds_t0);
429 ctl_be_block_move_done(union ctl_io *io)
431 struct ctl_be_block_io *beio;
432 struct ctl_be_block_lun *be_lun;
434 struct bintime cur_bt;
437 beio = (struct ctl_be_block_io *)
438 io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr;
442 DPRINTF("entered\n");
446 bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
447 bintime_add(&io->io_hdr.dma_bt, &cur_bt);
448 io->io_hdr.num_dmas++;
452 * We set status at this point for read commands, and write
453 * commands with errors.
455 if ((beio->bio_cmd == BIO_READ)
456 && (io->io_hdr.port_status == 0)
457 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
458 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE))
459 ctl_set_success(&io->scsiio);
460 else if ((io->io_hdr.port_status != 0)
461 && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
462 && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
464 * For hardware error sense keys, the sense key
465 * specific value is defined to be a retry count,
466 * but we use it to pass back an internal FETD
467 * error code. XXX KDM Hopefully the FETD is only
468 * using 16 bits for an error code, since that's
469 * all the space we have in the sks field.
471 ctl_set_internal_failure(&io->scsiio,
474 io->io_hdr.port_status);
478 * If this is a read, or a write with errors, it is done.
480 if ((beio->bio_cmd == BIO_READ)
481 || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)
482 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) {
483 ctl_complete_beio(beio);
488 * At this point, we have a write and the DMA completed
489 * successfully. We now have to queue it to the task queue to
490 * execute the backend I/O. That is because we do blocking
491 * memory allocations, and in the file backing case, blocking I/O.
492 * This move done routine is generally called in the SIM's
493 * interrupt context, and therefore we cannot block.
495 mtx_lock(&be_lun->lock);
497 * XXX KDM make sure that links is okay to use at this point.
498 * Otherwise, we either need to add another field to ctl_io_hdr,
499 * or deal with resource allocation here.
501 STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links);
502 mtx_unlock(&be_lun->lock);
504 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
510 ctl_be_block_biodone(struct bio *bio)
512 struct ctl_be_block_io *beio;
513 struct ctl_be_block_lun *be_lun;
516 beio = bio->bio_caller1;
520 DPRINTF("entered\n");
522 mtx_lock(&be_lun->lock);
523 if (bio->bio_error != 0)
526 beio->num_bios_done++;
529 * XXX KDM will this cause WITNESS to complain? Holding a lock
530 * during the free might cause it to complain.
535 * If the send complete bit isn't set, or we aren't the last I/O to
536 * complete, then we're done.
538 if ((beio->send_complete == 0)
539 || (beio->num_bios_done < beio->num_bios_sent)) {
540 mtx_unlock(&be_lun->lock);
545 * At this point, we've verified that we are the last I/O to
546 * complete, so it's safe to drop the lock.
548 mtx_unlock(&be_lun->lock);
551 * If there are any errors from the backing device, we fail the
552 * entire I/O with a medium error.
554 if (beio->num_errors > 0) {
555 if (beio->bio_cmd == BIO_FLUSH) {
/* XXX KDM is there a better error here? */
557 ctl_set_internal_failure(&io->scsiio,
559 /*retry_count*/ 0xbad2);
561 ctl_set_medium_error(&io->scsiio);
562 ctl_complete_beio(beio);
567 * If this is a write or a flush, we're all done.
568 * If this is a read, we can now send the data to the user.
570 if ((beio->bio_cmd == BIO_WRITE)
571 || (beio->bio_cmd == BIO_FLUSH)) {
572 ctl_set_success(&io->scsiio);
573 ctl_complete_beio(beio);
575 io->scsiio.be_move_done = ctl_be_block_move_done;
576 io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
577 io->scsiio.kern_data_len = beio->io_len;
578 io->scsiio.kern_total_len = beio->io_len;
579 io->scsiio.kern_rel_offset = 0;
580 io->scsiio.kern_data_resid = 0;
581 io->scsiio.kern_sg_entries = beio->num_segs;
582 io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
584 getbintime(&io->io_hdr.dma_start_bt);
591 ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
592 struct ctl_be_block_io *beio)
595 struct mount *mountpoint;
596 int error, lock_flags;
598 DPRINTF("entered\n");
602 (void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT);
604 if (MNT_SHARED_WRITES(mountpoint)
605 || ((mountpoint == NULL)
606 && MNT_SHARED_WRITES(be_lun->vn->v_mount)))
607 lock_flags = LK_SHARED;
609 lock_flags = LK_EXCLUSIVE;
611 vn_lock(be_lun->vn, lock_flags | LK_RETRY);
613 binuptime(&beio->ds_t0);
614 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
616 error = VOP_FSYNC(be_lun->vn, MNT_WAIT, curthread);
617 VOP_UNLOCK(be_lun->vn, 0);
619 vn_finished_write(mountpoint);
622 ctl_set_success(&io->scsiio);
/* XXX KDM is there a better error here? */
625 ctl_set_internal_failure(&io->scsiio,
627 /*retry_count*/ 0xbad1);
630 ctl_complete_beio(beio);
633 SDT_PROBE_DEFINE1(cbb, kernel, read, file_start, file_start, "uint64_t");
634 SDT_PROBE_DEFINE1(cbb, kernel, write, file_start, file_start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, read, file_done, file_done, "uint64_t");
636 SDT_PROBE_DEFINE1(cbb, kernel, write, file_done, file_done, "uint64_t");
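/*
 * Illustrative only: these statically-defined tracing probes can be
 * observed from userland with dtrace(1), for example:
 *     dtrace -n 'cbb:kernel:read:file_start { @ios = count(); }'
 */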
639 ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
640 struct ctl_be_block_io *beio)
642 struct ctl_be_block_filedata *file_data;
645 struct iovec *xiovec;
649 DPRINTF("entered\n");
651 file_data = &be_lun->backend.file;
653 flags = beio->bio_flags;
655 if (beio->bio_cmd == BIO_READ) {
656 SDT_PROBE(cbb, kernel, read, file_start, 0, 0, 0, 0, 0);
658 SDT_PROBE(cbb, kernel, write, file_start, 0, 0, 0, 0, 0);
661 bzero(&xuio, sizeof(xuio));
662 if (beio->bio_cmd == BIO_READ)
663 xuio.uio_rw = UIO_READ;
665 xuio.uio_rw = UIO_WRITE;
667 xuio.uio_offset = beio->io_offset;
668 xuio.uio_resid = beio->io_len;
669 xuio.uio_segflg = UIO_SYSSPACE;
670 xuio.uio_iov = beio->xiovecs;
671 xuio.uio_iovcnt = beio->num_segs;
672 xuio.uio_td = curthread;
674 for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) {
675 xiovec->iov_base = beio->sg_segs[i].addr;
676 xiovec->iov_len = beio->sg_segs[i].len;
679 if (beio->bio_cmd == BIO_READ) {
680 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);
682 binuptime(&beio->ds_t0);
683 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
686 * UFS pays attention to IO_DIRECT for reads. If the
687 * DIRECTIO option is configured into the kernel, it calls
688 * ffs_rawread(). But that only works for single-segment
689 * uios with user space addresses. In our case, with a
690 * kernel uio, it still reads into the buffer cache, but it
691 * will just try to release the buffer from the cache later
694 * ZFS does not pay attention to IO_DIRECT for reads.
696 * UFS does not pay attention to IO_SYNC for reads.
698 * ZFS pays attention to IO_SYNC (which translates into the
699 * Solaris define FRSYNC for zfs_read()) for reads. It
700 * attempts to sync the file before reading.
702 * So, to attempt to provide some barrier semantics in the
703 * BIO_ORDERED case, set both IO_DIRECT and IO_SYNC.
705 error = VOP_READ(be_lun->vn, &xuio, (flags & BIO_ORDERED) ?
706 (IO_DIRECT|IO_SYNC) : 0, file_data->cred);
708 VOP_UNLOCK(be_lun->vn, 0);
710 struct mount *mountpoint;
713 (void)vn_start_write(be_lun->vn, &mountpoint, V_WAIT);
715 if (MNT_SHARED_WRITES(mountpoint)
716 || ((mountpoint == NULL)
717 && MNT_SHARED_WRITES(be_lun->vn->v_mount)))
718 lock_flags = LK_SHARED;
720 lock_flags = LK_EXCLUSIVE;
722 vn_lock(be_lun->vn, lock_flags | LK_RETRY);
724 binuptime(&beio->ds_t0);
725 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
728 * UFS pays attention to IO_DIRECT for writes. The write
729 * is done asynchronously. (Normally the write would just
* get put into the cache.)
732 * UFS pays attention to IO_SYNC for writes. It will
* attempt to write the buffer out synchronously if that flag is set.
736 * ZFS does not pay attention to IO_DIRECT for writes.
738 * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC)
739 * for writes. It will flush the transaction from the
740 * cache before returning.
742 * So if we've got the BIO_ORDERED flag set, we want
743 * IO_SYNC in either the UFS or ZFS case.
745 error = VOP_WRITE(be_lun->vn, &xuio, (flags & BIO_ORDERED) ?
746 IO_SYNC : 0, file_data->cred);
747 VOP_UNLOCK(be_lun->vn, 0);
749 vn_finished_write(mountpoint);
753 * If we got an error, set the sense data to "MEDIUM ERROR" and
754 * return the I/O to the user.
759 ctl_scsi_path_string(io, path_str, sizeof(path_str));
761 * XXX KDM ZFS returns ENOSPC when the underlying
* filesystem fills up. What kind of SCSI error should we return in that case?
765 printf("%s%s command returned errno %d\n", path_str,
766 (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", error);
767 ctl_set_medium_error(&io->scsiio);
768 ctl_complete_beio(beio);
773 * If this is a write, we're all done.
774 * If this is a read, we can now send the data to the user.
776 if (beio->bio_cmd == BIO_WRITE) {
777 ctl_set_success(&io->scsiio);
778 SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0);
779 ctl_complete_beio(beio);
781 SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0);
782 io->scsiio.be_move_done = ctl_be_block_move_done;
783 io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
784 io->scsiio.kern_data_len = beio->io_len;
785 io->scsiio.kern_total_len = beio->io_len;
786 io->scsiio.kern_rel_offset = 0;
787 io->scsiio.kern_data_resid = 0;
788 io->scsiio.kern_sg_entries = beio->num_segs;
789 io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
791 getbintime(&io->io_hdr.dma_start_bt);
798 ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
799 struct ctl_be_block_io *beio)
803 struct ctl_be_block_devdata *dev_data;
805 dev_data = &be_lun->backend.dev;
808 DPRINTF("entered\n");
810 /* This can't fail, it's a blocking allocation. */
813 bio->bio_cmd = BIO_FLUSH;
814 bio->bio_flags |= BIO_ORDERED;
815 bio->bio_dev = dev_data->cdev;
818 bio->bio_done = ctl_be_block_biodone;
819 bio->bio_caller1 = beio;
823 * We don't need to acquire the LUN lock here, because we are only
* sending one bio, and so there is no other context to synchronize with.
827 beio->num_bios_sent = 1;
828 beio->send_complete = 1;
830 binuptime(&beio->ds_t0);
831 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);
833 (*dev_data->csw->d_strategy)(bio);
837 ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
838 struct ctl_be_block_io *beio)
842 struct ctl_be_block_devdata *dev_data;
846 DPRINTF("entered\n");
848 dev_data = &be_lun->backend.dev;
851 * We have to limit our I/O size to the maximum supported by the
852 * backend device. Hopefully it is MAXPHYS. If the driver doesn't
853 * set it properly, use DFLTPHYS.
855 max_iosize = dev_data->cdev->si_iosize_max;
856 if (max_iosize < PAGE_SIZE)
857 max_iosize = DFLTPHYS;
859 cur_offset = beio->io_offset;
862 * XXX KDM need to accurately reflect the number of I/Os outstanding
865 binuptime(&beio->ds_t0);
866 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);
868 for (i = 0; i < beio->num_segs; i++) {
872 cur_size = beio->sg_segs[i].len;
873 cur_ptr = beio->sg_segs[i].addr;
875 while (cur_size > 0) {
876 /* This can't fail, it's a blocking allocation. */
879 KASSERT(bio != NULL, ("g_alloc_bio() failed!\n"));
881 bio->bio_cmd = beio->bio_cmd;
882 bio->bio_flags |= beio->bio_flags;
883 bio->bio_dev = dev_data->cdev;
884 bio->bio_caller1 = beio;
885 bio->bio_length = min(cur_size, max_iosize);
886 bio->bio_offset = cur_offset;
887 bio->bio_data = cur_ptr;
888 bio->bio_done = ctl_be_block_biodone;
889 bio->bio_pblkno = cur_offset / be_lun->blocksize;
891 cur_offset += bio->bio_length;
892 cur_ptr += bio->bio_length;
893 cur_size -= bio->bio_length;
896 * Make sure we set the complete bit just before we
* issue the last bio so we don't wind up with a race.
900 * Use the LUN mutex here instead of a combination
901 * of atomic variables for simplicity.
903 * XXX KDM we could have a per-IO lock, but that
904 * would cause additional per-IO setup and teardown
905 * overhead. Hopefully there won't be too much
906 * contention on the LUN lock.
908 mtx_lock(&be_lun->lock);
910 beio->num_bios_sent++;
if ((i == beio->num_segs - 1)
&& (cur_size == 0))
914 beio->send_complete = 1;
916 mtx_unlock(&be_lun->lock);
918 (*dev_data->csw->d_strategy)(bio);
924 ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
927 struct ctl_be_block_io *beio;
928 struct ctl_be_block_softc *softc;
930 DPRINTF("entered\n");
932 softc = be_lun->softc;
933 beio = ctl_alloc_beio(softc);
936 * This should not happen. ctl_alloc_beio() will call
937 * ctl_grow_beio() with a blocking malloc as needed.
938 * A malloc with M_WAITOK should not fail.
940 ctl_set_busy(&io->scsiio);
948 io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr = beio;
950 switch (io->scsiio.cdb[0]) {
951 case SYNCHRONIZE_CACHE:
952 case SYNCHRONIZE_CACHE_16:
953 beio->bio_cmd = BIO_FLUSH;
954 beio->ds_trans_type = DEVSTAT_NO_DATA;
955 beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
957 be_lun->lun_flush(be_lun, beio);
960 panic("Unhandled CDB type %#x", io->scsiio.cdb[0]);
965 SDT_PROBE_DEFINE1(cbb, kernel, read, start, start, "uint64_t");
966 SDT_PROBE_DEFINE1(cbb, kernel, write, start, start, "uint64_t");
967 SDT_PROBE_DEFINE1(cbb, kernel, read, alloc_done, alloc_done, "uint64_t");
968 SDT_PROBE_DEFINE1(cbb, kernel, write, alloc_done, alloc_done, "uint64_t");
971 ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
974 struct ctl_be_block_io *beio;
975 struct ctl_be_block_softc *softc;
976 struct ctl_lba_len lbalen;
977 uint64_t len_left, io_size_bytes;
980 softc = be_lun->softc;
982 DPRINTF("entered\n");
984 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) {
985 SDT_PROBE(cbb, kernel, read, start, 0, 0, 0, 0, 0);
987 SDT_PROBE(cbb, kernel, write, start, 0, 0, 0, 0, 0);
memcpy(&lbalen, io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN].bytes,
sizeof(lbalen));
993 io_size_bytes = lbalen.len * be_lun->blocksize;
996 * XXX KDM this is temporary, until we implement chaining of beio
997 * structures and multiple datamove calls to move all the data in
1000 if (io_size_bytes > CTLBLK_MAX_IO_SIZE) {
1001 printf("%s: IO length %ju > max io size %u\n", __func__,
1002 io_size_bytes, CTLBLK_MAX_IO_SIZE);
1003 ctl_set_invalid_field(&io->scsiio,
1013 beio = ctl_alloc_beio(softc);
1016 * This should not happen. ctl_alloc_beio() will call
1017 * ctl_grow_beio() with a blocking malloc as needed.
1018 * A malloc with M_WAITOK should not fail.
1020 ctl_set_busy(&io->scsiio);
1026 beio->softc = softc;
1028 io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr = beio;
1031 * If the I/O came down with an ordered or head of queue tag, set
1032 * the BIO_ORDERED attribute. For head of queue tags, that's
1033 * pretty much the best we can do.
1035 * XXX KDM we don't have a great way to easily know about the FUA
1036 * bit right now (it is decoded in ctl_read_write(), but we don't
1037 * pass that knowledge to the backend), and in any case we would
1038 * need to determine how to handle it.
1040 if ((io->scsiio.tag_type == CTL_TAG_ORDERED)
1041 || (io->scsiio.tag_type == CTL_TAG_HEAD_OF_QUEUE))
1042 beio->bio_flags = BIO_ORDERED;
1044 switch (io->scsiio.tag_type) {
1045 case CTL_TAG_ORDERED:
1046 beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
1048 case CTL_TAG_HEAD_OF_QUEUE:
1049 beio->ds_tag_type = DEVSTAT_TAG_HEAD;
1051 case CTL_TAG_UNTAGGED:
1052 case CTL_TAG_SIMPLE:
1055 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE;
1060 * This path handles read and write only. The config write path
1061 * handles flush operations.
1063 if ((io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_IN) {
1064 beio->bio_cmd = BIO_READ;
1065 beio->ds_trans_type = DEVSTAT_READ;
1067 beio->bio_cmd = BIO_WRITE;
1068 beio->ds_trans_type = DEVSTAT_WRITE;
1071 beio->io_len = lbalen.len * be_lun->blocksize;
1072 beio->io_offset = lbalen.lba * be_lun->blocksize;
1074 DPRINTF("%s at LBA %jx len %u\n",
1075 (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE",
1076 (uintmax_t)lbalen.lba, lbalen.len);
1078 for (i = 0, len_left = io_size_bytes; i < CTLBLK_MAX_SEGS &&
1079 len_left > 0; i++) {
1082 * Setup the S/G entry for this chunk.
1084 beio->sg_segs[i].len = min(MAXPHYS, len_left);
1085 beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK);
1087 DPRINTF("segment %d addr %p len %zd\n", i,
1088 beio->sg_segs[i].addr, beio->sg_segs[i].len);
beio->num_segs++;
len_left -= beio->sg_segs[i].len;
1095 * For the read case, we need to read the data into our buffers and
1096 * then we can send it back to the user. For the write case, we
1097 * need to get the data from the user first.
1099 if (beio->bio_cmd == BIO_READ) {
1100 SDT_PROBE(cbb, kernel, read, alloc_done, 0, 0, 0, 0, 0);
1101 be_lun->dispatch(be_lun, beio);
1103 SDT_PROBE(cbb, kernel, write, alloc_done, 0, 0, 0, 0, 0);
1104 io->scsiio.be_move_done = ctl_be_block_move_done;
1105 io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
1106 io->scsiio.kern_data_len = beio->io_len;
1107 io->scsiio.kern_total_len = beio->io_len;
1108 io->scsiio.kern_rel_offset = 0;
1109 io->scsiio.kern_data_resid = 0;
1110 io->scsiio.kern_sg_entries = beio->num_segs;
1111 io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
1113 getbintime(&io->io_hdr.dma_start_bt);
1120 ctl_be_block_worker(void *context, int pending)
1122 struct ctl_be_block_lun *be_lun;
1123 struct ctl_be_block_softc *softc;
1126 be_lun = (struct ctl_be_block_lun *)context;
1127 softc = be_lun->softc;
1129 DPRINTF("entered\n");
1131 mtx_lock(&be_lun->lock);
1133 io = (union ctl_io *)STAILQ_FIRST(&be_lun->datamove_queue);
1135 struct ctl_be_block_io *beio;
1137 DPRINTF("datamove queue\n");
1139 STAILQ_REMOVE(&be_lun->datamove_queue, &io->io_hdr,
1142 mtx_unlock(&be_lun->lock);
1144 beio = (struct ctl_be_block_io *)
1145 io->io_hdr.ctl_private[CTL_PRIV_BACKEND].ptr;
1147 be_lun->dispatch(be_lun, beio);
1149 mtx_lock(&be_lun->lock);
1152 io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_write_queue);
1155 DPRINTF("config write queue\n");
1157 STAILQ_REMOVE(&be_lun->config_write_queue, &io->io_hdr,
1160 mtx_unlock(&be_lun->lock);
1162 ctl_be_block_cw_dispatch(be_lun, io);
1164 mtx_lock(&be_lun->lock);
1167 io = (union ctl_io *)STAILQ_FIRST(&be_lun->input_queue);
1169 DPRINTF("input queue\n");
1171 STAILQ_REMOVE(&be_lun->input_queue, &io->io_hdr,
1173 mtx_unlock(&be_lun->lock);
1176 * We must drop the lock, since this routine and
1177 * its children may sleep.
1179 ctl_be_block_dispatch(be_lun, io);
1181 mtx_lock(&be_lun->lock);
1186 * If we get here, there is no work left in the queues, so
1187 * just break out and let the task queue go to sleep.
1191 mtx_unlock(&be_lun->lock);
1195 * Entry point from CTL to the backend for I/O. We queue everything to a
* work thread, so this just puts the I/O on a queue and wakes up the worker thread.
1200 ctl_be_block_submit(union ctl_io *io)
1202 struct ctl_be_block_lun *be_lun;
1203 struct ctl_be_lun *ctl_be_lun;
1206 DPRINTF("entered\n");
1208 retval = CTL_RETVAL_COMPLETE;
1210 ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
1211 CTL_PRIV_BACKEND_LUN].ptr;
1212 be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun;
1215 * Make sure we only get SCSI I/O.
1217 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Non-SCSI I/O (type "
1218 "%#x) encountered", io->io_hdr.io_type));
1220 mtx_lock(&be_lun->lock);
1222 * XXX KDM make sure that links is okay to use at this point.
1223 * Otherwise, we either need to add another field to ctl_io_hdr,
1224 * or deal with resource allocation here.
1226 STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links);
1227 mtx_unlock(&be_lun->lock);
1229 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
1235 ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
1236 int flag, struct thread *td)
1238 struct ctl_be_block_softc *softc;
1241 softc = &backend_block_softc;
1247 struct ctl_lun_req *lun_req;
1249 lun_req = (struct ctl_lun_req *)addr;
1251 switch (lun_req->reqtype) {
1252 case CTL_LUNREQ_CREATE:
1253 error = ctl_be_block_create(softc, lun_req);
1256 error = ctl_be_block_rm(softc, lun_req);
1258 case CTL_LUNREQ_MODIFY:
1259 error = ctl_be_block_modify(softc, lun_req);
1262 lun_req->status = CTL_LUN_ERROR;
1263 snprintf(lun_req->error_str, sizeof(lun_req->error_str),
1264 "%s: invalid LUN request type %d", __func__,
1279 ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
1281 struct ctl_be_block_filedata *file_data;
1282 struct ctl_lun_create_params *params;
1287 file_data = &be_lun->backend.file;
1288 params = &req->reqdata.create;
1290 be_lun->dev_type = CTL_BE_BLOCK_FILE;
1291 be_lun->dispatch = ctl_be_block_dispatch_file;
1292 be_lun->lun_flush = ctl_be_block_flush_file;
1294 error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred);
1296 snprintf(req->error_str, sizeof(req->error_str),
1297 "error calling VOP_GETATTR() for file %s",
1303 * Verify that we have the ability to upgrade to exclusive
1304 * access on this file so we can trap errors at open instead
1305 * of reporting them during first access.
1307 if (VOP_ISLOCKED(be_lun->vn) != LK_EXCLUSIVE) {
1308 vn_lock(be_lun->vn, LK_UPGRADE | LK_RETRY);
1309 if (be_lun->vn->v_iflag & VI_DOOMED) {
1311 snprintf(req->error_str, sizeof(req->error_str),
1312 "error locking file %s", be_lun->dev_path);
1318 file_data->cred = crhold(curthread->td_ucred);
1319 if (params->lun_size_bytes != 0)
1320 be_lun->size_bytes = params->lun_size_bytes;
1322 be_lun->size_bytes = vattr.va_size;
1324 * We set the multi thread flag for file operations because all
1325 * filesystems (in theory) are capable of allowing multiple readers
* of a file at once. So we want to get the maximum possible concurrency.
1329 be_lun->flags |= CTL_BE_BLOCK_LUN_MULTI_THREAD;
1332 * XXX KDM vattr.va_blocksize may be larger than 512 bytes here.
1333 * With ZFS, it is 131072 bytes. Block sizes that large don't work
1334 * with disklabel and UFS on FreeBSD at least. Large block sizes
1335 * may not work with other OSes as well. So just export a sector
1336 * size of 512 bytes, which should work with any OS or
1337 * application. Since our backing is a file, any block size will
1338 * work fine for the backing store.
#if 0
be_lun->blocksize = vattr.va_blocksize;
#endif
1343 if (params->blocksize_bytes != 0)
1344 be_lun->blocksize = params->blocksize_bytes;
1346 be_lun->blocksize = 512;
* Sanity check. The media size has to be at least one block in size.
1352 if (be_lun->size_bytes < be_lun->blocksize) {
1354 snprintf(req->error_str, sizeof(req->error_str),
1355 "file %s size %ju < block size %u", be_lun->dev_path,
1356 (uintmax_t)be_lun->size_bytes, be_lun->blocksize);
1362 ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
1364 struct ctl_lun_create_params *params;
1367 struct cdevsw *devsw;
1370 params = &req->reqdata.create;
1372 be_lun->dev_type = CTL_BE_BLOCK_DEV;
1373 be_lun->dispatch = ctl_be_block_dispatch_dev;
1374 be_lun->lun_flush = ctl_be_block_flush_dev;
1375 be_lun->backend.dev.cdev = be_lun->vn->v_rdev;
1376 be_lun->backend.dev.csw = dev_refthread(be_lun->backend.dev.cdev,
1377 &be_lun->backend.dev.dev_ref);
1378 if (be_lun->backend.dev.csw == NULL)
1379 panic("Unable to retrieve device switch");
1381 error = VOP_GETATTR(be_lun->vn, &vattr, NOCRED);
1383 snprintf(req->error_str, sizeof(req->error_str),
1384 "%s: error getting vnode attributes for device %s",
1385 __func__, be_lun->dev_path);
1389 dev = be_lun->vn->v_rdev;
1390 devsw = dev->si_devsw;
1391 if (!devsw->d_ioctl) {
1392 snprintf(req->error_str, sizeof(req->error_str),
1393 "%s: no d_ioctl for device %s!", __func__,
1398 error = devsw->d_ioctl(dev, DIOCGSECTORSIZE,
1399 (caddr_t)&be_lun->blocksize, FREAD,
1402 snprintf(req->error_str, sizeof(req->error_str),
1403 "%s: error %d returned for DIOCGSECTORSIZE ioctl "
1404 "on %s!", __func__, error, be_lun->dev_path);
1409 * If the user has asked for a blocksize that is greater than the
1410 * backing device's blocksize, we can do it only if the blocksize
1411 * the user is asking for is an even multiple of the underlying
1412 * device's blocksize.
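/*
 * Example: on a backing device with 512-byte sectors, a requested
 * blocksize of 4096 is accepted (4096 = 8 * 512), while a request for
 * 520 bytes is rejected because it is not a whole multiple of 512.
 */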
1414 if ((params->blocksize_bytes != 0)
1415 && (params->blocksize_bytes > be_lun->blocksize)) {
1416 uint32_t bs_multiple, tmp_blocksize;
1418 bs_multiple = params->blocksize_bytes / be_lun->blocksize;
1420 tmp_blocksize = bs_multiple * be_lun->blocksize;
1422 if (tmp_blocksize == params->blocksize_bytes) {
1423 be_lun->blocksize = params->blocksize_bytes;
1425 snprintf(req->error_str, sizeof(req->error_str),
1426 "%s: requested blocksize %u is not an even "
1427 "multiple of backing device blocksize %u",
1428 __func__, params->blocksize_bytes,
1433 } else if ((params->blocksize_bytes != 0)
1434 && (params->blocksize_bytes != be_lun->blocksize)) {
1435 snprintf(req->error_str, sizeof(req->error_str),
1436 "%s: requested blocksize %u < backing device "
1437 "blocksize %u", __func__, params->blocksize_bytes,
1442 error = devsw->d_ioctl(dev, DIOCGMEDIASIZE,
1443 (caddr_t)&be_lun->size_bytes, FREAD,
1446 snprintf(req->error_str, sizeof(req->error_str),
1447 "%s: error %d returned for DIOCGMEDIASIZE "
1448 " ioctl on %s!", __func__, error,
1453 if (params->lun_size_bytes != 0) {
1454 if (params->lun_size_bytes > be_lun->size_bytes) {
1455 snprintf(req->error_str, sizeof(req->error_str),
1456 "%s: requested LUN size %ju > backing device "
1457 "size %ju", __func__,
1458 (uintmax_t)params->lun_size_bytes,
1459 (uintmax_t)be_lun->size_bytes);
1463 be_lun->size_bytes = params->lun_size_bytes;
1470 ctl_be_block_close(struct ctl_be_block_lun *be_lun)
1474 int flags = FREAD | FWRITE;
1476 switch (be_lun->dev_type) {
1477 case CTL_BE_BLOCK_DEV:
1478 if (be_lun->backend.dev.csw) {
1479 dev_relthread(be_lun->backend.dev.cdev,
1480 be_lun->backend.dev.dev_ref);
1481 be_lun->backend.dev.csw = NULL;
1482 be_lun->backend.dev.cdev = NULL;
1485 case CTL_BE_BLOCK_FILE:
1487 case CTL_BE_BLOCK_NONE:
1489 panic("Unexpected backend type.");
1493 (void)vn_close(be_lun->vn, flags, NOCRED, curthread);
1496 switch (be_lun->dev_type) {
1497 case CTL_BE_BLOCK_DEV:
1499 case CTL_BE_BLOCK_FILE:
1500 if (be_lun->backend.file.cred != NULL) {
1501 crfree(be_lun->backend.file.cred);
1502 be_lun->backend.file.cred = NULL;
1505 case CTL_BE_BLOCK_NONE:
1507 panic("Unexpected backend type.");
1517 ctl_be_block_open(struct ctl_be_block_softc *softc,
1518 struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
1520 struct nameidata nd;
1525 * XXX KDM allow a read-only option?
1527 flags = FREAD | FWRITE;
1530 if (rootvnode == NULL) {
1531 snprintf(req->error_str, sizeof(req->error_str),
1532 "%s: Root filesystem is not mounted", __func__);
1536 if (!curthread->td_proc->p_fd->fd_cdir) {
1537 curthread->td_proc->p_fd->fd_cdir = rootvnode;
1540 if (!curthread->td_proc->p_fd->fd_rdir) {
1541 curthread->td_proc->p_fd->fd_rdir = rootvnode;
1544 if (!curthread->td_proc->p_fd->fd_jdir) {
1545 curthread->td_proc->p_fd->fd_jdir = rootvnode;
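/*
 * Explanatory note: the assignments above give this kernel thread
 * current, root and jail directories (pointing at rootvnode) so that
 * the pathname lookup below can resolve dev_path even when the ioctl
 * arrives from a context that never had them set.
 */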
1550 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, be_lun->dev_path, curthread);
1551 error = vn_open(&nd, &flags, 0, NULL);
1554 * This is the only reasonable guess we can make as far as
1555 * path if the user doesn't give us a fully qualified path.
1556 * If they want to specify a file, they need to specify the
1559 if (be_lun->dev_path[0] != '/') {
1560 char *dev_path = "/dev/";
1563 /* Try adding device path at beginning of name */
1564 dev_name = malloc(strlen(be_lun->dev_path)
1565 + strlen(dev_path) + 1,
1566 M_CTLBLK, M_WAITOK);
sprintf(dev_name, "%s%s", dev_path,
be_lun->dev_path);
1570 free(be_lun->dev_path, M_CTLBLK);
1571 be_lun->dev_path = dev_name;
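/*
 * For example, a request naming just "da0" is rewritten here to
 * "/dev/da0" before the open is attempted with the new path.
 */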
1575 snprintf(req->error_str, sizeof(req->error_str),
1576 "%s: error opening %s", __func__, be_lun->dev_path);
1580 NDFREE(&nd, NDF_ONLY_PNBUF);
1582 be_lun->vn = nd.ni_vp;
1584 /* We only support disks and files. */
1585 if (vn_isdisk(be_lun->vn, &error)) {
1586 error = ctl_be_block_open_dev(be_lun, req);
1587 } else if (be_lun->vn->v_type == VREG) {
1588 error = ctl_be_block_open_file(be_lun, req);
1591 snprintf(req->error_str, sizeof(req->error_str),
1592 "%s is not a disk or file", be_lun->dev_path);
1594 VOP_UNLOCK(be_lun->vn, 0);
1597 ctl_be_block_close(be_lun);
1601 be_lun->blocksize_shift = fls(be_lun->blocksize) - 1;
1602 be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift;
1608 ctl_be_block_mem_ctor(void *mem, int size, void *arg, int flags)
1614 ctl_be_block_mem_dtor(void *mem, int size, void *arg)
1620 ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
1622 struct ctl_be_block_lun *be_lun;
1623 struct ctl_lun_create_params *params;
1624 struct ctl_be_arg *file_arg;
1626 int retval, num_threads;
1629 params = &req->reqdata.create;
1632 num_threads = cbb_num_threads;
1636 be_lun = malloc(sizeof(*be_lun), M_CTLBLK, M_ZERO | M_WAITOK);
1638 be_lun->softc = softc;
1639 STAILQ_INIT(&be_lun->input_queue);
1640 STAILQ_INIT(&be_lun->config_write_queue);
1641 STAILQ_INIT(&be_lun->datamove_queue);
1642 STAILQ_INIT(&be_lun->ctl_be_lun.options);
1643 sprintf(be_lun->lunname, "cblk%d", softc->num_luns);
1644 mtx_init(&be_lun->lock, be_lun->lunname, NULL, MTX_DEF);
1646 be_lun->lun_zone = uma_zcreate(be_lun->lunname, MAXPHYS,
1647 ctl_be_block_mem_ctor, ctl_be_block_mem_dtor, NULL, NULL,
1648 /*align*/ 0, /*flags*/0);
1650 if (be_lun->lun_zone == NULL) {
1651 snprintf(req->error_str, sizeof(req->error_str),
1652 "%s: error allocating UMA zone", __func__);
1656 if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
1657 be_lun->ctl_be_lun.lun_type = params->device_type;
1659 be_lun->ctl_be_lun.lun_type = T_DIRECT;
1661 if (be_lun->ctl_be_lun.lun_type == T_DIRECT) {
1662 for (i = 0; i < req->num_be_args; i++) {
1663 if (strcmp(req->kern_be_args[i].kname, "file") == 0) {
1664 file_arg = &req->kern_be_args[i];
1669 if (file_arg == NULL) {
1670 snprintf(req->error_str, sizeof(req->error_str),
1671 "%s: no file argument specified", __func__);
1675 be_lun->dev_path = malloc(file_arg->vallen, M_CTLBLK,
1678 strlcpy(be_lun->dev_path, (char *)file_arg->kvalue,
1681 retval = ctl_be_block_open(softc, be_lun, req);
1688 * Tell the user the size of the file/device.
1690 params->lun_size_bytes = be_lun->size_bytes;
1693 * The maximum LBA is the size - 1.
1695 be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
1698 * For processor devices, we don't have any size.
1700 be_lun->blocksize = 0;
1701 be_lun->size_blocks = 0;
1702 be_lun->size_bytes = 0;
1703 be_lun->ctl_be_lun.maxlba = 0;
1704 params->lun_size_bytes = 0;
1707 * Default to just 1 thread for processor devices.
1713 * XXX This searching loop might be refactored to be combined with
1716 for (i = 0; i < req->num_be_args; i++) {
1717 if (strcmp(req->kern_be_args[i].kname, "num_threads") == 0) {
1718 struct ctl_be_arg *thread_arg;
1719 char num_thread_str[16];
1720 int tmp_num_threads;
1723 thread_arg = &req->kern_be_args[i];
1725 strlcpy(num_thread_str, (char *)thread_arg->kvalue,
1726 min(thread_arg->vallen,
1727 sizeof(num_thread_str)));
1729 tmp_num_threads = strtol(num_thread_str, NULL, 0);
1732 * We don't let the user specify less than one
* thread, but we trust them not to specify an
* unreasonable number such as 1000 threads.
1736 if (tmp_num_threads < 1) {
1737 snprintf(req->error_str, sizeof(req->error_str),
1738 "%s: invalid number of threads %s",
1739 __func__, num_thread_str);
1743 num_threads = tmp_num_threads;
1744 } else if (strcmp(req->kern_be_args[i].kname, "file") != 0 &&
1745 strcmp(req->kern_be_args[i].kname, "dev") != 0) {
1746 struct ctl_be_lun_option *opt;
1748 opt = malloc(sizeof(*opt), M_CTLBLK, M_WAITOK);
1749 opt->name = malloc(strlen(req->kern_be_args[i].kname) + 1, M_CTLBLK, M_WAITOK);
1750 strcpy(opt->name, req->kern_be_args[i].kname);
1751 opt->value = malloc(strlen(req->kern_be_args[i].kvalue) + 1, M_CTLBLK, M_WAITOK);
1752 strcpy(opt->value, req->kern_be_args[i].kvalue);
1753 STAILQ_INSERT_TAIL(&be_lun->ctl_be_lun.options, opt, links);
1757 be_lun->flags = CTL_BE_BLOCK_LUN_UNCONFIGURED;
1758 be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY;
1759 be_lun->ctl_be_lun.be_lun = be_lun;
1760 be_lun->ctl_be_lun.blocksize = be_lun->blocksize;
1761 /* Tell the user the blocksize we ended up using */
1762 params->blocksize_bytes = be_lun->blocksize;
1763 if (params->flags & CTL_LUN_FLAG_ID_REQ) {
1764 be_lun->ctl_be_lun.req_lun_id = params->req_lun_id;
1765 be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ;
1767 be_lun->ctl_be_lun.req_lun_id = 0;
1769 be_lun->ctl_be_lun.lun_shutdown = ctl_be_block_lun_shutdown;
1770 be_lun->ctl_be_lun.lun_config_status =
1771 ctl_be_block_lun_config_status;
1772 be_lun->ctl_be_lun.be = &ctl_be_block_driver;
1774 if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
1775 snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
1777 strncpy((char *)be_lun->ctl_be_lun.serial_num, tmpstr,
1778 ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
1781 /* Tell the user what we used for a serial number */
1782 strncpy((char *)params->serial_num, tmpstr,
1783 ctl_min(sizeof(params->serial_num), sizeof(tmpstr)));
1785 strncpy((char *)be_lun->ctl_be_lun.serial_num,
1787 ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
1788 sizeof(params->serial_num)));
1790 if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
1791 snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
1792 strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr,
1793 ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
1796 /* Tell the user what we used for a device ID */
1797 strncpy((char *)params->device_id, tmpstr,
1798 ctl_min(sizeof(params->device_id), sizeof(tmpstr)));
1800 strncpy((char *)be_lun->ctl_be_lun.device_id,
1802 ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
1803 sizeof(params->device_id)));
1806 TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_be_block_worker, be_lun);
1808 be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
1809 taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
1811 if (be_lun->io_taskqueue == NULL) {
1812 snprintf(req->error_str, sizeof(req->error_str),
1813 "%s: Unable to create taskqueue", __func__);
1818 * Note that we start the same number of threads by default for
1819 * both the file case and the block device case. For the file
1820 * case, we need multiple threads to allow concurrency, because the
1821 * vnode interface is designed to be a blocking interface. For the
1822 * block device case, ZFS zvols at least will block the caller's
1823 * context in many instances, and so we need multiple threads to
1824 * overcome that problem. Other block devices don't need as many
1825 * threads, but they shouldn't cause too many problems.
1827 * If the user wants to just have a single thread for a block
1828 * device, he can specify that when the LUN is created, or change
1829 * the tunable/sysctl to alter the default number of threads.
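/*
 * Illustrative only (assuming the ctladm(8) option syntax): the per-LUN
 * thread count can be set at creation time with the "num_threads"
 * backend argument parsed above, e.g.:
 *     ctladm create -b block -o file=/dev/zvol/tank/lun0 -o num_threads=4
 */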
1831 retval = taskqueue_start_threads(&be_lun->io_taskqueue,
1832 /*num threads*/num_threads,
1835 "%s taskq", be_lun->lunname);
1840 be_lun->num_threads = num_threads;
1842 mtx_lock(&softc->lock);
1844 STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);
1846 mtx_unlock(&softc->lock);
1848 retval = ctl_add_lun(&be_lun->ctl_be_lun);
1850 mtx_lock(&softc->lock);
STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun,
links);
1854 mtx_unlock(&softc->lock);
1855 snprintf(req->error_str, sizeof(req->error_str),
1856 "%s: ctl_add_lun() returned error %d, see dmesg for "
1857 "details", __func__, retval);
1862 mtx_lock(&softc->lock);
1865 * Tell the config_status routine that we're waiting so it won't
1866 * clean up the LUN in the event of an error.
1868 be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING;
1870 while (be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) {
1871 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0);
1872 if (retval == EINTR)
1875 be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING;
1877 if (be_lun->flags & CTL_BE_BLOCK_LUN_CONFIG_ERR) {
1878 snprintf(req->error_str, sizeof(req->error_str),
1879 "%s: LUN configuration error, see dmesg for details",
STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun,
links);
1884 mtx_unlock(&softc->lock);
1887 params->req_lun_id = be_lun->ctl_be_lun.lun_id;
1890 mtx_unlock(&softc->lock);
1892 be_lun->disk_stats = devstat_new_entry("cbb", params->req_lun_id,
1894 DEVSTAT_ALL_SUPPORTED,
1895 be_lun->ctl_be_lun.lun_type
1896 | DEVSTAT_TYPE_IF_OTHER,
1897 DEVSTAT_PRIORITY_OTHER);
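/*
 * Note: the devstat entry registered above should show up in userland
 * devstat(3) consumers such as iostat(8) under the "cbb" name with the
 * LUN id as its unit number.
 */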
1900 req->status = CTL_LUN_OK;
1905 req->status = CTL_LUN_ERROR;
1907 ctl_be_block_close(be_lun);
1909 free(be_lun->dev_path, M_CTLBLK);
1910 free(be_lun, M_CTLBLK);
1916 ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
1918 struct ctl_lun_rm_params *params;
1919 struct ctl_be_block_lun *be_lun;
1922 params = &req->reqdata.rm;
1924 mtx_lock(&softc->lock);
1928 STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
if (be_lun->ctl_be_lun.lun_id == params->lun_id)
break;
}
1932 mtx_unlock(&softc->lock);
1934 if (be_lun == NULL) {
1935 snprintf(req->error_str, sizeof(req->error_str),
1936 "%s: LUN %u is not managed by the block backend",
1937 __func__, params->lun_id);
1941 retval = ctl_disable_lun(&be_lun->ctl_be_lun);
1944 snprintf(req->error_str, sizeof(req->error_str),
1945 "%s: error %d returned from ctl_disable_lun() for "
1946 "LUN %d", __func__, retval, params->lun_id);
1951 retval = ctl_invalidate_lun(&be_lun->ctl_be_lun);
1953 snprintf(req->error_str, sizeof(req->error_str),
1954 "%s: error %d returned from ctl_invalidate_lun() for "
1955 "LUN %d", __func__, retval, params->lun_id);
1959 mtx_lock(&softc->lock);
1961 be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING;
1963 while ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) {
1964 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0);
1965 if (retval == EINTR)
1969 be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING;
1971 if ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) {
1972 snprintf(req->error_str, sizeof(req->error_str),
1973 "%s: interrupted waiting for LUN to be freed",
1975 mtx_unlock(&softc->lock);
1979 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, links);
1982 mtx_unlock(&softc->lock);
1984 taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task);
1986 taskqueue_free(be_lun->io_taskqueue);
1988 ctl_be_block_close(be_lun);
1990 if (be_lun->disk_stats != NULL)
1991 devstat_remove_entry(be_lun->disk_stats);
1993 uma_zdestroy(be_lun->lun_zone);
1995 free(be_lun->dev_path, M_CTLBLK);
1997 free(be_lun, M_CTLBLK);
1999 req->status = CTL_LUN_OK;
2005 req->status = CTL_LUN_ERROR;
2011 ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun,
2012 struct ctl_lun_req *req)
2016 struct ctl_lun_modify_params *params;
2018 params = &req->reqdata.modify;
2020 if (params->lun_size_bytes != 0) {
be_lun->size_bytes = params->lun_size_bytes;
} else {
2023 error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred);
2025 snprintf(req->error_str, sizeof(req->error_str),
2026 "error calling VOP_GETATTR() for file %s",
2031 be_lun->size_bytes = vattr.va_size;
2038 ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun,
2039 struct ctl_lun_req *req)
2042 struct cdevsw *devsw;
2044 struct ctl_lun_modify_params *params;
2045 uint64_t size_bytes;
2047 params = &req->reqdata.modify;
2049 dev = be_lun->vn->v_rdev;
2050 devsw = dev->si_devsw;
2051 if (!devsw->d_ioctl) {
2052 snprintf(req->error_str, sizeof(req->error_str),
2053 "%s: no d_ioctl for device %s!", __func__,
2058 error = devsw->d_ioctl(dev, DIOCGMEDIASIZE,
2059 (caddr_t)&size_bytes, FREAD,
2062 snprintf(req->error_str, sizeof(req->error_str),
2063 "%s: error %d returned for DIOCGMEDIASIZE ioctl "
2064 "on %s!", __func__, error, be_lun->dev_path);
2068 if (params->lun_size_bytes != 0) {
2069 if (params->lun_size_bytes > size_bytes) {
2070 snprintf(req->error_str, sizeof(req->error_str),
2071 "%s: requested LUN size %ju > backing device "
2072 "size %ju", __func__,
2073 (uintmax_t)params->lun_size_bytes,
2074 (uintmax_t)size_bytes);
2078 be_lun->size_bytes = params->lun_size_bytes;
2080 be_lun->size_bytes = size_bytes;
2087 ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
2089 struct ctl_lun_modify_params *params;
2090 struct ctl_be_block_lun *be_lun;
2093 params = &req->reqdata.modify;
2095 mtx_lock(&softc->lock);
2099 STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
if (be_lun->ctl_be_lun.lun_id == params->lun_id)
break;
}
2103 mtx_unlock(&softc->lock);
2105 if (be_lun == NULL) {
2106 snprintf(req->error_str, sizeof(req->error_str),
2107 "%s: LUN %u is not managed by the block backend",
2108 __func__, params->lun_id);
2112 if (params->lun_size_bytes != 0) {
2113 if (params->lun_size_bytes < be_lun->blocksize) {
2114 snprintf(req->error_str, sizeof(req->error_str),
2115 "%s: LUN size %ju < blocksize %u", __func__,
(uintmax_t)params->lun_size_bytes, be_lun->blocksize);
2121 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);
2123 if (be_lun->vn->v_type == VREG)
2124 error = ctl_be_block_modify_file(be_lun, req);
2126 error = ctl_be_block_modify_dev(be_lun, req);
2128 VOP_UNLOCK(be_lun->vn, 0);
2133 be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift;
2136 * The maximum LBA is the size - 1.
2138 * XXX: Note that this field is being updated without locking,
2139 * which might cause problems on 32-bit architectures.
2141 be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
2142 ctl_lun_capacity_changed(&be_lun->ctl_be_lun);
2144 /* Tell the user the exact size we ended up using */
2145 params->lun_size_bytes = be_lun->size_bytes;
2147 req->status = CTL_LUN_OK;
2152 req->status = CTL_LUN_ERROR;
2158 ctl_be_block_lun_shutdown(void *be_lun)
2160 struct ctl_be_block_lun *lun;
2161 struct ctl_be_block_softc *softc;
2163 lun = (struct ctl_be_block_lun *)be_lun;
2167 mtx_lock(&softc->lock);
2168 lun->flags |= CTL_BE_BLOCK_LUN_UNCONFIGURED;
2169 if (lun->flags & CTL_BE_BLOCK_LUN_WAITING)
2171 mtx_unlock(&softc->lock);
2176 ctl_be_block_lun_config_status(void *be_lun, ctl_lun_config_status status)
2178 struct ctl_be_block_lun *lun;
2179 struct ctl_be_block_softc *softc;
2181 lun = (struct ctl_be_block_lun *)be_lun;
2184 if (status == CTL_LUN_CONFIG_OK) {
2185 mtx_lock(&softc->lock);
2186 lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED;
2187 if (lun->flags & CTL_BE_BLOCK_LUN_WAITING)
2189 mtx_unlock(&softc->lock);
* We successfully added the LUN; now attempt to enable it.
2194 if (ctl_enable_lun(&lun->ctl_be_lun) != 0) {
2195 printf("%s: ctl_enable_lun() failed!\n", __func__);
2196 if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) {
2197 printf("%s: ctl_invalidate_lun() failed!\n",
2206 mtx_lock(&softc->lock);
2207 lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED;
2208 lun->flags |= CTL_BE_BLOCK_LUN_CONFIG_ERR;
2210 mtx_unlock(&softc->lock);
2215 ctl_be_block_config_write(union ctl_io *io)
2217 struct ctl_be_block_lun *be_lun;
2218 struct ctl_be_lun *ctl_be_lun;
2223 DPRINTF("entered\n");
2225 ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
2226 CTL_PRIV_BACKEND_LUN].ptr;
2227 be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun;
2229 switch (io->scsiio.cdb[0]) {
2230 case SYNCHRONIZE_CACHE:
2231 case SYNCHRONIZE_CACHE_16:
2233 * The upper level CTL code will filter out any CDBs with
2234 * the immediate bit set and return the proper error.
2236 * We don't really need to worry about what LBA range the
2237 * user asked to be synced out. When they issue a sync
2238 * cache command, we'll sync out the whole thing.
2240 mtx_lock(&be_lun->lock);
2241 STAILQ_INSERT_TAIL(&be_lun->config_write_queue, &io->io_hdr,
2243 mtx_unlock(&be_lun->lock);
2244 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
2246 case START_STOP_UNIT: {
2247 struct scsi_start_stop_unit *cdb;
2249 cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
2251 if (cdb->how & SSS_START)
2252 retval = ctl_start_lun(ctl_be_lun);
2254 retval = ctl_stop_lun(ctl_be_lun);
2256 * XXX KDM Copan-specific offline behavior.
2257 * Figure out a reasonable way to port this?
2261 && (cdb->byte2 & SSS_ONOFFLINE))
2262 retval = ctl_lun_offline(ctl_be_lun);
2267 * In general, the above routines should not fail. They
2268 * just set state for the LUN. So we've got something
2269 * pretty wrong here if we can't start or stop the LUN.
2272 ctl_set_internal_failure(&io->scsiio,
2274 /*retry_count*/ 0xf051);
2275 retval = CTL_RETVAL_COMPLETE;
2277 ctl_set_success(&io->scsiio);
2279 ctl_config_write_done(io);
2283 ctl_set_invalid_opcode(&io->scsiio);
2284 ctl_config_write_done(io);
2285 retval = CTL_RETVAL_COMPLETE;
2294 ctl_be_block_config_read(union ctl_io *io)
2300 ctl_be_block_lun_info(void *be_lun, struct sbuf *sb)
2302 struct ctl_be_block_lun *lun;
2305 lun = (struct ctl_be_block_lun *)be_lun;
2308 retval = sbuf_printf(sb, "<num_threads>");
2313 retval = sbuf_printf(sb, "%d", lun->num_threads);
2318 retval = sbuf_printf(sb, "</num_threads>");
2321 * For processor devices, we don't have a path variable.
2324 || (lun->dev_path == NULL))
2327 retval = sbuf_printf(sb, "<file>");
2332 retval = ctl_sbuf_printf_esc(sb, lun->dev_path);
2337 retval = sbuf_printf(sb, "</file>\n");
2345 ctl_be_block_init(void)
2347 struct ctl_be_block_softc *softc;
2350 softc = &backend_block_softc;
2353 mtx_init(&softc->lock, "ctlblk", NULL, MTX_DEF);
2354 STAILQ_INIT(&softc->beio_free_queue);
2355 STAILQ_INIT(&softc->disk_list);
2356 STAILQ_INIT(&softc->lun_list);
2357 ctl_grow_beio(softc, 200);