/*-
 * Copyright (c) 2003 Silicon Graphics International Corp.
 * Copyright (c) 2009-2011 Spectra Logic Corporation
 * Copyright (c) 2012 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_block.c#5 $
 */
/*
 * CAM Target Layer driver backend for block devices.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/kthread.h>
#include <sys/bio.h>
#include <sys/fcntl.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/endian.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/taskqueue.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/mount.h>
#include <sys/disk.h>
#include <sys/filedesc.h>
#include <sys/proc.h>
#include <sys/pcpu.h>
#include <sys/module.h>
#include <sys/sdt.h>
#include <sys/devicestat.h>
#include <sys/sysctl.h>

#include <geom/geom.h>

#include <cam/cam.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_frontend_internal.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_scsi_all.h>
#include <cam/ctl/ctl_error.h>
/*
 * The idea here is that we'll allocate enough S/G space to hold a 1MB
 * I/O.  If we get an I/O larger than that, we'll split it.
 */
#define	CTLBLK_HALF_IO_SIZE	(512 * 1024)
#define	CTLBLK_MAX_IO_SIZE	(CTLBLK_HALF_IO_SIZE * 2)
#define	CTLBLK_MAX_SEG		MAXPHYS
#define	CTLBLK_HALF_SEGS	MAX(CTLBLK_HALF_IO_SIZE / CTLBLK_MAX_SEG, 1)
#define	CTLBLK_MAX_SEGS		(CTLBLK_HALF_SEGS * 2)
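
/*
 * As a worked example (assuming the common MAXPHYS of 128KB): each S/G
 * segment is 128KB, so CTLBLK_HALF_SEGS = MAX(512KB / 128KB, 1) = 4 and
 * CTLBLK_MAX_SEGS = 8, i.e. eight 128KB segments cover one full 1MB I/O.
 * The "half" variants exist because compare operations need two equal
 * S/G lists in the same array.
 */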
#ifdef CTLBLK_DEBUG
#define DPRINTF(fmt, args...) \
    printf("cbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
#else
#define DPRINTF(fmt, args...) do {} while(0)
#endif

#define PRIV(io)	\
    ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND])
#define ARGS(io)	\
    ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN])
SDT_PROVIDER_DEFINE(cbb);

typedef enum {
	CTL_BE_BLOCK_LUN_UNCONFIGURED	= 0x01,
	CTL_BE_BLOCK_LUN_CONFIG_ERR	= 0x02,
	CTL_BE_BLOCK_LUN_WAITING	= 0x04,
	CTL_BE_BLOCK_LUN_MULTI_THREAD	= 0x08
} ctl_be_block_lun_flags;

typedef enum {
	CTL_BE_BLOCK_NONE,
	CTL_BE_BLOCK_DEV,
	CTL_BE_BLOCK_FILE
} ctl_be_block_type;

struct ctl_be_block_devdata {
	struct cdev *cdev;
	struct cdevsw *csw;
	int dev_ref;
};

struct ctl_be_block_filedata {
	struct ucred *cred;
};

union ctl_be_block_bedata {
	struct ctl_be_block_devdata dev;
	struct ctl_be_block_filedata file;
};
struct ctl_be_block_io;
struct ctl_be_block_lun;

typedef void (*cbb_dispatch_t)(struct ctl_be_block_lun *be_lun,
			       struct ctl_be_block_io *beio);
/*
 * Backend LUN structure.  There is a 1:1 mapping between a block device
 * and a backend block LUN, and between a backend block LUN and a CTL LUN.
 */
struct ctl_be_block_lun {
	struct ctl_block_disk *disk;
	char lunname[32];
	char *dev_path;
	ctl_be_block_type dev_type;
	struct vnode *vn;
	union ctl_be_block_bedata backend;
	cbb_dispatch_t dispatch;
	cbb_dispatch_t lun_flush;
	cbb_dispatch_t unmap;
	uma_zone_t lun_zone;
	uint64_t size_blocks;
	uint64_t size_bytes;
	uint32_t blocksize;
	int blocksize_shift;
	uint16_t pblockexp;
	uint16_t pblockoff;
	struct ctl_be_block_softc *softc;
	struct devstat *disk_stats;
	ctl_be_block_lun_flags flags;
	STAILQ_ENTRY(ctl_be_block_lun) links;
	struct ctl_be_lun ctl_be_lun;
	struct taskqueue *io_taskqueue;
	struct task io_task;
	int num_threads;
	STAILQ_HEAD(, ctl_io_hdr) input_queue;
	STAILQ_HEAD(, ctl_io_hdr) config_write_queue;
	STAILQ_HEAD(, ctl_io_hdr) datamove_queue;
	struct mtx_padalign io_lock;
	struct mtx_padalign queue_lock;
};
/*
 * Overall softc structure for the block backend module.
 */
struct ctl_be_block_softc {
	struct mtx lock;
	int num_luns;
	STAILQ_HEAD(, ctl_block_disk) disk_list;
	STAILQ_HEAD(, ctl_be_block_lun) lun_list;
};

static struct ctl_be_block_softc backend_block_softc;
/*
 * Per-I/O information.
 */
struct ctl_be_block_io {
	union ctl_io *io;
	struct ctl_sg_entry sg_segs[CTLBLK_MAX_SEGS];
	struct iovec xiovecs[CTLBLK_MAX_SEGS];
	int bio_cmd;
	int num_segs;
	int num_bios_sent, num_bios_done;
	int send_complete;
	int num_errors;
	struct bintime ds_t0;
	devstat_tag_type ds_tag_type;
	devstat_trans_flags ds_trans_type;
	uint64_t io_len, io_offset;
	struct ctl_be_block_softc *softc;
	struct ctl_be_block_lun *lun;
	void (*beio_cont)(struct ctl_be_block_io *beio); /* to continue processing */
};
static int cbb_num_threads = 14;
SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD, 0,
	    "CAM Target Layer Block Backend");
SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RWTUN,
	   &cbb_num_threads, 0, "Number of threads per backing file");
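
/*
 * Given the node and OID declarations above, the thread count is exposed
 * as kern.cam.ctl.block.num_threads; since it is CTLFLAG_RWTUN it can be
 * set both at runtime, e.g.:
 *
 *	sysctl kern.cam.ctl.block.num_threads=23
 *
 * and as a loader tunable of the same name.
 */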
static struct ctl_be_block_io *ctl_alloc_beio(struct ctl_be_block_softc *softc);
static void ctl_free_beio(struct ctl_be_block_io *beio);
static void ctl_complete_beio(struct ctl_be_block_io *beio);
static int ctl_be_block_move_done(union ctl_io *io);
static void ctl_be_block_biodone(struct bio *bio);
static void ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
				    struct ctl_be_block_io *beio);
static void ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
				       struct ctl_be_block_io *beio);
static void ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
				   struct ctl_be_block_io *beio);
static void ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun,
				   struct ctl_be_block_io *beio);
static void ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
				      struct ctl_be_block_io *beio);
static void ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
				     union ctl_io *io);
static void ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
				  union ctl_io *io);
static void ctl_be_block_worker(void *context, int pending);
static int ctl_be_block_submit(union ctl_io *io);
static int ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
				   int flag, struct thread *td);
static int ctl_be_block_open_file(struct ctl_be_block_lun *be_lun,
				  struct ctl_lun_req *req);
static int ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun,
				 struct ctl_lun_req *req);
static int ctl_be_block_close(struct ctl_be_block_lun *be_lun);
static int ctl_be_block_open(struct ctl_be_block_softc *softc,
			     struct ctl_be_block_lun *be_lun,
			     struct ctl_lun_req *req);
static int ctl_be_block_create(struct ctl_be_block_softc *softc,
			       struct ctl_lun_req *req);
static int ctl_be_block_rm(struct ctl_be_block_softc *softc,
			   struct ctl_lun_req *req);
static int ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun,
				    struct ctl_lun_req *req);
static int ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun,
				   struct ctl_lun_req *req);
static int ctl_be_block_modify(struct ctl_be_block_softc *softc,
			       struct ctl_lun_req *req);
static void ctl_be_block_lun_shutdown(void *be_lun);
static void ctl_be_block_lun_config_status(void *be_lun,
					   ctl_lun_config_status status);
static int ctl_be_block_config_write(union ctl_io *io);
static int ctl_be_block_config_read(union ctl_io *io);
static int ctl_be_block_lun_info(void *be_lun, struct sbuf *sb);
int ctl_be_block_init(void);
static struct ctl_backend_driver ctl_be_block_driver = 
{
	.name = "block",
	.flags = CTL_BE_FLAG_HAS_CONFIG,
	.init = ctl_be_block_init,
	.data_submit = ctl_be_block_submit,
	.data_move_done = ctl_be_block_move_done,
	.config_read = ctl_be_block_config_read,
	.config_write = ctl_be_block_config_write,
	.ioctl = ctl_be_block_ioctl,
	.lun_info = ctl_be_block_lun_info
};

MALLOC_DEFINE(M_CTLBLK, "ctlblk", "Memory used for CTL block backend");
CTL_BACKEND_DECLARE(cbb, ctl_be_block_driver);

static uma_zone_t beio_zone;
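
/*
 * Per-I/O tracking structures (struct ctl_be_block_io) are carved out of
 * this UMA zone; see ctl_alloc_beio()/ctl_free_beio() below and
 * ctl_be_block_init(), where the zone is created.
 */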
static struct ctl_be_block_io *
ctl_alloc_beio(struct ctl_be_block_softc *softc)
{
	struct ctl_be_block_io *beio;

	beio = uma_zalloc(beio_zone, M_WAITOK | M_ZERO);
	beio->softc = softc;

	return (beio);
}
static void
ctl_free_beio(struct ctl_be_block_io *beio)
{
	int duplicate_free = 0;
	int i;

	for (i = 0; i < beio->num_segs; i++) {
		if (beio->sg_segs[i].addr == NULL)
			duplicate_free++;

		uma_zfree(beio->lun->lun_zone, beio->sg_segs[i].addr);
		beio->sg_segs[i].addr = NULL;

		/* For compare we had two equal S/G lists. */
		if (ARGS(beio->io)->flags & CTL_LLF_COMPARE) {
			uma_zfree(beio->lun->lun_zone,
			    beio->sg_segs[i + CTLBLK_HALF_SEGS].addr);
			beio->sg_segs[i + CTLBLK_HALF_SEGS].addr = NULL;
		}
	}
	if (duplicate_free > 0) {
		printf("%s: %d duplicate frees out of %d segments\n", __func__,
		       duplicate_free, beio->num_segs);
	}
	uma_zfree(beio_zone, beio);
}
static void
ctl_complete_beio(struct ctl_be_block_io *beio)
{
	union ctl_io *io = beio->io;

	if (beio->beio_cont != NULL) {
		beio->beio_cont(beio);
	} else {
		ctl_free_beio(beio);
		ctl_data_submit_done(io);
	}
}
static int
ctl_be_block_move_done(union ctl_io *io)
{
	struct ctl_be_block_io *beio;
	struct ctl_be_block_lun *be_lun;
	struct ctl_lba_len_flags *lbalen;
#ifdef CTL_TIME_IO
	struct bintime cur_bt;
#endif
	int i;

	beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
	be_lun = beio->lun;

	DPRINTF("entered\n");

#ifdef CTL_TIME_IO
	getbintime(&cur_bt);
	bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
	bintime_add(&io->io_hdr.dma_bt, &cur_bt);
	io->io_hdr.num_dmas++;
#endif
	io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;

	/*
	 * We set status at this point for read commands, and write
	 * commands with errors.
	 */
	if ((io->io_hdr.port_status == 0) &&
	    ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0) &&
	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
		lbalen = ARGS(beio->io);
		if (lbalen->flags & CTL_LLF_READ) {
			ctl_set_success(&io->scsiio);
		} else if (lbalen->flags & CTL_LLF_COMPARE) {
			/* We have two data blocks ready for comparison. */
			for (i = 0; i < beio->num_segs; i++) {
				if (memcmp(beio->sg_segs[i].addr,
				    beio->sg_segs[i + CTLBLK_HALF_SEGS].addr,
				    beio->sg_segs[i].len) != 0)
					break;
			}
			if (i < beio->num_segs)
				ctl_set_sense(&io->scsiio,
				    /*current_error*/ 1,
				    /*sense_key*/ SSD_KEY_MISCOMPARE,
				    /*asc*/ 0x1D,
				    /*ascq*/ 0x00,
				    SSD_ELEM_NONE);
			else
				ctl_set_success(&io->scsiio);
		}
	}
	else if ((io->io_hdr.port_status != 0)
	      && ((io->io_hdr.flags & CTL_FLAG_ABORT) == 0)
	      && ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
		/*
		 * For hardware error sense keys, the sense key
		 * specific value is defined to be a retry count,
		 * but we use it to pass back an internal FETD
		 * error code.  XXX KDM Hopefully the FETD is only
		 * using 16 bits for an error code, since that's
		 * all the space we have in the sks field.
		 */
		ctl_set_internal_failure(&io->scsiio,
					 /*sks_valid*/ 1,
					 /*retry_count*/
					 io->io_hdr.port_status);
	}

	/*
	 * If this is a read, or a write with errors, it is done.
	 */
	if ((beio->bio_cmd == BIO_READ)
	 || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)
	 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) {
		ctl_complete_beio(beio);
		return (0);
	}

	/*
	 * At this point, we have a write and the DMA completed
	 * successfully.  We now have to queue it to the task queue to
	 * execute the backend I/O.  That is because we do blocking
	 * memory allocations, and in the file backing case, blocking I/O.
	 * This move done routine is generally called in the SIM's
	 * interrupt context, and therefore we cannot block.
	 */
	mtx_lock(&be_lun->queue_lock);
	/*
	 * XXX KDM make sure that links is okay to use at this point.
	 * Otherwise, we either need to add another field to ctl_io_hdr,
	 * or deal with resource allocation here.
	 */
	STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links);
	mtx_unlock(&be_lun->queue_lock);

	taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);

	return (0);
}
static void
ctl_be_block_biodone(struct bio *bio)
{
	struct ctl_be_block_io *beio;
	struct ctl_be_block_lun *be_lun;
	union ctl_io *io;
	int error;

	beio = bio->bio_caller1;
	be_lun = beio->lun;
	io = beio->io;

	DPRINTF("entered\n");

	error = bio->bio_error;
	mtx_lock(&be_lun->io_lock);
	if (error != 0)
		beio->num_errors++;

	beio->num_bios_done++;

	/*
	 * XXX KDM will this cause WITNESS to complain?  Holding a lock
	 * during the free might cause it to complain.
	 */
	g_destroy_bio(bio);

	/*
	 * If the send complete bit isn't set, or we aren't the last I/O to
	 * complete, then we're done.
	 */
	if ((beio->send_complete == 0)
	 || (beio->num_bios_done < beio->num_bios_sent)) {
		mtx_unlock(&be_lun->io_lock);
		return;
	}

	/*
	 * At this point, we've verified that we are the last I/O to
	 * complete, so it's safe to drop the lock.
	 */
	devstat_end_transaction(beio->lun->disk_stats, beio->io_len,
	    beio->ds_tag_type, beio->ds_trans_type,
	    /*now*/ NULL, /*then*/&beio->ds_t0);
	mtx_unlock(&be_lun->io_lock);

	/*
	 * If there are any errors from the backing device, we fail the
	 * entire I/O with a medium error.
	 */
	if (beio->num_errors > 0) {
		if (error == EOPNOTSUPP) {
			ctl_set_invalid_opcode(&io->scsiio);
		} else if (beio->bio_cmd == BIO_FLUSH) {
			/* XXX KDM is there a better error here? */
			ctl_set_internal_failure(&io->scsiio,
						 /*sks_valid*/ 1,
						 /*retry_count*/ 0xbad2);
		} else
			ctl_set_medium_error(&io->scsiio);
		ctl_complete_beio(beio);
		return;
	}

	/*
	 * If this is a write, a flush, a delete or verify, we're all done.
	 * If this is a read, we can now send the data to the user.
	 */
	if ((beio->bio_cmd == BIO_WRITE)
	 || (beio->bio_cmd == BIO_FLUSH)
	 || (beio->bio_cmd == BIO_DELETE)
	 || (ARGS(io)->flags & CTL_LLF_VERIFY)) {
		ctl_set_success(&io->scsiio);
		ctl_complete_beio(beio);
	} else {
#ifdef CTL_TIME_IO
		getbintime(&io->io_hdr.dma_start_bt);
#endif
		ctl_datamove(io);
	}
}
static void
ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
			struct ctl_be_block_io *beio)
{
	union ctl_io *io = beio->io;
	struct mount *mountpoint;
	int error, lock_flags;

	DPRINTF("entered\n");

	binuptime(&beio->ds_t0);
	mtx_lock(&be_lun->io_lock);
	devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
	mtx_unlock(&be_lun->io_lock);

	(void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT);

	if (MNT_SHARED_WRITES(mountpoint)
	 || ((mountpoint == NULL)
	  && MNT_SHARED_WRITES(be_lun->vn->v_mount)))
		lock_flags = LK_SHARED;
	else
		lock_flags = LK_EXCLUSIVE;

	vn_lock(be_lun->vn, lock_flags | LK_RETRY);

	error = VOP_FSYNC(be_lun->vn, MNT_WAIT, curthread);
	VOP_UNLOCK(be_lun->vn, 0);

	vn_finished_write(mountpoint);

	mtx_lock(&be_lun->io_lock);
	devstat_end_transaction(beio->lun->disk_stats, beio->io_len,
	    beio->ds_tag_type, beio->ds_trans_type,
	    /*now*/ NULL, /*then*/&beio->ds_t0);
	mtx_unlock(&be_lun->io_lock);

	if (error == 0)
		ctl_set_success(&io->scsiio);
	else {
		/* XXX KDM is there a better error here? */
		ctl_set_internal_failure(&io->scsiio,
					 /*sks_valid*/ 1,
					 /*retry_count*/ 0xbad1);
	}

	ctl_complete_beio(beio);
}
SDT_PROBE_DEFINE1(cbb, kernel, read, file_start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, write, file_start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, read, file_done, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, write, file_done, "uint64_t");
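
/*
 * These statically-defined tracing points can be inspected with DTrace;
 * assuming the usual provider:module:function:name mapping for kernel SDT
 * probes, something like the following (illustrative) one-liner counts
 * file-backed read dispatches:
 *
 *	dtrace -n 'cbb:kernel:read:file_start { @reads = count(); }'
 */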
static void
ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
			   struct ctl_be_block_io *beio)
{
	struct ctl_be_block_filedata *file_data;
	union ctl_io *io;
	struct uio xuio;
	struct iovec *xiovec;
	int flags;
	int error, i;

	DPRINTF("entered\n");

	file_data = &be_lun->backend.file;
	io = beio->io;
	flags = 0;
	if (ARGS(io)->flags & CTL_LLF_DPO)
		flags |= IO_DIRECT;
	if (beio->bio_cmd == BIO_WRITE && ARGS(io)->flags & CTL_LLF_FUA)
		flags |= IO_SYNC;

	bzero(&xuio, sizeof(xuio));
	if (beio->bio_cmd == BIO_READ) {
		SDT_PROBE(cbb, kernel, read, file_start, 0, 0, 0, 0, 0);
		xuio.uio_rw = UIO_READ;
	} else {
		SDT_PROBE(cbb, kernel, write, file_start, 0, 0, 0, 0, 0);
		xuio.uio_rw = UIO_WRITE;
	}
	xuio.uio_offset = beio->io_offset;
	xuio.uio_resid = beio->io_len;
	xuio.uio_segflg = UIO_SYSSPACE;
	xuio.uio_iov = beio->xiovecs;
	xuio.uio_iovcnt = beio->num_segs;
	xuio.uio_td = curthread;

	for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) {
		xiovec->iov_base = beio->sg_segs[i].addr;
		xiovec->iov_len = beio->sg_segs[i].len;
	}

	binuptime(&beio->ds_t0);
	mtx_lock(&be_lun->io_lock);
	devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
	mtx_unlock(&be_lun->io_lock);

	if (beio->bio_cmd == BIO_READ) {
		vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);

		/*
		 * UFS pays attention to IO_DIRECT for reads.  If the
		 * DIRECTIO option is configured into the kernel, it calls
		 * ffs_rawread().  But that only works for single-segment
		 * uios with user space addresses.  In our case, with a
		 * kernel uio, it still reads into the buffer cache, but it
		 * will just try to release the buffer from the cache later
		 * on in ffs_read().
		 *
		 * ZFS does not pay attention to IO_DIRECT for reads.
		 *
		 * UFS does not pay attention to IO_SYNC for reads.
		 *
		 * ZFS pays attention to IO_SYNC (which translates into the
		 * Solaris define FRSYNC for zfs_read()) for reads.  It
		 * attempts to sync the file before reading.
		 *
		 * So, to attempt to provide some barrier semantics in the
		 * BIO_ORDERED case, set both IO_DIRECT and IO_SYNC.
		 */
		error = VOP_READ(be_lun->vn, &xuio, flags, file_data->cred);

		VOP_UNLOCK(be_lun->vn, 0);
		SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0);
	} else {
		struct mount *mountpoint;
		int lock_flags;

		(void)vn_start_write(be_lun->vn, &mountpoint, V_WAIT);

		if (MNT_SHARED_WRITES(mountpoint)
		 || ((mountpoint == NULL)
		  && MNT_SHARED_WRITES(be_lun->vn->v_mount)))
			lock_flags = LK_SHARED;
		else
			lock_flags = LK_EXCLUSIVE;

		vn_lock(be_lun->vn, lock_flags | LK_RETRY);

		/*
		 * UFS pays attention to IO_DIRECT for writes.  The write
		 * is done asynchronously.  (Normally the write would just
		 * get put into cache.)
		 *
		 * UFS pays attention to IO_SYNC for writes.  It will
		 * attempt to write the buffer out synchronously if that
		 * flag is set.
		 *
		 * ZFS does not pay attention to IO_DIRECT for writes.
		 *
		 * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC)
		 * for writes.  It will flush the transaction from the
		 * cache before returning.
		 *
		 * So if we've got the BIO_ORDERED flag set, we want
		 * IO_SYNC in either the UFS or ZFS case.
		 */
		error = VOP_WRITE(be_lun->vn, &xuio, flags, file_data->cred);
		VOP_UNLOCK(be_lun->vn, 0);

		vn_finished_write(mountpoint);
		SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0);
	}

	mtx_lock(&be_lun->io_lock);
	devstat_end_transaction(beio->lun->disk_stats, beio->io_len,
	    beio->ds_tag_type, beio->ds_trans_type,
	    /*now*/ NULL, /*then*/&beio->ds_t0);
	mtx_unlock(&be_lun->io_lock);

	/*
	 * If we got an error, set the sense data to "MEDIUM ERROR" and
	 * return the I/O to the user.
	 */
	if (error != 0) {
		char path_str[32];

		ctl_scsi_path_string(io, path_str, sizeof(path_str));
		/*
		 * XXX KDM ZFS returns ENOSPC when the underlying
		 * filesystem fills up.  What kind of SCSI error should we
		 * return for that?
		 */
		printf("%s%s command returned errno %d\n", path_str,
		       (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", error);
		ctl_set_medium_error(&io->scsiio);
		ctl_complete_beio(beio);
		return;
	}

	/*
	 * If this is a write or a verify, we're all done.
	 * If this is a read, we can now send the data to the user.
	 */
	if ((beio->bio_cmd == BIO_WRITE) ||
	    (ARGS(io)->flags & CTL_LLF_VERIFY)) {
		ctl_set_success(&io->scsiio);
		ctl_complete_beio(beio);
	} else {
#ifdef CTL_TIME_IO
		getbintime(&io->io_hdr.dma_start_bt);
#endif
		ctl_datamove(io);
	}
}
static void
ctl_be_block_dispatch_zvol(struct ctl_be_block_lun *be_lun,
			   struct ctl_be_block_io *beio)
{
	struct ctl_be_block_devdata *dev_data;
	union ctl_io *io;
	struct uio xuio;
	struct iovec *xiovec;
	int flags;
	int error, i;

	DPRINTF("entered\n");

	dev_data = &be_lun->backend.dev;
	io = beio->io;
	flags = 0;
	if (ARGS(io)->flags & CTL_LLF_DPO)
		flags |= IO_DIRECT;
	if (beio->bio_cmd == BIO_WRITE && ARGS(io)->flags & CTL_LLF_FUA)
		flags |= IO_SYNC;

	bzero(&xuio, sizeof(xuio));
	if (beio->bio_cmd == BIO_READ) {
		SDT_PROBE(cbb, kernel, read, file_start, 0, 0, 0, 0, 0);
		xuio.uio_rw = UIO_READ;
	} else {
		SDT_PROBE(cbb, kernel, write, file_start, 0, 0, 0, 0, 0);
		xuio.uio_rw = UIO_WRITE;
	}
	xuio.uio_offset = beio->io_offset;
	xuio.uio_resid = beio->io_len;
	xuio.uio_segflg = UIO_SYSSPACE;
	xuio.uio_iov = beio->xiovecs;
	xuio.uio_iovcnt = beio->num_segs;
	xuio.uio_td = curthread;

	for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) {
		xiovec->iov_base = beio->sg_segs[i].addr;
		xiovec->iov_len = beio->sg_segs[i].len;
	}

	binuptime(&beio->ds_t0);
	mtx_lock(&be_lun->io_lock);
	devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
	mtx_unlock(&be_lun->io_lock);

	if (beio->bio_cmd == BIO_READ) {
		error = (*dev_data->csw->d_read)(dev_data->cdev, &xuio, flags);
		SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0);
	} else {
		error = (*dev_data->csw->d_write)(dev_data->cdev, &xuio, flags);
		SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0);
	}

	mtx_lock(&be_lun->io_lock);
	devstat_end_transaction(beio->lun->disk_stats, beio->io_len,
	    beio->ds_tag_type, beio->ds_trans_type,
	    /*now*/ NULL, /*then*/&beio->ds_t0);
	mtx_unlock(&be_lun->io_lock);

	/*
	 * If we got an error, set the sense data to "MEDIUM ERROR" and
	 * return the I/O to the user.
	 */
	if (error != 0) {
		ctl_set_medium_error(&io->scsiio);
		ctl_complete_beio(beio);
		return;
	}

	/*
	 * If this is a write or a verify, we're all done.
	 * If this is a read, we can now send the data to the user.
	 */
	if ((beio->bio_cmd == BIO_WRITE) ||
	    (ARGS(io)->flags & CTL_LLF_VERIFY)) {
		ctl_set_success(&io->scsiio);
		ctl_complete_beio(beio);
	} else {
#ifdef CTL_TIME_IO
		getbintime(&io->io_hdr.dma_start_bt);
#endif
		ctl_datamove(io);
	}
}
static void
ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
		       struct ctl_be_block_io *beio)
{
	struct bio *bio;
	struct ctl_be_block_devdata *dev_data;

	dev_data = &be_lun->backend.dev;

	DPRINTF("entered\n");

	/* This can't fail, it's a blocking allocation. */
	bio = g_alloc_bio();

	bio->bio_cmd	    = BIO_FLUSH;
	bio->bio_flags	   |= BIO_ORDERED;
	bio->bio_dev	    = dev_data->cdev;
	bio->bio_offset	    = 0;
	bio->bio_data	    = 0;
	bio->bio_done	    = ctl_be_block_biodone;
	bio->bio_caller1    = beio;
	bio->bio_pblkno	    = 0;

	/*
	 * We don't need to acquire the LUN lock here, because we are only
	 * sending one bio, and so there is no other context to synchronize
	 * with.
	 */
	beio->num_bios_sent = 1;
	beio->send_complete = 1;

	binuptime(&beio->ds_t0);
	mtx_lock(&be_lun->io_lock);
	devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);
	mtx_unlock(&be_lun->io_lock);

	(*dev_data->csw->d_strategy)(bio);
}
static void
ctl_be_block_unmap_dev_range(struct ctl_be_block_lun *be_lun,
		       struct ctl_be_block_io *beio,
		       uint64_t off, uint64_t len, int last)
{
	struct bio *bio;
	struct ctl_be_block_devdata *dev_data;
	uint64_t maxlen;

	dev_data = &be_lun->backend.dev;
	maxlen = LONG_MAX - (LONG_MAX % be_lun->blocksize);
	while (len > 0) {
		bio = g_alloc_bio();

		bio->bio_cmd	    = BIO_DELETE;
		bio->bio_dev	    = dev_data->cdev;
		bio->bio_offset	    = off;
		bio->bio_length	    = MIN(len, maxlen);
		bio->bio_data	    = 0;
		bio->bio_done	    = ctl_be_block_biodone;
		bio->bio_caller1    = beio;
		bio->bio_pblkno	    = off / be_lun->blocksize;

		off += bio->bio_length;
		len -= bio->bio_length;

		mtx_lock(&be_lun->io_lock);
		beio->num_bios_sent++;
		if (last && len == 0)
			beio->send_complete = 1;
		mtx_unlock(&be_lun->io_lock);

		(*dev_data->csw->d_strategy)(bio);
	}
}
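
/*
 * Note how the loop above splits a very large UNMAP range: maxlen clamps
 * each BIO_DELETE to the largest blocksize multiple that fits in a signed
 * long, and send_complete is only raised on the final bio of the final
 * range, so ctl_be_block_biodone() completes the beio exactly once, after
 * the last bio of the whole request finishes.
 */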
static void
ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun,
		       struct ctl_be_block_io *beio)
{
	union ctl_io *io;
	struct ctl_be_block_devdata *dev_data;
	struct ctl_ptr_len_flags *ptrlen;
	struct scsi_unmap_desc *buf, *end;
	uint64_t len;

	dev_data = &be_lun->backend.dev;
	io = beio->io;

	DPRINTF("entered\n");

	binuptime(&beio->ds_t0);
	mtx_lock(&be_lun->io_lock);
	devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);
	mtx_unlock(&be_lun->io_lock);

	if (beio->io_offset == -1) {
		beio->io_len = 0;
		ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
		buf = (struct scsi_unmap_desc *)ptrlen->ptr;
		end = buf + ptrlen->len / sizeof(*buf);
		for (; buf < end; buf++) {
			len = (uint64_t)scsi_4btoul(buf->length) *
			    be_lun->blocksize;
			beio->io_len += len;
			ctl_be_block_unmap_dev_range(be_lun, beio,
			    scsi_8btou64(buf->lba) * be_lun->blocksize, len,
			    (end - buf < 2) ? TRUE : FALSE);
		}
	} else
		ctl_be_block_unmap_dev_range(be_lun, beio,
		    beio->io_offset, beio->io_len, TRUE);
}
static void
ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
			  struct ctl_be_block_io *beio)
{
	TAILQ_HEAD(, bio) queue = TAILQ_HEAD_INITIALIZER(queue);
	int i;
	struct bio *bio;
	struct ctl_be_block_devdata *dev_data;
	off_t cur_offset;
	int max_iosize;

	DPRINTF("entered\n");

	dev_data = &be_lun->backend.dev;

	/*
	 * We have to limit our I/O size to the maximum supported by the
	 * backend device.  Hopefully it is MAXPHYS.  If the driver doesn't
	 * set it properly, use DFLTPHYS.
	 */
	max_iosize = dev_data->cdev->si_iosize_max;
	if (max_iosize < PAGE_SIZE)
		max_iosize = DFLTPHYS;

	cur_offset = beio->io_offset;
	for (i = 0; i < beio->num_segs; i++) {
		size_t cur_size;
		uint8_t *cur_ptr;

		cur_size = beio->sg_segs[i].len;
		cur_ptr = beio->sg_segs[i].addr;

		while (cur_size > 0) {
			/* This can't fail, it's a blocking allocation. */
			bio = g_alloc_bio();

			KASSERT(bio != NULL, ("g_alloc_bio() failed!\n"));

			bio->bio_cmd = beio->bio_cmd;
			bio->bio_dev = dev_data->cdev;
			bio->bio_caller1 = beio;
			bio->bio_length = min(cur_size, max_iosize);
			bio->bio_offset = cur_offset;
			bio->bio_data = cur_ptr;
			bio->bio_done = ctl_be_block_biodone;
			bio->bio_pblkno = cur_offset / be_lun->blocksize;

			cur_offset += bio->bio_length;
			cur_ptr += bio->bio_length;
			cur_size -= bio->bio_length;

			TAILQ_INSERT_TAIL(&queue, bio, bio_queue);
			beio->num_bios_sent++;
		}
	}
	binuptime(&beio->ds_t0);
	mtx_lock(&be_lun->io_lock);
	devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);
	beio->send_complete = 1;
	mtx_unlock(&be_lun->io_lock);

	/*
	 * Fire off all allocated requests!
	 */
	while ((bio = TAILQ_FIRST(&queue)) != NULL) {
		TAILQ_REMOVE(&queue, bio, bio_queue);
		(*dev_data->csw->d_strategy)(bio);
	}
}
static void
ctl_be_block_cw_done_ws(struct ctl_be_block_io *beio)
{
	union ctl_io *io;

	io = beio->io;
	ctl_free_beio(beio);
	if ((io->io_hdr.flags & CTL_FLAG_ABORT) ||
	    ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
	     (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) {
		ctl_config_write_done(io);
		return;
	}

	ctl_be_block_config_write(io);
}
static void
ctl_be_block_cw_dispatch_ws(struct ctl_be_block_lun *be_lun,
			    union ctl_io *io)
{
	struct ctl_be_block_io *beio;
	struct ctl_be_block_softc *softc;
	struct ctl_lba_len_flags *lbalen;
	uint64_t len_left, lba;
	int i, seglen;
	uint8_t *buf, *end;

	DPRINTF("entered\n");

	beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
	softc = be_lun->softc;
	lbalen = ARGS(beio->io);

	if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP | SWS_ANCHOR | SWS_NDOB) ||
	    (lbalen->flags & (SWS_UNMAP | SWS_ANCHOR) && be_lun->unmap == NULL)) {
		ctl_free_beio(beio);
		ctl_set_invalid_field(&io->scsiio,
				      /*sks_valid*/ 1,
				      /*command*/ 1,
				      /*field*/ 1,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_config_write_done(io);
		return;
	}

	switch (io->scsiio.tag_type) {
	case CTL_TAG_ORDERED:
		beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
		break;
	case CTL_TAG_HEAD_OF_QUEUE:
		beio->ds_tag_type = DEVSTAT_TAG_HEAD;
		break;
	case CTL_TAG_UNTAGGED:
	case CTL_TAG_SIMPLE:
	default:
		beio->ds_tag_type = DEVSTAT_TAG_SIMPLE;
		break;
	}

	if (lbalen->flags & (SWS_UNMAP | SWS_ANCHOR)) {
		beio->io_offset = lbalen->lba * be_lun->blocksize;
		beio->io_len = (uint64_t)lbalen->len * be_lun->blocksize;
		beio->bio_cmd = BIO_DELETE;
		beio->ds_trans_type = DEVSTAT_FREE;

		be_lun->unmap(be_lun, beio);
		return;
	}

	beio->bio_cmd = BIO_WRITE;
	beio->ds_trans_type = DEVSTAT_WRITE;

	DPRINTF("WRITE SAME at LBA %jx len %u\n",
	       (uintmax_t)lbalen->lba, lbalen->len);

	len_left = (uint64_t)lbalen->len * be_lun->blocksize;
	for (i = 0, lba = 0; i < CTLBLK_MAX_SEGS && len_left > 0; i++) {

		/*
		 * Setup the S/G entry for this chunk.
		 */
		seglen = MIN(CTLBLK_MAX_SEG, len_left);
		seglen -= seglen % be_lun->blocksize;
		beio->sg_segs[i].len = seglen;
		beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK);

		DPRINTF("segment %d addr %p len %zd\n", i,
			beio->sg_segs[i].addr, beio->sg_segs[i].len);

		beio->num_segs++;
		len_left -= seglen;

		buf = beio->sg_segs[i].addr;
		end = buf + seglen;
		for (; buf < end; buf += be_lun->blocksize) {
			memcpy(buf, io->scsiio.kern_data_ptr, be_lun->blocksize);
			if (lbalen->flags & SWS_LBDATA)
				scsi_ulto4b(lbalen->lba + lba, buf);
			lba++;
		}
	}

	beio->io_offset = lbalen->lba * be_lun->blocksize;
	beio->io_len = lba * be_lun->blocksize;

	/* We can not do all in one run. Correct and schedule rerun. */
	if (len_left > 0) {
		lbalen->lba += lba;
		lbalen->len -= lba;
		beio->beio_cont = ctl_be_block_cw_done_ws;
	}

	be_lun->dispatch(be_lun, beio);
}
static void
ctl_be_block_cw_dispatch_unmap(struct ctl_be_block_lun *be_lun,
			       union ctl_io *io)
{
	struct ctl_be_block_io *beio;
	struct ctl_be_block_softc *softc;
	struct ctl_ptr_len_flags *ptrlen;

	DPRINTF("entered\n");

	beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
	softc = be_lun->softc;
	ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];

	if ((ptrlen->flags & ~SU_ANCHOR) != 0 || be_lun->unmap == NULL) {
		ctl_free_beio(beio);
		ctl_set_invalid_field(&io->scsiio,
				      /*sks_valid*/ 0,
				      /*command*/ 1,
				      /*field*/ 0,
				      /*bit_valid*/ 0,
				      /*bit*/ 0);
		ctl_config_write_done(io);
		return;
	}

	switch (io->scsiio.tag_type) {
	case CTL_TAG_ORDERED:
		beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
		break;
	case CTL_TAG_HEAD_OF_QUEUE:
		beio->ds_tag_type = DEVSTAT_TAG_HEAD;
		break;
	case CTL_TAG_UNTAGGED:
	case CTL_TAG_SIMPLE:
	default:
		beio->ds_tag_type = DEVSTAT_TAG_SIMPLE;
		break;
	}

	beio->io_len = 0;
	beio->io_offset = -1;

	beio->bio_cmd = BIO_DELETE;
	beio->ds_trans_type = DEVSTAT_FREE;

	DPRINTF("UNMAP\n");

	be_lun->unmap(be_lun, beio);
}
static void
ctl_be_block_cw_done(struct ctl_be_block_io *beio)
{
	union ctl_io *io;

	io = beio->io;
	ctl_free_beio(beio);
	ctl_config_write_done(io);
}
static void
ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
			 union ctl_io *io)
{
	struct ctl_be_block_io *beio;
	struct ctl_be_block_softc *softc;

	DPRINTF("entered\n");

	softc = be_lun->softc;
	beio = ctl_alloc_beio(softc);
	beio->io = io;
	beio->lun = be_lun;
	beio->beio_cont = ctl_be_block_cw_done;
	PRIV(io)->ptr = (void *)beio;

	switch (io->scsiio.cdb[0]) {
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
		beio->bio_cmd = BIO_FLUSH;
		beio->ds_trans_type = DEVSTAT_NO_DATA;
		beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
		beio->io_len = 0;
		be_lun->lun_flush(be_lun, beio);
		break;
	case WRITE_SAME_10:
	case WRITE_SAME_16:
		ctl_be_block_cw_dispatch_ws(be_lun, io);
		break;
	case UNMAP:
		ctl_be_block_cw_dispatch_unmap(be_lun, io);
		break;
	default:
		panic("Unhandled CDB type %#x", io->scsiio.cdb[0]);
		break;
	}
}
SDT_PROBE_DEFINE1(cbb, kernel, read, start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, write, start, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, read, alloc_done, "uint64_t");
SDT_PROBE_DEFINE1(cbb, kernel, write, alloc_done, "uint64_t");
static void
ctl_be_block_next(struct ctl_be_block_io *beio)
{
	struct ctl_be_block_lun *be_lun;
	union ctl_io *io;

	io = beio->io;
	be_lun = beio->lun;
	ctl_free_beio(beio);
	if ((io->io_hdr.flags & CTL_FLAG_ABORT) ||
	    ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
	     (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) {
		ctl_data_submit_done(io);
		return;
	}

	io->io_hdr.status &= ~CTL_STATUS_MASK;
	io->io_hdr.status |= CTL_STATUS_NONE;

	mtx_lock(&be_lun->queue_lock);
	/*
	 * XXX KDM make sure that links is okay to use at this point.
	 * Otherwise, we either need to add another field to ctl_io_hdr,
	 * or deal with resource allocation here.
	 */
	STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links);
	mtx_unlock(&be_lun->queue_lock);

	taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
}
static void
ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
		      union ctl_io *io)
{
	struct ctl_be_block_io *beio;
	struct ctl_be_block_softc *softc;
	struct ctl_lba_len_flags *lbalen;
	struct ctl_ptr_len_flags *bptrlen;
	uint64_t len_left, lbas;
	int i;

	softc = be_lun->softc;

	DPRINTF("entered\n");

	lbalen = ARGS(io);
	if (lbalen->flags & CTL_LLF_WRITE) {
		SDT_PROBE(cbb, kernel, write, start, 0, 0, 0, 0, 0);
	} else {
		SDT_PROBE(cbb, kernel, read, start, 0, 0, 0, 0, 0);
	}

	beio = ctl_alloc_beio(softc);
	beio->io = io;
	beio->lun = be_lun;
	bptrlen = PRIV(io);
	bptrlen->ptr = (void *)beio;

	switch (io->scsiio.tag_type) {
	case CTL_TAG_ORDERED:
		beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
		break;
	case CTL_TAG_HEAD_OF_QUEUE:
		beio->ds_tag_type = DEVSTAT_TAG_HEAD;
		break;
	case CTL_TAG_UNTAGGED:
	case CTL_TAG_SIMPLE:
	default:
		beio->ds_tag_type = DEVSTAT_TAG_SIMPLE;
		break;
	}

	if (lbalen->flags & CTL_LLF_WRITE) {
		beio->bio_cmd = BIO_WRITE;
		beio->ds_trans_type = DEVSTAT_WRITE;
	} else {
		beio->bio_cmd = BIO_READ;
		beio->ds_trans_type = DEVSTAT_READ;
	}

	DPRINTF("%s at LBA %jx len %u @%ju\n",
	       (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE",
	       (uintmax_t)lbalen->lba, lbalen->len, bptrlen->len);
	if (lbalen->flags & CTL_LLF_COMPARE)
		lbas = CTLBLK_HALF_IO_SIZE;
	else
		lbas = CTLBLK_MAX_IO_SIZE;
	lbas = MIN(lbalen->len - bptrlen->len, lbas / be_lun->blocksize);
	beio->io_offset = (lbalen->lba + bptrlen->len) * be_lun->blocksize;
	beio->io_len = lbas * be_lun->blocksize;
	bptrlen->len += lbas;

	for (i = 0, len_left = beio->io_len; len_left > 0; i++) {
		KASSERT(i < CTLBLK_MAX_SEGS, ("Too many segs (%d >= %d)",
		    i, CTLBLK_MAX_SEGS));

		/*
		 * Setup the S/G entry for this chunk.
		 */
		beio->sg_segs[i].len = min(CTLBLK_MAX_SEG, len_left);
		beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK);

		DPRINTF("segment %d addr %p len %zd\n", i,
			beio->sg_segs[i].addr, beio->sg_segs[i].len);

		/* Set up second segment for compare operation. */
		if (lbalen->flags & CTL_LLF_COMPARE) {
			beio->sg_segs[i + CTLBLK_HALF_SEGS].len =
			    beio->sg_segs[i].len;
			beio->sg_segs[i + CTLBLK_HALF_SEGS].addr =
			    uma_zalloc(be_lun->lun_zone, M_WAITOK);
		}

		beio->num_segs++;
		len_left -= beio->sg_segs[i].len;
	}
	if (bptrlen->len < lbalen->len)
		beio->beio_cont = ctl_be_block_next;
	io->scsiio.be_move_done = ctl_be_block_move_done;
	/* For compare we have separate S/G lists for read and datamove. */
	if (lbalen->flags & CTL_LLF_COMPARE)
		io->scsiio.kern_data_ptr = (uint8_t *)&beio->sg_segs[CTLBLK_HALF_SEGS];
	else
		io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
	io->scsiio.kern_data_len = beio->io_len;
	io->scsiio.kern_data_resid = 0;
	io->scsiio.kern_sg_entries = beio->num_segs;
	io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;

	/*
	 * For the read case, we need to read the data into our buffers and
	 * then we can send it back to the user.  For the write case, we
	 * need to get the data from the user first.
	 */
	if (beio->bio_cmd == BIO_READ) {
		SDT_PROBE(cbb, kernel, read, alloc_done, 0, 0, 0, 0, 0);
		be_lun->dispatch(be_lun, beio);
	} else {
		SDT_PROBE(cbb, kernel, write, alloc_done, 0, 0, 0, 0, 0);
#ifdef CTL_TIME_IO
		getbintime(&io->io_hdr.dma_start_bt);
#endif
		ctl_datamove(io);
	}
}
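
/*
 * Worker thread: drains the three per-LUN queues in priority order --
 * completed datamoves first, then config writes, then newly submitted
 * I/O -- so commands already in flight finish before new ones start.
 */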
static void
ctl_be_block_worker(void *context, int pending)
{
	struct ctl_be_block_lun *be_lun;
	struct ctl_be_block_softc *softc;
	union ctl_io *io;

	be_lun = (struct ctl_be_block_lun *)context;
	softc = be_lun->softc;

	DPRINTF("entered\n");

	mtx_lock(&be_lun->queue_lock);
	for (;;) {
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->datamove_queue);
		if (io != NULL) {
			struct ctl_be_block_io *beio;

			DPRINTF("datamove queue\n");
			STAILQ_REMOVE(&be_lun->datamove_queue, &io->io_hdr,
				      ctl_io_hdr, links);
			mtx_unlock(&be_lun->queue_lock);
			beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
			be_lun->dispatch(be_lun, beio);
			mtx_lock(&be_lun->queue_lock);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_write_queue);
		if (io != NULL) {
			DPRINTF("config write queue\n");
			STAILQ_REMOVE(&be_lun->config_write_queue, &io->io_hdr,
				      ctl_io_hdr, links);
			mtx_unlock(&be_lun->queue_lock);
			ctl_be_block_cw_dispatch(be_lun, io);
			mtx_lock(&be_lun->queue_lock);
			continue;
		}
		io = (union ctl_io *)STAILQ_FIRST(&be_lun->input_queue);
		if (io != NULL) {
			DPRINTF("input queue\n");
			STAILQ_REMOVE(&be_lun->input_queue, &io->io_hdr,
				      ctl_io_hdr, links);
			mtx_unlock(&be_lun->queue_lock);

			/*
			 * We must drop the lock, since this routine and
			 * its children may sleep.
			 */
			ctl_be_block_dispatch(be_lun, io);
			mtx_lock(&be_lun->queue_lock);
			continue;
		}

		/*
		 * If we get here, there is no work left in the queues, so
		 * just break out and let the task queue go to sleep.
		 */
		break;
	}
	mtx_unlock(&be_lun->queue_lock);
}
/*
 * Entry point from CTL to the backend for I/O.  We queue everything to a
 * work thread, so this just puts the I/O on a queue and wakes up the
 * worker thread.
 */
static int
ctl_be_block_submit(union ctl_io *io)
{
	struct ctl_be_block_lun *be_lun;
	struct ctl_be_lun *ctl_be_lun;

	DPRINTF("entered\n");

	ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
		CTL_PRIV_BACKEND_LUN].ptr;
	be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun;

	/*
	 * Make sure we only get SCSI I/O.
	 */
	KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Non-SCSI I/O (type "
		"%#x) encountered", io->io_hdr.io_type));

	PRIV(io)->len = 0;

	mtx_lock(&be_lun->queue_lock);
	/*
	 * XXX KDM make sure that links is okay to use at this point.
	 * Otherwise, we either need to add another field to ctl_io_hdr,
	 * or deal with resource allocation here.
	 */
	STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links);
	mtx_unlock(&be_lun->queue_lock);
	taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);

	return (CTL_RETVAL_COMPLETE);
}
static int
ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
			int flag, struct thread *td)
{
	struct ctl_be_block_softc *softc;
	int error = 0;

	softc = &backend_block_softc;
	switch (cmd) {
	case CTL_LUN_REQ: {
		struct ctl_lun_req *lun_req;

		lun_req = (struct ctl_lun_req *)addr;
		switch (lun_req->reqtype) {
		case CTL_LUNREQ_CREATE:
			error = ctl_be_block_create(softc, lun_req);
			break;
		case CTL_LUNREQ_RM:
			error = ctl_be_block_rm(softc, lun_req);
			break;
		case CTL_LUNREQ_MODIFY:
			error = ctl_be_block_modify(softc, lun_req);
			break;
		default:
			lun_req->status = CTL_LUN_ERROR;
			snprintf(lun_req->error_str, sizeof(lun_req->error_str),
				 "%s: invalid LUN request type %d", __func__,
				 lun_req->reqtype);
			break;
		}
		break;
	}
	default:
		error = ENOTTY;
		break;
	}
	return (error);
}
static int
ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
{
	struct ctl_be_block_filedata *file_data;
	struct ctl_lun_create_params *params;
	struct vattr vattr;
	int error;

	error = 0;
	file_data = &be_lun->backend.file;
	params = &req->reqdata.create;

	be_lun->dev_type = CTL_BE_BLOCK_FILE;
	be_lun->dispatch = ctl_be_block_dispatch_file;
	be_lun->lun_flush = ctl_be_block_flush_file;

	error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred);
	if (error != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "error calling VOP_GETATTR() for file %s",
			 be_lun->dev_path);
		return (error);
	}

	/*
	 * Verify that we have the ability to upgrade to exclusive
	 * access on this file so we can trap errors at open instead
	 * of reporting them during first access.
	 */
	if (VOP_ISLOCKED(be_lun->vn) != LK_EXCLUSIVE) {
		vn_lock(be_lun->vn, LK_UPGRADE | LK_RETRY);
		if (be_lun->vn->v_iflag & VI_DOOMED) {
			error = EBADF;
			snprintf(req->error_str, sizeof(req->error_str),
				 "error locking file %s", be_lun->dev_path);
			return (error);
		}
	}

	file_data->cred = crhold(curthread->td_ucred);
	if (params->lun_size_bytes != 0)
		be_lun->size_bytes = params->lun_size_bytes;
	else
		be_lun->size_bytes = vattr.va_size;
	/*
	 * We set the multi thread flag for file operations because all
	 * filesystems (in theory) are capable of allowing multiple readers
	 * of a file at once.  So we want to get the maximum possible
	 * concurrency.
	 */
	be_lun->flags |= CTL_BE_BLOCK_LUN_MULTI_THREAD;

	/*
	 * XXX KDM vattr.va_blocksize may be larger than 512 bytes here.
	 * With ZFS, it is 131072 bytes.  Block sizes that large don't work
	 * with disklabel and UFS on FreeBSD at least.  Large block sizes
	 * may not work with other OSes as well.  So just export a sector
	 * size of 512 bytes, which should work with any OS or
	 * application.  Since our backing is a file, any block size will
	 * work fine for the backing store.
	 */
#if 0
	be_lun->blocksize = vattr.va_blocksize;
#endif
	if (params->blocksize_bytes != 0)
		be_lun->blocksize = params->blocksize_bytes;
	else
		be_lun->blocksize = 512;

	/*
	 * Sanity check.  The media size has to be at least one
	 * sector long.
	 */
	if (be_lun->size_bytes < be_lun->blocksize) {
		error = EINVAL;
		snprintf(req->error_str, sizeof(req->error_str),
			 "file %s size %ju < block size %u", be_lun->dev_path,
			 (uintmax_t)be_lun->size_bytes, be_lun->blocksize);
	}
	return (error);
}
static int
ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
{
	struct ctl_lun_create_params *params;
	struct vattr vattr;
	struct cdev *dev;
	struct cdevsw *devsw;
	int error;
	off_t ps, pss, po, pos;

	params = &req->reqdata.create;

	be_lun->dev_type = CTL_BE_BLOCK_DEV;
	be_lun->backend.dev.cdev = be_lun->vn->v_rdev;
	be_lun->backend.dev.csw = dev_refthread(be_lun->backend.dev.cdev,
					     &be_lun->backend.dev.dev_ref);
	if (be_lun->backend.dev.csw == NULL)
		panic("Unable to retrieve device switch");
	if (strcmp(be_lun->backend.dev.csw->d_name, "zvol") == 0)
		be_lun->dispatch = ctl_be_block_dispatch_zvol;
	else
		be_lun->dispatch = ctl_be_block_dispatch_dev;
	be_lun->lun_flush = ctl_be_block_flush_dev;
	be_lun->unmap = ctl_be_block_unmap_dev;

	error = VOP_GETATTR(be_lun->vn, &vattr, NOCRED);
	if (error) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error getting vnode attributes for device %s",
			 __func__, be_lun->dev_path);
		return (error);
	}

	dev = be_lun->vn->v_rdev;
	devsw = dev->si_devsw;
	if (!devsw->d_ioctl) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: no d_ioctl for device %s!", __func__,
			 be_lun->dev_path);
		return (ENODEV);
	}

	error = devsw->d_ioctl(dev, DIOCGSECTORSIZE,
			       (caddr_t)&be_lun->blocksize, FREAD,
			       curthread);
	if (error) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned for DIOCGSECTORSIZE ioctl "
			 "on %s!", __func__, error, be_lun->dev_path);
		return (error);
	}

	/*
	 * If the user has asked for a blocksize that is greater than the
	 * backing device's blocksize, we can do it only if the blocksize
	 * the user is asking for is an even multiple of the underlying
	 * device's blocksize.
	 */
	if ((params->blocksize_bytes != 0)
	 && (params->blocksize_bytes > be_lun->blocksize)) {
		uint32_t bs_multiple, tmp_blocksize;

		bs_multiple = params->blocksize_bytes / be_lun->blocksize;

		tmp_blocksize = bs_multiple * be_lun->blocksize;

		if (tmp_blocksize == params->blocksize_bytes) {
			be_lun->blocksize = params->blocksize_bytes;
		} else {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: requested blocksize %u is not an even "
				 "multiple of backing device blocksize %u",
				 __func__, params->blocksize_bytes,
				 be_lun->blocksize);
			return (EINVAL);
		}
	} else if ((params->blocksize_bytes != 0)
		&& (params->blocksize_bytes != be_lun->blocksize)) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: requested blocksize %u < backing device "
			 "blocksize %u", __func__, params->blocksize_bytes,
			 be_lun->blocksize);
		return (EINVAL);
	}

	error = devsw->d_ioctl(dev, DIOCGMEDIASIZE,
			       (caddr_t)&be_lun->size_bytes, FREAD,
			       curthread);
	if (error) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned for DIOCGMEDIASIZE "
			 " ioctl on %s!", __func__, error,
			 be_lun->dev_path);
		return (error);
	}

	if (params->lun_size_bytes != 0) {
		if (params->lun_size_bytes > be_lun->size_bytes) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: requested LUN size %ju > backing device "
				 "size %ju", __func__,
				 (uintmax_t)params->lun_size_bytes,
				 (uintmax_t)be_lun->size_bytes);
			return (EINVAL);
		}

		be_lun->size_bytes = params->lun_size_bytes;
	}

	error = devsw->d_ioctl(dev, DIOCGSTRIPESIZE,
			       (caddr_t)&ps, FREAD, curthread);
	if (error)
		ps = po = 0;
	else {
		error = devsw->d_ioctl(dev, DIOCGSTRIPEOFFSET,
				       (caddr_t)&po, FREAD, curthread);
		if (error)
			po = 0;
	}
	pss = ps / be_lun->blocksize;
	pos = po / be_lun->blocksize;
	if ((pss > 0) && (pss * be_lun->blocksize == ps) && (pss >= pos) &&
	    ((pss & (pss - 1)) == 0) && (pos * be_lun->blocksize == po)) {
		be_lun->pblockexp = fls(pss) - 1;
		be_lun->pblockoff = (pss - pos) % pss;
	}

	return (0);
}
static int
ctl_be_block_close(struct ctl_be_block_lun *be_lun)
{
	DROP_GIANT();
	if (be_lun->vn) {
		int flags = FREAD | FWRITE;

		switch (be_lun->dev_type) {
		case CTL_BE_BLOCK_DEV:
			if (be_lun->backend.dev.csw) {
				dev_relthread(be_lun->backend.dev.cdev,
					      be_lun->backend.dev.dev_ref);
				be_lun->backend.dev.csw = NULL;
				be_lun->backend.dev.cdev = NULL;
			}
			break;
		case CTL_BE_BLOCK_FILE:
			break;
		case CTL_BE_BLOCK_NONE:
			break;
		default:
			panic("Unexpected backend type.");
			break;
		}

		(void)vn_close(be_lun->vn, flags, NOCRED, curthread);
		be_lun->vn = NULL;

		switch (be_lun->dev_type) {
		case CTL_BE_BLOCK_DEV:
			break;
		case CTL_BE_BLOCK_FILE:
			if (be_lun->backend.file.cred != NULL) {
				crfree(be_lun->backend.file.cred);
				be_lun->backend.file.cred = NULL;
			}
			break;
		case CTL_BE_BLOCK_NONE:
			break;
		default:
			panic("Unexpected backend type.");
			break;
		}
	}
	PICKUP_GIANT();

	return (0);
}
static int
ctl_be_block_open(struct ctl_be_block_softc *softc,
		       struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
{
	struct nameidata nd;
	int flags;
	int error;

	/*
	 * XXX KDM allow a read-only option?
	 */
	flags = FREAD | FWRITE;
	error = 0;

	if (rootvnode == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: Root filesystem is not mounted", __func__);
		return (1);
	}

	if (!curthread->td_proc->p_fd->fd_cdir) {
		curthread->td_proc->p_fd->fd_cdir = rootvnode;
		VREF(rootvnode);
	}
	if (!curthread->td_proc->p_fd->fd_rdir) {
		curthread->td_proc->p_fd->fd_rdir = rootvnode;
		VREF(rootvnode);
	}
	if (!curthread->td_proc->p_fd->fd_jdir) {
		curthread->td_proc->p_fd->fd_jdir = rootvnode;
		VREF(rootvnode);
	}

again:
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, be_lun->dev_path, curthread);
	error = vn_open(&nd, &flags, 0, NULL);
	if (error) {
		/*
		 * This is the only reasonable guess we can make as far as
		 * path if the user doesn't give us a fully qualified path.
		 * If they want to specify a file, they need to specify the
		 * full path.
		 */
		if (be_lun->dev_path[0] != '/') {
			char *dev_path = "/dev/";
			char *dev_name;

			/* Try adding device path at beginning of name */
			dev_name = malloc(strlen(be_lun->dev_path)
					+ strlen(dev_path) + 1,
					  M_CTLBLK, M_WAITOK);
			if (dev_name) {
				sprintf(dev_name, "%s%s", dev_path,
					be_lun->dev_path);
				free(be_lun->dev_path, M_CTLBLK);
				be_lun->dev_path = dev_name;
				goto again;
			}
		}
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error opening %s", __func__, be_lun->dev_path);
		return (error);
	}

	NDFREE(&nd, NDF_ONLY_PNBUF);

	be_lun->vn = nd.ni_vp;

	/* We only support disks and files. */
	if (vn_isdisk(be_lun->vn, &error)) {
		error = ctl_be_block_open_dev(be_lun, req);
	} else if (be_lun->vn->v_type == VREG) {
		error = ctl_be_block_open_file(be_lun, req);
	} else {
		error = EINVAL;
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s is not a disk or plain file", be_lun->dev_path);
	}
	VOP_UNLOCK(be_lun->vn, 0);

	if (error != 0) {
		ctl_be_block_close(be_lun);
		return (error);
	}

	be_lun->blocksize_shift = fls(be_lun->blocksize) - 1;
	be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift;

	return (0);
}
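
/*
 * The shift computed above turns size-to-blocks conversions into a plain
 * right shift; with the default 512-byte sector, fls(512) - 1 = 9, so
 * size_blocks = size_bytes >> 9.  Note this shortcut is only exact when
 * the exported blocksize is a power of two.
 */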
static int
ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
{
	struct ctl_be_block_lun *be_lun;
	struct ctl_lun_create_params *params;
	char num_thread_str[16];
	char tmpstr[32];
	char *value;
	int retval, num_threads, unmap;
	int tmp_num_threads;

	params = &req->reqdata.create;
	retval = 0;

	num_threads = cbb_num_threads;

	be_lun = malloc(sizeof(*be_lun), M_CTLBLK, M_ZERO | M_WAITOK);

	be_lun->softc = softc;
	STAILQ_INIT(&be_lun->input_queue);
	STAILQ_INIT(&be_lun->config_write_queue);
	STAILQ_INIT(&be_lun->datamove_queue);
	sprintf(be_lun->lunname, "cblk%d", softc->num_luns);
	mtx_init(&be_lun->io_lock, "cblk io lock", NULL, MTX_DEF);
	mtx_init(&be_lun->queue_lock, "cblk queue lock", NULL, MTX_DEF);
	ctl_init_opts(&be_lun->ctl_be_lun.options,
	    req->num_be_args, req->kern_be_args);

	be_lun->lun_zone = uma_zcreate(be_lun->lunname, CTLBLK_MAX_SEG,
	    NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0);

	if (be_lun->lun_zone == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error allocating UMA zone", __func__);
		goto bailout_error;
	}

	if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
		be_lun->ctl_be_lun.lun_type = params->device_type;
	else
		be_lun->ctl_be_lun.lun_type = T_DIRECT;

	if (be_lun->ctl_be_lun.lun_type == T_DIRECT) {
		value = ctl_get_opt(&be_lun->ctl_be_lun.options, "file");
		if (value == NULL) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: no file argument specified", __func__);
			goto bailout_error;
		}
		be_lun->dev_path = strdup(value, M_CTLBLK);

		retval = ctl_be_block_open(softc, be_lun, req);
		if (retval != 0) {
			retval = 0;
			goto bailout_error;
		}

		/*
		 * Tell the user the size of the file/device.
		 */
		params->lun_size_bytes = be_lun->size_bytes;

		/*
		 * The maximum LBA is the size - 1.
		 */
		be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
	} else {
		/*
		 * For processor devices, we don't have any size.
		 */
		be_lun->blocksize = 0;
		be_lun->pblockexp = 0;
		be_lun->pblockoff = 0;
		be_lun->size_blocks = 0;
		be_lun->size_bytes = 0;
		be_lun->ctl_be_lun.maxlba = 0;
		params->lun_size_bytes = 0;

		/*
		 * Default to just 1 thread for processor devices.
		 */
		num_threads = 1;
	}

	/*
	 * XXX This searching loop might be refactored to be combined with
	 * the loop above,
	 */
	value = ctl_get_opt(&be_lun->ctl_be_lun.options, "num_threads");
	if (value != NULL) {
		tmp_num_threads = strtol(value, NULL, 0);

		/*
		 * We don't let the user specify less than one
		 * thread, but hope he's clueful enough not to
		 * specify 1000 threads.
		 */
		if (tmp_num_threads < 1) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: invalid number of threads %s",
				 __func__, num_thread_str);
			goto bailout_error;
		}
		num_threads = tmp_num_threads;
	}
	unmap = 0;
	value = ctl_get_opt(&be_lun->ctl_be_lun.options, "unmap");
	if (value != NULL && strcmp(value, "on") == 0)
		unmap = 1;

	be_lun->flags = CTL_BE_BLOCK_LUN_UNCONFIGURED;
	be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY;
	if (unmap)
		be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_UNMAP;
	be_lun->ctl_be_lun.be_lun = be_lun;
	be_lun->ctl_be_lun.blocksize = be_lun->blocksize;
	be_lun->ctl_be_lun.pblockexp = be_lun->pblockexp;
	be_lun->ctl_be_lun.pblockoff = be_lun->pblockoff;
	/* Tell the user the blocksize we ended up using */
	params->blocksize_bytes = be_lun->blocksize;
	if (params->flags & CTL_LUN_FLAG_ID_REQ) {
		be_lun->ctl_be_lun.req_lun_id = params->req_lun_id;
		be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ;
	} else
		be_lun->ctl_be_lun.req_lun_id = 0;

	be_lun->ctl_be_lun.lun_shutdown = ctl_be_block_lun_shutdown;
	be_lun->ctl_be_lun.lun_config_status =
		ctl_be_block_lun_config_status;
	be_lun->ctl_be_lun.be = &ctl_be_block_driver;

	if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
			 softc->num_luns);
		strncpy((char *)be_lun->ctl_be_lun.serial_num, tmpstr,
			ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
			sizeof(tmpstr)));

		/* Tell the user what we used for a serial number */
		strncpy((char *)params->serial_num, tmpstr,
			ctl_min(sizeof(params->serial_num), sizeof(tmpstr)));
	} else {
		strncpy((char *)be_lun->ctl_be_lun.serial_num,
			params->serial_num,
			ctl_min(sizeof(be_lun->ctl_be_lun.serial_num),
			sizeof(params->serial_num)));
	}
	if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
		snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
		strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr,
			ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
			sizeof(tmpstr)));

		/* Tell the user what we used for a device ID */
		strncpy((char *)params->device_id, tmpstr,
			ctl_min(sizeof(params->device_id), sizeof(tmpstr)));
	} else {
		strncpy((char *)be_lun->ctl_be_lun.device_id,
			params->device_id,
			ctl_min(sizeof(be_lun->ctl_be_lun.device_id),
			sizeof(params->device_id)));
	}

	TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_be_block_worker, be_lun);

	be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
	    taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);

	if (be_lun->io_taskqueue == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: Unable to create taskqueue", __func__);
		goto bailout_error;
	}

	/*
	 * Note that we start the same number of threads by default for
	 * both the file case and the block device case.  For the file
	 * case, we need multiple threads to allow concurrency, because the
	 * vnode interface is designed to be a blocking interface.  For the
	 * block device case, ZFS zvols at least will block the caller's
	 * context in many instances, and so we need multiple threads to
	 * overcome that problem.  Other block devices don't need as many
	 * threads, but they shouldn't cause too many problems.
	 *
	 * If the user wants to just have a single thread for a block
	 * device, he can specify that when the LUN is created, or change
	 * the tunable/sysctl to alter the default number of threads.
	 */
	retval = taskqueue_start_threads(&be_lun->io_taskqueue,
					 /*num threads*/num_threads,
					 /*priority*/PWAIT,
					 /*thread name*/
					 "%s taskq", be_lun->lunname);

	if (retval != 0)
		goto bailout_error;

	be_lun->num_threads = num_threads;

	mtx_lock(&softc->lock);
	softc->num_luns++;
	STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);

	mtx_unlock(&softc->lock);

	retval = ctl_add_lun(&be_lun->ctl_be_lun);
	if (retval != 0) {
		mtx_lock(&softc->lock);
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun,
			      links);
		softc->num_luns--;
		mtx_unlock(&softc->lock);
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: ctl_add_lun() returned error %d, see dmesg for "
			 "details", __func__, retval);
		retval = 0;
		goto bailout_error;
	}

	mtx_lock(&softc->lock);

	/*
	 * Tell the config_status routine that we're waiting so it won't
	 * clean up the LUN in the event of an error.
	 */
	be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING;

	while (be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0);
		if (retval == EINTR)
			break;
	}
	be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING;

	if (be_lun->flags & CTL_BE_BLOCK_LUN_CONFIG_ERR) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN configuration error, see dmesg for details",
			 __func__);
		STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun,
			      links);
		softc->num_luns--;
		mtx_unlock(&softc->lock);
		goto bailout_error;
	} else {
		params->req_lun_id = be_lun->ctl_be_lun.lun_id;
	}

	mtx_unlock(&softc->lock);

	be_lun->disk_stats = devstat_new_entry("cbb", params->req_lun_id,
					       be_lun->blocksize,
					       DEVSTAT_ALL_SUPPORTED,
					       be_lun->ctl_be_lun.lun_type
					       | DEVSTAT_TYPE_IF_OTHER,
					       DEVSTAT_PRIORITY_OTHER);

	req->status = CTL_LUN_OK;

	return (retval);

bailout_error:
	req->status = CTL_LUN_ERROR;

	if (be_lun->io_taskqueue != NULL)
		taskqueue_free(be_lun->io_taskqueue);
	ctl_be_block_close(be_lun);
	if (be_lun->dev_path != NULL)
		free(be_lun->dev_path, M_CTLBLK);
	if (be_lun->lun_zone != NULL)
		uma_zdestroy(be_lun->lun_zone);
	ctl_free_opts(&be_lun->ctl_be_lun.options);
	mtx_destroy(&be_lun->queue_lock);
	mtx_destroy(&be_lun->io_lock);
	free(be_lun, M_CTLBLK);

	return (retval);
}
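
/*
 * From userland, a LUN backed by this module is typically created with
 * ctladm(8), e.g. (path is illustrative):
 *
 *	ctladm create -b block -o file=/dev/zvol/tank/mylun
 *
 * which reaches ctl_be_block_create() above through the CTL_LUN_REQ ioctl
 * as a CTL_LUNREQ_CREATE request.
 */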
static int
ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
{
	struct ctl_lun_rm_params *params;
	struct ctl_be_block_lun *be_lun;
	int retval;

	params = &req->reqdata.rm;

	mtx_lock(&softc->lock);

	be_lun = NULL;

	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->ctl_be_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);

	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN %u is not managed by the block backend",
			 __func__, params->lun_id);
		goto bailout_error;
	}

	retval = ctl_disable_lun(&be_lun->ctl_be_lun);

	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned from ctl_disable_lun() for "
			 "LUN %d", __func__, retval, params->lun_id);
		goto bailout_error;
	}

	retval = ctl_invalidate_lun(&be_lun->ctl_be_lun);
	if (retval != 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned from ctl_invalidate_lun() for "
			 "LUN %d", __func__, retval, params->lun_id);
		goto bailout_error;
	}

	mtx_lock(&softc->lock);

	be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING;

	while ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) {
		retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0);
		if (retval == EINTR)
			break;
	}

	be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING;

	if ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: interrupted waiting for LUN to be freed",
			 __func__);
		mtx_unlock(&softc->lock);
		goto bailout_error;
	}

	STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, links);

	softc->num_luns--;
	mtx_unlock(&softc->lock);

	taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task);

	taskqueue_free(be_lun->io_taskqueue);

	ctl_be_block_close(be_lun);

	if (be_lun->disk_stats != NULL)
		devstat_remove_entry(be_lun->disk_stats);

	uma_zdestroy(be_lun->lun_zone);

	ctl_free_opts(&be_lun->ctl_be_lun.options);
	free(be_lun->dev_path, M_CTLBLK);
	mtx_destroy(&be_lun->queue_lock);
	mtx_destroy(&be_lun->io_lock);
	free(be_lun, M_CTLBLK);

	req->status = CTL_LUN_OK;

	return (0);

bailout_error:
	req->status = CTL_LUN_ERROR;

	return (0);
}
static int
ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun,
			 struct ctl_lun_req *req)
{
	struct vattr vattr;
	int error;
	struct ctl_lun_modify_params *params;

	params = &req->reqdata.modify;

	if (params->lun_size_bytes != 0) {
		be_lun->size_bytes = params->lun_size_bytes;
	} else {
		vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);
		error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred);
		VOP_UNLOCK(be_lun->vn, 0);
		if (error != 0) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "error calling VOP_GETATTR() for file %s",
				 be_lun->dev_path);
			return (error);
		}

		be_lun->size_bytes = vattr.va_size;
	}

	return (0);
}
static int
ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun,
			struct ctl_lun_req *req)
{
	struct ctl_be_block_devdata *dev_data;
	int error;
	struct ctl_lun_modify_params *params;
	uint64_t size_bytes;

	params = &req->reqdata.modify;

	dev_data = &be_lun->backend.dev;
	if (!dev_data->csw->d_ioctl) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: no d_ioctl for device %s!", __func__,
			 be_lun->dev_path);
		return (ENODEV);
	}

	error = dev_data->csw->d_ioctl(dev_data->cdev, DIOCGMEDIASIZE,
				       (caddr_t)&size_bytes, FREAD,
				       curthread);
	if (error) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: error %d returned for DIOCGMEDIASIZE ioctl "
			 "on %s!", __func__, error, be_lun->dev_path);
		return (error);
	}

	if (params->lun_size_bytes != 0) {
		if (params->lun_size_bytes > size_bytes) {
			snprintf(req->error_str, sizeof(req->error_str),
				 "%s: requested LUN size %ju > backing device "
				 "size %ju", __func__,
				 (uintmax_t)params->lun_size_bytes,
				 (uintmax_t)size_bytes);
			return (EINVAL);
		}

		be_lun->size_bytes = params->lun_size_bytes;
	} else {
		be_lun->size_bytes = size_bytes;
	}

	return (0);
}
static int
ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
{
	struct ctl_lun_modify_params *params;
	struct ctl_be_block_lun *be_lun;
	uint64_t oldsize;
	int error;

	params = &req->reqdata.modify;

	mtx_lock(&softc->lock);

	be_lun = NULL;

	STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
		if (be_lun->ctl_be_lun.lun_id == params->lun_id)
			break;
	}
	mtx_unlock(&softc->lock);

	if (be_lun == NULL) {
		snprintf(req->error_str, sizeof(req->error_str),
			 "%s: LUN %u is not managed by the block backend",
			 __func__, params->lun_id);
		goto bailout_error;
	}

	if (params->lun_size_bytes != 0) {
		if (params->lun_size_bytes < be_lun->blocksize) {
			snprintf(req->error_str, sizeof(req->error_str),
				"%s: LUN size %ju < blocksize %u", __func__,
				(uintmax_t)params->lun_size_bytes,
				be_lun->blocksize);
			goto bailout_error;
		}
	}

	oldsize = be_lun->size_bytes;

	if (be_lun->vn->v_type == VREG)
		error = ctl_be_block_modify_file(be_lun, req);
	else
		error = ctl_be_block_modify_dev(be_lun, req);

	if (error != 0)
		goto bailout_error;

	if (be_lun->size_bytes != oldsize) {
		be_lun->size_blocks = be_lun->size_bytes >>
		    be_lun->blocksize_shift;

		/*
		 * The maximum LBA is the size - 1.
		 *
		 * XXX: Note that this field is being updated without locking,
		 * 	which might cause problems on 32-bit architectures.
		 */
		be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
		ctl_lun_capacity_changed(&be_lun->ctl_be_lun);
	}

	/* Tell the user the exact size we ended up using */
	params->lun_size_bytes = be_lun->size_bytes;

	req->status = CTL_LUN_OK;

	return (0);

bailout_error:
	req->status = CTL_LUN_ERROR;

	return (0);
}
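
/*
 * An existing LUN can be resized while online; from userland this arrives
 * as a CTL_LUNREQ_MODIFY request, e.g. via ctladm(8) (size and LUN number
 * are illustrative):
 *
 *	ctladm modify -b block -l 0 -s 34359738368
 *
 * ctl_lun_capacity_changed() then notifies initiators of the new capacity.
 */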
static void
ctl_be_block_lun_shutdown(void *be_lun)
{
	struct ctl_be_block_lun *lun;
	struct ctl_be_block_softc *softc;

	lun = (struct ctl_be_block_lun *)be_lun;
	softc = lun->softc;

	mtx_lock(&softc->lock);
	lun->flags |= CTL_BE_BLOCK_LUN_UNCONFIGURED;
	if (lun->flags & CTL_BE_BLOCK_LUN_WAITING)
		wakeup(lun);
	mtx_unlock(&softc->lock);
}
static void
ctl_be_block_lun_config_status(void *be_lun, ctl_lun_config_status status)
{
	struct ctl_be_block_lun *lun;
	struct ctl_be_block_softc *softc;

	lun = (struct ctl_be_block_lun *)be_lun;
	softc = lun->softc;

	if (status == CTL_LUN_CONFIG_OK) {
		mtx_lock(&softc->lock);
		lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED;
		if (lun->flags & CTL_BE_BLOCK_LUN_WAITING)
			wakeup(lun);
		mtx_unlock(&softc->lock);

		/*
		 * We successfully added the LUN, attempt to enable it.
		 */
		if (ctl_enable_lun(&lun->ctl_be_lun) != 0) {
			printf("%s: ctl_enable_lun() failed!\n", __func__);
			if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) {
				printf("%s: ctl_invalidate_lun() failed!\n",
				       __func__);
			}
		}

		return;
	}

	mtx_lock(&softc->lock);
	lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED;
	lun->flags |= CTL_BE_BLOCK_LUN_CONFIG_ERR;
	wakeup(lun);
	mtx_unlock(&softc->lock);
}
static int
ctl_be_block_config_write(union ctl_io *io)
{
	struct ctl_be_block_lun *be_lun;
	struct ctl_be_lun *ctl_be_lun;
	int retval;

	retval = 0;

	DPRINTF("entered\n");

	ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
		CTL_PRIV_BACKEND_LUN].ptr;
	be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun;

	switch (io->scsiio.cdb[0]) {
	case SYNCHRONIZE_CACHE:
	case SYNCHRONIZE_CACHE_16:
	case WRITE_SAME_10:
	case WRITE_SAME_16:
	case UNMAP:
		/*
		 * The upper level CTL code will filter out any CDBs with
		 * the immediate bit set and return the proper error.
		 *
		 * We don't really need to worry about what LBA range the
		 * user asked to be synced out.  When they issue a sync
		 * cache command, we'll sync out the whole thing.
		 */
		mtx_lock(&be_lun->queue_lock);
		STAILQ_INSERT_TAIL(&be_lun->config_write_queue, &io->io_hdr,
				   links);
		mtx_unlock(&be_lun->queue_lock);
		taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
		break;
	case START_STOP_UNIT: {
		struct scsi_start_stop_unit *cdb;

		cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;

		if (cdb->how & SSS_START)
			retval = ctl_start_lun(ctl_be_lun);
		else {
			retval = ctl_stop_lun(ctl_be_lun);

			/*
			 * XXX KDM Copan-specific offline behavior.
			 * Figure out a reasonable way to port this?
			 */
#ifdef NEEDTOPORT
			if ((retval == 0)
			 && (cdb->byte2 & SSS_ONOFFLINE))
				retval = ctl_lun_offline(ctl_be_lun);
#endif
		}

		/*
		 * In general, the above routines should not fail.  They
		 * just set state for the LUN.  So we've got something
		 * pretty wrong here if we can't start or stop the LUN.
		 */
		if (retval != 0) {
			ctl_set_internal_failure(&io->scsiio,
						 /*sks_valid*/ 1,
						 /*retry_count*/ 0xf051);
			retval = CTL_RETVAL_COMPLETE;
		} else {
			ctl_set_success(&io->scsiio);
		}
		ctl_config_write_done(io);
		break;
	}
	default:
		ctl_set_invalid_opcode(&io->scsiio);
		ctl_config_write_done(io);
		retval = CTL_RETVAL_COMPLETE;
		break;
	}

	return (retval);
}
static int
ctl_be_block_config_read(union ctl_io *io)
{

	return (0);
}
static int
ctl_be_block_lun_info(void *be_lun, struct sbuf *sb)
{
	struct ctl_be_block_lun *lun;
	int retval;

	lun = (struct ctl_be_block_lun *)be_lun;
	retval = 0;

	retval = sbuf_printf(sb, "\t<num_threads>");
	if (retval != 0)
		goto bailout;

	retval = sbuf_printf(sb, "%d", lun->num_threads);
	if (retval != 0)
		goto bailout;

	retval = sbuf_printf(sb, "</num_threads>\n");

bailout:
	return (retval);
}
int
ctl_be_block_init(void)
{
	struct ctl_be_block_softc *softc;
	int retval;

	softc = &backend_block_softc;
	retval = 0;

	mtx_init(&softc->lock, "ctlblock", NULL, MTX_DEF);
	beio_zone = uma_zcreate("beio", sizeof(struct ctl_be_block_io),
	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	STAILQ_INIT(&softc->disk_list);
	STAILQ_INIT(&softc->lun_list);

	return (retval);
}