2 * Copyright (c) 2003 Silicon Graphics International Corp.
3 * Copyright (c) 2009-2011 Spectra Logic Corporation
4 * Copyright (c) 2012 The FreeBSD Foundation
7 * Portions of this software were developed by Edward Tomasz Napierala
8 * under sponsorship from the FreeBSD Foundation.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions, and the following disclaimer,
15 * without modification.
16 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
17 * substantially similar to the "NO WARRANTY" disclaimer below
18 * ("Disclaimer") and any redistribution must be conditioned upon
19 * including a substantially similar Disclaimer requirement for further
20 * binary redistribution.
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
26 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
31 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
32 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33 * POSSIBILITY OF SUCH DAMAGES.
35 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_block.c#5 $
38 * CAM Target Layer driver backend for block devices.
40 * Author: Ken Merry <ken@FreeBSD.org>
42 #include <sys/cdefs.h>
43 __FBSDID("$FreeBSD$");
45 #include <opt_kdtrace.h>
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/kernel.h>
50 #include <sys/types.h>
51 #include <sys/kthread.h>
53 #include <sys/fcntl.h>
54 #include <sys/limits.h>
56 #include <sys/mutex.h>
57 #include <sys/condvar.h>
58 #include <sys/malloc.h>
60 #include <sys/ioccom.h>
61 #include <sys/queue.h>
63 #include <sys/endian.h>
66 #include <sys/taskqueue.h>
67 #include <sys/vnode.h>
68 #include <sys/namei.h>
69 #include <sys/mount.h>
71 #include <sys/fcntl.h>
72 #include <sys/filedesc.h>
73 #include <sys/filio.h>
76 #include <sys/module.h>
78 #include <sys/devicestat.h>
79 #include <sys/sysctl.h>
81 #include <geom/geom.h>
84 #include <cam/scsi/scsi_all.h>
85 #include <cam/scsi/scsi_da.h>
86 #include <cam/ctl/ctl_io.h>
87 #include <cam/ctl/ctl.h>
88 #include <cam/ctl/ctl_backend.h>
89 #include <cam/ctl/ctl_frontend_internal.h>
90 #include <cam/ctl/ctl_ioctl.h>
91 #include <cam/ctl/ctl_scsi_all.h>
92 #include <cam/ctl/ctl_error.h>
95 * The idea here is that we'll allocate enough S/G space to hold a 1MB
96 * I/O. If we get an I/O larger than that, we'll split it.
98 #define CTLBLK_HALF_IO_SIZE (512 * 1024)
99 #define CTLBLK_MAX_IO_SIZE (CTLBLK_HALF_IO_SIZE * 2)
100 #define CTLBLK_MAX_SEG MAXPHYS
101 #define CTLBLK_HALF_SEGS MAX(CTLBLK_HALF_IO_SIZE / CTLBLK_MAX_SEG, 1)
102 #define CTLBLK_MAX_SEGS (CTLBLK_HALF_SEGS * 2)
105 #define DPRINTF(fmt, args...) \
106 printf("cbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
108 #define DPRINTF(fmt, args...) do {} while(0)
112 ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND])
114 ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN])
116 SDT_PROVIDER_DEFINE(cbb);
119 CTL_BE_BLOCK_LUN_UNCONFIGURED = 0x01,
120 CTL_BE_BLOCK_LUN_CONFIG_ERR = 0x02,
121 CTL_BE_BLOCK_LUN_WAITING = 0x04,
122 CTL_BE_BLOCK_LUN_MULTI_THREAD = 0x08
123 } ctl_be_block_lun_flags;
131 struct ctl_be_block_devdata {
137 struct ctl_be_block_filedata {
141 union ctl_be_block_bedata {
142 struct ctl_be_block_devdata dev;
143 struct ctl_be_block_filedata file;
146 struct ctl_be_block_io;
147 struct ctl_be_block_lun;
149 typedef void (*cbb_dispatch_t)(struct ctl_be_block_lun *be_lun,
150 struct ctl_be_block_io *beio);
151 typedef uint64_t (*cbb_getattr_t)(struct ctl_be_block_lun *be_lun,
152 const char *attrname);
155 * Backend LUN structure. There is a 1:1 mapping between a block device
156 * and a backend block LUN, and between a backend block LUN and a CTL LUN.
158 struct ctl_be_block_lun {
159 struct ctl_lun_create_params params;
160 struct ctl_block_disk *disk;
163 ctl_be_block_type dev_type;
165 union ctl_be_block_bedata backend;
166 cbb_dispatch_t dispatch;
167 cbb_dispatch_t lun_flush;
168 cbb_dispatch_t unmap;
169 cbb_dispatch_t get_lba_status;
170 cbb_getattr_t getattr;
172 uint64_t size_blocks;
180 uint32_t atomicblock;
181 uint32_t opttxferlen;
182 struct ctl_be_block_softc *softc;
183 struct devstat *disk_stats;
184 ctl_be_block_lun_flags flags;
185 STAILQ_ENTRY(ctl_be_block_lun) links;
186 struct ctl_be_lun ctl_be_lun;
187 struct taskqueue *io_taskqueue;
190 STAILQ_HEAD(, ctl_io_hdr) input_queue;
191 STAILQ_HEAD(, ctl_io_hdr) config_read_queue;
192 STAILQ_HEAD(, ctl_io_hdr) config_write_queue;
193 STAILQ_HEAD(, ctl_io_hdr) datamove_queue;
194 struct mtx_padalign io_lock;
195 struct mtx_padalign queue_lock;
199 * Overall softc structure for the block backend module.
201 struct ctl_be_block_softc {
204 STAILQ_HEAD(, ctl_block_disk) disk_list;
206 STAILQ_HEAD(, ctl_be_block_lun) lun_list;
209 static struct ctl_be_block_softc backend_block_softc;
212 * Per-I/O information.
214 struct ctl_be_block_io {
216 struct ctl_sg_entry sg_segs[CTLBLK_MAX_SEGS];
217 struct iovec xiovecs[CTLBLK_MAX_SEGS];
224 struct bintime ds_t0;
225 devstat_tag_type ds_tag_type;
226 devstat_trans_flags ds_trans_type;
229 struct ctl_be_block_softc *softc;
230 struct ctl_be_block_lun *lun;
231 void (*beio_cont)(struct ctl_be_block_io *beio); /* to continue processing */
234 static int cbb_num_threads = 14;
235 TUNABLE_INT("kern.cam.ctl.block.num_threads", &cbb_num_threads);
236 SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD, 0,
237 "CAM Target Layer Block Backend");
238 SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RW,
239 &cbb_num_threads, 0, "Number of threads per backing file");
241 static struct ctl_be_block_io *ctl_alloc_beio(struct ctl_be_block_softc *softc);
242 static void ctl_free_beio(struct ctl_be_block_io *beio);
243 static void ctl_complete_beio(struct ctl_be_block_io *beio);
244 static int ctl_be_block_move_done(union ctl_io *io);
245 static void ctl_be_block_biodone(struct bio *bio);
246 static void ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
247 struct ctl_be_block_io *beio);
248 static void ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
249 struct ctl_be_block_io *beio);
250 static void ctl_be_block_gls_file(struct ctl_be_block_lun *be_lun,
251 struct ctl_be_block_io *beio);
252 static uint64_t ctl_be_block_getattr_file(struct ctl_be_block_lun *be_lun,
253 const char *attrname);
254 static void ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
255 struct ctl_be_block_io *beio);
256 static void ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun,
257 struct ctl_be_block_io *beio);
258 static void ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
259 struct ctl_be_block_io *beio);
260 static uint64_t ctl_be_block_getattr_dev(struct ctl_be_block_lun *be_lun,
261 const char *attrname);
262 static void ctl_be_block_cr_dispatch(struct ctl_be_block_lun *be_lun,
264 static void ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
266 static void ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
268 static void ctl_be_block_worker(void *context, int pending);
269 static int ctl_be_block_submit(union ctl_io *io);
270 static int ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
271 int flag, struct thread *td);
272 static int ctl_be_block_open_file(struct ctl_be_block_lun *be_lun,
273 struct ctl_lun_req *req);
274 static int ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun,
275 struct ctl_lun_req *req);
276 static int ctl_be_block_close(struct ctl_be_block_lun *be_lun);
277 static int ctl_be_block_open(struct ctl_be_block_softc *softc,
278 struct ctl_be_block_lun *be_lun,
279 struct ctl_lun_req *req);
280 static int ctl_be_block_create(struct ctl_be_block_softc *softc,
281 struct ctl_lun_req *req);
282 static int ctl_be_block_rm(struct ctl_be_block_softc *softc,
283 struct ctl_lun_req *req);
284 static int ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun,
285 struct ctl_lun_req *req);
286 static int ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun,
287 struct ctl_lun_req *req);
288 static int ctl_be_block_modify(struct ctl_be_block_softc *softc,
289 struct ctl_lun_req *req);
290 static void ctl_be_block_lun_shutdown(void *be_lun);
291 static void ctl_be_block_lun_config_status(void *be_lun,
292 ctl_lun_config_status status);
293 static int ctl_be_block_config_write(union ctl_io *io);
294 static int ctl_be_block_config_read(union ctl_io *io);
295 static int ctl_be_block_lun_info(void *be_lun, struct sbuf *sb);
296 static uint64_t ctl_be_block_lun_attr(void *be_lun, const char *attrname);
297 int ctl_be_block_init(void);
299 static struct ctl_backend_driver ctl_be_block_driver =
302 .flags = CTL_BE_FLAG_HAS_CONFIG,
303 .init = ctl_be_block_init,
304 .data_submit = ctl_be_block_submit,
305 .data_move_done = ctl_be_block_move_done,
306 .config_read = ctl_be_block_config_read,
307 .config_write = ctl_be_block_config_write,
308 .ioctl = ctl_be_block_ioctl,
309 .lun_info = ctl_be_block_lun_info,
310 .lun_attr = ctl_be_block_lun_attr
313 MALLOC_DEFINE(M_CTLBLK, "ctlblk", "Memory used for CTL block backend");
314 CTL_BACKEND_DECLARE(cbb, ctl_be_block_driver);
316 static uma_zone_t beio_zone;
/*
 * Allocate a zeroed per-I/O tracking structure (beio) from the UMA zone.
 * M_WAITOK: sleeps until memory is available, never returns NULL.
 * NOTE(review): lines are missing from this extract (no return visible).
 */
318 static struct ctl_be_block_io *
319 ctl_alloc_beio(struct ctl_be_block_softc *softc)
321 	struct ctl_be_block_io *beio;
323 	beio = uma_zalloc(beio_zone, M_WAITOK | M_ZERO);
/*
 * Release all S/G segment buffers attached to a beio, then the beio
 * itself.  For COMPARE commands there is a second, parallel S/G list
 * starting at index CTLBLK_HALF_SEGS which is freed alongside each
 * primary segment.  Freed addresses are NULLed to guard against
 * double-free.  NOTE(review): extract is missing lines (duplicate_free
 * accounting logic is only partially visible).
 */
329 ctl_free_beio(struct ctl_be_block_io *beio)
336 	for (i = 0; i < beio->num_segs; i++) {
337 		if (beio->sg_segs[i].addr == NULL)
340 		uma_zfree(beio->lun->lun_zone, beio->sg_segs[i].addr);
341 		beio->sg_segs[i].addr = NULL;
343 		/* For compare we had two equal S/G lists. */
344 		if (ARGS(beio->io)->flags & CTL_LLF_COMPARE) {
345 			uma_zfree(beio->lun->lun_zone,
346 			    beio->sg_segs[i + CTLBLK_HALF_SEGS].addr);
347 			beio->sg_segs[i + CTLBLK_HALF_SEGS].addr = NULL;
351 	if (duplicate_free > 0) {
352 		printf("%s: %d duplicate frees out of %d segments\n", __func__,
353 		    duplicate_free, beio->num_segs);
356 	uma_zfree(beio_zone, beio);
/*
 * Finish a backend I/O: if a continuation callback was registered
 * (beio_cont), hand the beio to it; otherwise complete the CTL data
 * submission for the associated ctl_io.
 */
360 ctl_complete_beio(struct ctl_be_block_io *beio)
362 	union ctl_io *io = beio->io;
364 	if (beio->beio_cont != NULL) {
365 		beio->beio_cont(beio);
368 		ctl_data_submit_done(io);
/*
 * DMA-move completion callback for the block backend.  Accounts DMA
 * time/counters, sets SCSI status for reads (and for COMPARE, performs
 * the actual memcmp between the two half S/G lists), and maps FETD
 * port errors to an internal-failure sense.  Reads and failed I/Os are
 * completed here; successful writes are queued to the LUN's task queue
 * because the backend work may block and this routine is typically
 * called from interrupt context.  NOTE(review): extract is missing
 * lines (e.g. the CTL_FLAG_ABORT branch body is not visible).
 */
373 ctl_be_block_move_done(union ctl_io *io)
375 	struct ctl_be_block_io *beio;
376 	struct ctl_be_block_lun *be_lun;
377 	struct ctl_lba_len_flags *lbalen;
379 	struct bintime cur_bt;
383 	beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
386 	DPRINTF("entered\n");
/* Accumulate per-I/O DMA time and bump the DMA counter. */
390 	bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
391 	bintime_add(&io->io_hdr.dma_bt, &cur_bt);
392 	io->io_hdr.num_dmas++;
394 	io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
397 	 * We set status at this point for read commands, and write
398 	 * commands with errors.
400 	if (io->io_hdr.flags & CTL_FLAG_ABORT) {
402 	} else if ((io->io_hdr.port_status == 0) &&
403 	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
404 		lbalen = ARGS(beio->io);
405 		if (lbalen->flags & CTL_LLF_READ) {
406 			ctl_set_success(&io->scsiio);
407 		} else if (lbalen->flags & CTL_LLF_COMPARE) {
408 			/* We have two data blocks ready for comparison. */
409 			for (i = 0; i < beio->num_segs; i++) {
410 				if (memcmp(beio->sg_segs[i].addr,
411 				    beio->sg_segs[i + CTLBLK_HALF_SEGS].addr,
412 				    beio->sg_segs[i].len) != 0)
/* A surviving i < num_segs means a mismatching segment was found. */
415 			if (i < beio->num_segs)
416 				ctl_set_sense(&io->scsiio,
418 				    /*sense_key*/ SSD_KEY_MISCOMPARE,
423 				ctl_set_success(&io->scsiio);
425 	} else if ((io->io_hdr.port_status != 0) &&
426 	    ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
427 	     (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
429 		 * For hardware error sense keys, the sense key
430 		 * specific value is defined to be a retry count,
431 		 * but we use it to pass back an internal FETD
432 		 * error code. XXX KDM Hopefully the FETD is only
433 		 * using 16 bits for an error code, since that's
434 		 * all the space we have in the sks field.
436 		ctl_set_internal_failure(&io->scsiio,
439 		    io->io_hdr.port_status);
443 	 * If this is a read, or a write with errors, it is done.
445 	if ((beio->bio_cmd == BIO_READ)
446 	 || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)
447 	 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) {
448 		ctl_complete_beio(beio);
453 	 * At this point, we have a write and the DMA completed
454 	 * successfully. We now have to queue it to the task queue to
455 	 * execute the backend I/O. That is because we do blocking
456 	 * memory allocations, and in the file backing case, blocking I/O.
457 	 * This move done routine is generally called in the SIM's
458 	 * interrupt context, and therefore we cannot block.
460 	mtx_lock(&be_lun->queue_lock);
462 	 * XXX KDM make sure that links is okay to use at this point.
463 	 * Otherwise, we either need to add another field to ctl_io_hdr,
464 	 * or deal with resource allocation here.
466 	STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links);
467 	mtx_unlock(&be_lun->queue_lock);
469 	taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
/*
 * GEOM bio completion callback.  One beio may fan out into many bios;
 * under io_lock we count completions and return early unless this is
 * the last outstanding bio (send_complete set and all bios done).  The
 * last completer ends the devstat transaction and then translates any
 * backing-device error into SCSI sense: EOPNOTSUPP -> invalid opcode,
 * ENOSPC/EDQUOT -> space allocation failure, failed flush -> internal
 * failure, anything else -> medium error.  On success, writes/flushes/
 * deletes/verifies complete here; reads proceed to the data move back
 * to the initiator.  NOTE(review): extract is missing lines (error
 * accounting and the final datamove call are only partially visible).
 */
475 ctl_be_block_biodone(struct bio *bio)
477 	struct ctl_be_block_io *beio;
478 	struct ctl_be_block_lun *be_lun;
482 	beio = bio->bio_caller1;
486 	DPRINTF("entered\n");
488 	error = bio->bio_error;
489 	mtx_lock(&be_lun->io_lock);
493 	beio->num_bios_done++;
496 	 * XXX KDM will this cause WITNESS to complain?  Holding a lock
497 	 * during the free might cause it to complain.
502 	 * If the send complete bit isn't set, or we aren't the last I/O to
503 	 * complete, then we're done.
505 	if ((beio->send_complete == 0)
506 	 || (beio->num_bios_done < beio->num_bios_sent)) {
507 		mtx_unlock(&be_lun->io_lock);
512 	 * At this point, we've verified that we are the last I/O to
513 	 * complete, so it's safe to drop the lock.
515 	devstat_end_transaction(beio->lun->disk_stats, beio->io_len,
516 	    beio->ds_tag_type, beio->ds_trans_type,
517 	    /*now*/ NULL, /*then*/&beio->ds_t0);
518 	mtx_unlock(&be_lun->io_lock);
521 	 * If there are any errors from the backing device, we fail the
522 	 * entire I/O with a medium error.
524 	if (beio->num_errors > 0) {
525 		if (error == EOPNOTSUPP) {
526 			ctl_set_invalid_opcode(&io->scsiio);
527 		} else if (error == ENOSPC || error == EDQUOT) {
528 			ctl_set_space_alloc_fail(&io->scsiio);
529 		} else if (beio->bio_cmd == BIO_FLUSH) {
530 			/* XXX KDM is there is a better error here? */
531 			ctl_set_internal_failure(&io->scsiio,
533 						 /*retry_count*/ 0xbad2);
535 			ctl_set_medium_error(&io->scsiio);
536 		ctl_complete_beio(beio);
541 	 * If this is a write, a flush, a delete or verify, we're all done.
542 	 * If this is a read, we can now send the data to the user.
544 	if ((beio->bio_cmd == BIO_WRITE)
545 	 || (beio->bio_cmd == BIO_FLUSH)
546 	 || (beio->bio_cmd == BIO_DELETE)
547 	 || (ARGS(io)->flags & CTL_LLF_VERIFY)) {
548 		ctl_set_success(&io->scsiio);
549 		ctl_complete_beio(beio);
551 		if ((ARGS(io)->flags & CTL_LLF_READ) &&
552 		    beio->beio_cont == NULL)
553 			ctl_set_success(&io->scsiio);
/* Start timing the DMA of the read data back to the initiator. */
555 		getbintime(&io->io_hdr.dma_start_bt);
/*
 * SYNCHRONIZE CACHE for a file-backed LUN: fsync the backing vnode.
 * Takes the vnode lock shared if the filesystem supports shared writes
 * (MNT_SHARED_WRITES), exclusive otherwise, brackets the VOP_FSYNC
 * with vn_start_write()/vn_finished_write(), and records the operation
 * in devstat.  A failed fsync is reported as an internal failure with
 * the 0xbad1 marker retry count.
 */
562 ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
563 			struct ctl_be_block_io *beio)
565 	union ctl_io *io = beio->io;
566 	struct mount *mountpoint;
567 	int error, lock_flags;
569 	DPRINTF("entered\n");
571 	binuptime(&beio->ds_t0);
572 	mtx_lock(&be_lun->io_lock);
573 	devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
574 	mtx_unlock(&be_lun->io_lock);
576 	(void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT);
578 	if (MNT_SHARED_WRITES(mountpoint)
579 	 || ((mountpoint == NULL)
580 	  && MNT_SHARED_WRITES(be_lun->vn->v_mount)))
581 		lock_flags = LK_SHARED;
583 		lock_flags = LK_EXCLUSIVE;
585 	vn_lock(be_lun->vn, lock_flags | LK_RETRY);
587 	error = VOP_FSYNC(be_lun->vn, MNT_WAIT, curthread);
588 	VOP_UNLOCK(be_lun->vn, 0);
590 	vn_finished_write(mountpoint);
592 	mtx_lock(&be_lun->io_lock);
593 	devstat_end_transaction(beio->lun->disk_stats, beio->io_len,
594 	    beio->ds_tag_type, beio->ds_trans_type,
595 	    /*now*/ NULL, /*then*/&beio->ds_t0);
596 	mtx_unlock(&be_lun->io_lock);
599 		ctl_set_success(&io->scsiio);
601 		/* XXX KDM is there is a better error here? */
602 		ctl_set_internal_failure(&io->scsiio,
604 					 /*retry_count*/ 0xbad1);
607 	ctl_complete_beio(beio);
610 SDT_PROBE_DEFINE1(cbb, kernel, read, file_start, "uint64_t");
611 SDT_PROBE_DEFINE1(cbb, kernel, write, file_start, "uint64_t");
612 SDT_PROBE_DEFINE1(cbb, kernel, read, file_done,"uint64_t");
613 SDT_PROBE_DEFINE1(cbb, kernel, write, file_done, "uint64_t");
/*
 * READ/WRITE dispatch for a file-backed LUN.  Builds a kernel-space
 * uio over the beio's S/G segments and issues VOP_READ or VOP_WRITE
 * on the backing vnode (shared vnode lock for reads; shared or
 * exclusive for writes depending on MNT_SHARED_WRITES), with devstat
 * bracketing.  Errors are reported as space-allocation failure
 * (ENOSPC/EDQUOT) or medium error; on success, writes/verifies
 * complete here while reads continue to the datamove phase.
 * NOTE(review): extract is missing lines (flags/DPO/FUA setup and
 * several closing braces are only partially visible).
 */
616 ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
617 			   struct ctl_be_block_io *beio)
619 	struct ctl_be_block_filedata *file_data;
622 	struct iovec *xiovec;
626 	DPRINTF("entered\n");
628 	file_data = &be_lun->backend.file;
/* DPO/FUA bits from the CDB presumably select IO_DIRECT/IO_SYNC
 * flags here — the flag assignments are missing from this extract. */
631 	if (ARGS(io)->flags & CTL_LLF_DPO)
633 	if (beio->bio_cmd == BIO_WRITE && ARGS(io)->flags & CTL_LLF_FUA)
636 	bzero(&xuio, sizeof(xuio));
637 	if (beio->bio_cmd == BIO_READ) {
638 		SDT_PROBE(cbb, kernel, read, file_start, 0, 0, 0, 0, 0);
639 		xuio.uio_rw = UIO_READ;
641 		SDT_PROBE(cbb, kernel, write, file_start, 0, 0, 0, 0, 0);
642 		xuio.uio_rw = UIO_WRITE;
644 	xuio.uio_offset = beio->io_offset;
645 	xuio.uio_resid = beio->io_len;
646 	xuio.uio_segflg = UIO_SYSSPACE;
647 	xuio.uio_iov = beio->xiovecs;
648 	xuio.uio_iovcnt = beio->num_segs;
649 	xuio.uio_td = curthread;
/* Mirror the S/G list into the iovec array backing the uio. */
651 	for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) {
652 		xiovec->iov_base = beio->sg_segs[i].addr;
653 		xiovec->iov_len = beio->sg_segs[i].len;
656 	binuptime(&beio->ds_t0);
657 	mtx_lock(&be_lun->io_lock);
658 	devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
659 	mtx_unlock(&be_lun->io_lock);
661 	if (beio->bio_cmd == BIO_READ) {
662 		vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);
665 		 * UFS pays attention to IO_DIRECT for reads.  If the
666 		 * DIRECTIO option is configured into the kernel, it calls
667 		 * ffs_rawread().  But that only works for single-segment
668 		 * uios with user space addresses.  In our case, with a
669 		 * kernel uio, it still reads into the buffer cache, but it
670 		 * will just try to release the buffer from the cache later
673 		 * ZFS does not pay attention to IO_DIRECT for reads.
675 		 * UFS does not pay attention to IO_SYNC for reads.
677 		 * ZFS pays attention to IO_SYNC (which translates into the
678 		 * Solaris define FRSYNC for zfs_read()) for reads.  It
679 		 * attempts to sync the file before reading.
681 		 * So, to attempt to provide some barrier semantics in the
682 		 * BIO_ORDERED case, set both IO_DIRECT and IO_SYNC.
684 		error = VOP_READ(be_lun->vn, &xuio, flags, file_data->cred);
686 		VOP_UNLOCK(be_lun->vn, 0);
687 		SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0);
689 		struct mount *mountpoint;
692 		(void)vn_start_write(be_lun->vn, &mountpoint, V_WAIT);
694 		if (MNT_SHARED_WRITES(mountpoint)
695 		 || ((mountpoint == NULL)
696 		  && MNT_SHARED_WRITES(be_lun->vn->v_mount)))
697 			lock_flags = LK_SHARED;
699 			lock_flags = LK_EXCLUSIVE;
701 		vn_lock(be_lun->vn, lock_flags | LK_RETRY);
704 		 * UFS pays attention to IO_DIRECT for writes.  The write
705 		 * is done asynchronously.  (Normally the write would just
706 		 * get put into cache.
708 		 * UFS pays attention to IO_SYNC for writes.  It will
709 		 * attempt to write the buffer out synchronously if that
712 		 * ZFS does not pay attention to IO_DIRECT for writes.
714 		 * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC)
715 		 * for writes.  It will flush the transaction from the
716 		 * cache before returning.
718 		 * So if we've got the BIO_ORDERED flag set, we want
719 		 * IO_SYNC in either the UFS or ZFS case.
721 		error = VOP_WRITE(be_lun->vn, &xuio, flags, file_data->cred);
722 		VOP_UNLOCK(be_lun->vn, 0);
724 		vn_finished_write(mountpoint);
725 		SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0);
728 	mtx_lock(&be_lun->io_lock);
729 	devstat_end_transaction(beio->lun->disk_stats, beio->io_len,
730 	    beio->ds_tag_type, beio->ds_trans_type,
731 	    /*now*/ NULL, /*then*/&beio->ds_t0);
732 	mtx_unlock(&be_lun->io_lock);
735 	 * If we got an error, set the sense data to "MEDIUM ERROR" and
736 	 * return the I/O to the user.
741 		ctl_scsi_path_string(io, path_str, sizeof(path_str));
742 		printf("%s%s command returned errno %d\n", path_str,
743 		       (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE", error);
744 		if (error == ENOSPC || error == EDQUOT) {
745 			ctl_set_space_alloc_fail(&io->scsiio);
747 			ctl_set_medium_error(&io->scsiio);
748 		ctl_complete_beio(beio);
753 	 * If this is a write or a verify, we're all done.
754 	 * If this is a read, we can now send the data to the user.
756 	if ((beio->bio_cmd == BIO_WRITE) ||
757 	    (ARGS(io)->flags & CTL_LLF_VERIFY)) {
758 		ctl_set_success(&io->scsiio);
759 		ctl_complete_beio(beio);
761 		if ((ARGS(io)->flags & CTL_LLF_READ) &&
762 		    beio->beio_cont == NULL)
763 			ctl_set_success(&io->scsiio);
765 		getbintime(&io->io_hdr.dma_start_bt);
/*
 * GET LBA STATUS for a file-backed LUN.  Uses FIOSEEKHOLE/FIOSEEKDATA
 * ioctls on the backing vnode to classify the region starting at the
 * requested LBA as mapped (status 0) or deallocated (status 1); if
 * neither seek advances past the start offset, the remainder of the
 * LUN is reported with status 0.  Fills in the single descriptor of
 * the returned scsi_get_lba_status_data, clamping the extent length
 * to UINT32_MAX blocks.
 */
772 ctl_be_block_gls_file(struct ctl_be_block_lun *be_lun,
773 		      struct ctl_be_block_io *beio)
775 	union ctl_io *io = beio->io;
776 	struct ctl_lba_len_flags *lbalen = ARGS(io);
777 	struct scsi_get_lba_status_data *data;
781 	DPRINTF("entered\n");
783 	off = roff = ((off_t)lbalen->lba) << be_lun->blocksize_shift;
784 	vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);
785 	error = VOP_IOCTL(be_lun->vn, FIOSEEKHOLE, &off,
786 	    0, curthread->td_ucred, curthread);
787 	if (error == 0 && off > roff)
788 		status = 0;	/* mapped up to off */
790 		error = VOP_IOCTL(be_lun->vn, FIOSEEKDATA, &off,
791 		    0, curthread->td_ucred, curthread);
792 		if (error == 0 && off > roff)
793 			status = 1;	/* deallocated up to off */
795 			status = 0;	/* unknown up to the end */
796 			off = be_lun->size_bytes;
799 	VOP_UNLOCK(be_lun->vn, 0);
/* Convert the byte offset back into blocks for the descriptor. */
801 	off >>= be_lun->blocksize_shift;
802 	data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr;
803 	scsi_u64to8b(lbalen->lba, data->descr[0].addr);
804 	scsi_ulto4b(MIN(UINT32_MAX, off - lbalen->lba),
805 	    data->descr[0].length);
806 	data->descr[0].status = status;
808 	ctl_complete_beio(beio);
/*
 * Attribute query for a file-backed LUN.  "blocksused" comes from the
 * vnode's va_bytes (actual on-disk allocation); "blocksavail" from the
 * filesystem's free space (f_bavail * f_bsize).  Both are converted to
 * LUN blocks via blocksize_shift.  NOTE(review): extract is missing
 * lines (error returns / UINT64_MAX fallbacks are not fully visible).
 */
812 ctl_be_block_getattr_file(struct ctl_be_block_lun *be_lun, const char *attrname)
815 	struct statfs statfs;
818 	if (be_lun->vn == NULL)
820 	if (strcmp(attrname, "blocksused") == 0) {
821 		error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred);
824 		return (vattr.va_bytes >> be_lun->blocksize_shift);
826 	if (strcmp(attrname, "blocksavail") == 0) {
827 		error = VFS_STATFS(be_lun->vn->v_mount, &statfs);
830 		return ((statfs.f_bavail * statfs.f_bsize) >>
831 		    be_lun->blocksize_shift);
/*
 * READ/WRITE dispatch for a ZVOL-backed LUN.  Same uio construction as
 * the file path, but the I/O goes straight through the character
 * device's cdevsw d_read/d_write entry points instead of VOP_READ/
 * VOP_WRITE (no vnode locking required).  Error mapping and completion
 * behavior match ctl_be_block_dispatch_file.  NOTE(review): extract is
 * missing lines (DPO/FUA flag assignments are not visible).
 */
837 ctl_be_block_dispatch_zvol(struct ctl_be_block_lun *be_lun,
838 			   struct ctl_be_block_io *beio)
840 	struct ctl_be_block_devdata *dev_data;
843 	struct iovec *xiovec;
847 	DPRINTF("entered\n");
849 	dev_data = &be_lun->backend.dev;
852 	if (ARGS(io)->flags & CTL_LLF_DPO)
854 	if (beio->bio_cmd == BIO_WRITE && ARGS(io)->flags & CTL_LLF_FUA)
857 	bzero(&xuio, sizeof(xuio));
858 	if (beio->bio_cmd == BIO_READ) {
859 		SDT_PROBE(cbb, kernel, read, file_start, 0, 0, 0, 0, 0);
860 		xuio.uio_rw = UIO_READ;
862 		SDT_PROBE(cbb, kernel, write, file_start, 0, 0, 0, 0, 0);
863 		xuio.uio_rw = UIO_WRITE;
865 	xuio.uio_offset = beio->io_offset;
866 	xuio.uio_resid = beio->io_len;
867 	xuio.uio_segflg = UIO_SYSSPACE;
868 	xuio.uio_iov = beio->xiovecs;
869 	xuio.uio_iovcnt = beio->num_segs;
870 	xuio.uio_td = curthread;
/* Mirror the S/G list into the iovec array backing the uio. */
872 	for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) {
873 		xiovec->iov_base = beio->sg_segs[i].addr;
874 		xiovec->iov_len = beio->sg_segs[i].len;
877 	binuptime(&beio->ds_t0);
878 	mtx_lock(&be_lun->io_lock);
879 	devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
880 	mtx_unlock(&be_lun->io_lock);
882 	if (beio->bio_cmd == BIO_READ) {
883 		error = (*dev_data->csw->d_read)(dev_data->cdev, &xuio, flags);
884 		SDT_PROBE(cbb, kernel, read, file_done, 0, 0, 0, 0, 0);
886 		error = (*dev_data->csw->d_write)(dev_data->cdev, &xuio, flags);
887 		SDT_PROBE(cbb, kernel, write, file_done, 0, 0, 0, 0, 0);
890 	mtx_lock(&be_lun->io_lock);
891 	devstat_end_transaction(beio->lun->disk_stats, beio->io_len,
892 	    beio->ds_tag_type, beio->ds_trans_type,
893 	    /*now*/ NULL, /*then*/&beio->ds_t0);
894 	mtx_unlock(&be_lun->io_lock);
897 	 * If we got an error, set the sense data to "MEDIUM ERROR" and
898 	 * return the I/O to the user.
901 		if (error == ENOSPC || error == EDQUOT) {
902 			ctl_set_space_alloc_fail(&io->scsiio);
904 			ctl_set_medium_error(&io->scsiio);
905 		ctl_complete_beio(beio);
910 	 * If this is a write or a verify, we're all done.
911 	 * If this is a read, we can now send the data to the user.
913 	if ((beio->bio_cmd == BIO_WRITE) ||
914 	    (ARGS(io)->flags & CTL_LLF_VERIFY)) {
915 		ctl_set_success(&io->scsiio);
916 		ctl_complete_beio(beio);
918 		if ((ARGS(io)->flags & CTL_LLF_READ) &&
919 		    beio->beio_cont == NULL)
920 			ctl_set_success(&io->scsiio);
922 		getbintime(&io->io_hdr.dma_start_bt);
/*
 * GET LBA STATUS for a ZVOL-backed LUN.  Same FIOSEEKHOLE/FIOSEEKDATA
 * classification as the file variant, but issued through the character
 * device's d_ioctl entry point rather than VOP_IOCTL, so no vnode
 * locking is needed.  Status 0 = mapped, 1 = deallocated, up to 'off'.
 */
929 ctl_be_block_gls_zvol(struct ctl_be_block_lun *be_lun,
930 		      struct ctl_be_block_io *beio)
932 	struct ctl_be_block_devdata *dev_data = &be_lun->backend.dev;
933 	union ctl_io *io = beio->io;
934 	struct ctl_lba_len_flags *lbalen = ARGS(io);
935 	struct scsi_get_lba_status_data *data;
939 	DPRINTF("entered\n");
941 	off = roff = ((off_t)lbalen->lba) << be_lun->blocksize_shift;
942 	error = (*dev_data->csw->d_ioctl)(dev_data->cdev, FIOSEEKHOLE,
943 	    (caddr_t)&off, FREAD, curthread);
944 	if (error == 0 && off > roff)
945 		status = 0;	/* mapped up to off */
947 		error = (*dev_data->csw->d_ioctl)(dev_data->cdev, FIOSEEKDATA,
948 		    (caddr_t)&off, FREAD, curthread);
949 		if (error == 0 && off > roff)
950 			status = 1;	/* deallocated up to off */
952 			status = 0;	/* unknown up to the end */
953 			off = be_lun->size_bytes;
/* Convert the byte offset back into blocks for the descriptor. */
957 	off >>= be_lun->blocksize_shift;
958 	data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr;
959 	scsi_u64to8b(lbalen->lba, data->descr[0].addr);
960 	scsi_ulto4b(MIN(UINT32_MAX, off - lbalen->lba),
961 	    data->descr[0].length);
962 	data->descr[0].status = status;
964 	ctl_complete_beio(beio);
/*
 * SYNCHRONIZE CACHE for a device-backed LUN: send a single ordered
 * BIO_FLUSH to the backing cdev via its d_strategy routine.  Exactly
 * one bio is sent, so num_bios_sent/send_complete are set to 1 without
 * needing the LUN I/O lock; completion (and error handling) happens in
 * ctl_be_block_biodone.
 */
968 ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
969 		       struct ctl_be_block_io *beio)
973 	struct ctl_be_block_devdata *dev_data;
975 	dev_data = &be_lun->backend.dev;
978 	DPRINTF("entered\n");
980 	/* This can't fail, it's a blocking allocation. */
983 	bio->bio_cmd	    = BIO_FLUSH;
984 	bio->bio_flags	   |= BIO_ORDERED;
985 	bio->bio_dev	    = dev_data->cdev;
988 	bio->bio_done	    = ctl_be_block_biodone;
989 	bio->bio_caller1    = beio;
993 	 * We don't need to acquire the LUN lock here, because we are only
994 	 * sending one bio, and so there is no other context to synchronize
997 	beio->num_bios_sent = 1;
998 	beio->send_complete = 1;
1000 	binuptime(&beio->ds_t0);
1001 	mtx_lock(&be_lun->io_lock);
1002 	devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);
1003 	mtx_unlock(&be_lun->io_lock);
1005 	(*dev_data->csw->d_strategy)(bio);
/*
 * Issue BIO_DELETE (TRIM/UNMAP) bios covering [off, off+len) on the
 * backing cdev.  Each bio is capped at the largest multiple of the LUN
 * blocksize that fits in a long (maxlen), so a large range fans out
 * into several bios.  'last' marks the final range of the request;
 * send_complete is only set once the last bio of the last range has
 * been counted under io_lock, which lets ctl_be_block_biodone detect
 * overall completion.  NOTE(review): extract is missing lines (the
 * loop construct around the per-bio body is not visible).
 */
1009 ctl_be_block_unmap_dev_range(struct ctl_be_block_lun *be_lun,
1010 			     struct ctl_be_block_io *beio,
1011 			     uint64_t off, uint64_t len, int last)
1014 	struct ctl_be_block_devdata *dev_data;
1017 	dev_data = &be_lun->backend.dev;
1018 	maxlen = LONG_MAX - (LONG_MAX % be_lun->blocksize);
1020 		bio = g_alloc_bio();
1021 		bio->bio_cmd	    = BIO_DELETE;
1022 		bio->bio_dev	    = dev_data->cdev;
1023 		bio->bio_offset	    = off;
1024 		bio->bio_length	    = MIN(len, maxlen);
1026 		bio->bio_done	    = ctl_be_block_biodone;
1027 		bio->bio_caller1    = beio;
1028 		bio->bio_pblkno     = off / be_lun->blocksize;
1030 		off += bio->bio_length;
1031 		len -= bio->bio_length;
1033 		mtx_lock(&be_lun->io_lock);
1034 		beio->num_bios_sent++;
1035 		if (last && len == 0)
1036 			beio->send_complete = 1;
1037 		mtx_unlock(&be_lun->io_lock);
1039 		(*dev_data->csw->d_strategy)(bio);
/*
 * UNMAP / WRITE SAME-with-unmap entry point for a device-backed LUN.
 * io_offset == -1 signals an UNMAP command carrying a descriptor list:
 * walk the scsi_unmap_desc array (pointer and byte length come from
 * the ctl_ptr_len_flags private area) and issue a delete per
 * descriptor, marking only the final one 'last'.  Otherwise a single
 * contiguous range (io_offset/io_len) is deleted.  Actual bio fan-out
 * happens in ctl_be_block_unmap_dev_range.
 */
1044 ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun,
1045 		       struct ctl_be_block_io *beio)
1048 	struct ctl_be_block_devdata *dev_data;
1049 	struct ctl_ptr_len_flags *ptrlen;
1050 	struct scsi_unmap_desc *buf, *end;
1053 	dev_data = &be_lun->backend.dev;
1056 	DPRINTF("entered\n");
1058 	binuptime(&beio->ds_t0);
1059 	mtx_lock(&be_lun->io_lock);
1060 	devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);
1061 	mtx_unlock(&be_lun->io_lock);
1063 	if (beio->io_offset == -1) {
1065 		ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
1066 		buf = (struct scsi_unmap_desc *)ptrlen->ptr;
1067 		end = buf + ptrlen->len / sizeof(*buf);
1068 		for (; buf < end; buf++) {
1069 			len = (uint64_t)scsi_4btoul(buf->length) *
1071 			beio->io_len += len;
1072 			ctl_be_block_unmap_dev_range(be_lun, beio,
1073 			    scsi_8btou64(buf->lba) * be_lun->blocksize, len,
1074 			    (end - buf < 2) ? TRUE : FALSE);
1077 		ctl_be_block_unmap_dev_range(be_lun, beio,
1078 		    beio->io_offset, beio->io_len, TRUE);
/*
 * READ/WRITE dispatch for a device-backed LUN.  Each S/G segment is
 * chopped into bios no larger than the device's si_iosize_max (falling
 * back to DFLTPHYS when the driver reports a bogus value below
 * PAGE_SIZE).  All bios are first collected on a local queue so that
 * send_complete can be set under io_lock before any completion can
 * race; only then are they fired at d_strategy.  Completion is handled
 * in ctl_be_block_biodone.
 */
1082 ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
1083 			  struct ctl_be_block_io *beio)
1085 	TAILQ_HEAD(, bio) queue = TAILQ_HEAD_INITIALIZER(queue);
1088 	struct ctl_be_block_devdata *dev_data;
1092 	DPRINTF("entered\n");
1094 	dev_data = &be_lun->backend.dev;
1097 	 * We have to limit our I/O size to the maximum supported by the
1098 	 * backend device.  Hopefully it is MAXPHYS.  If the driver doesn't
1099 	 * set it properly, use DFLTPHYS.
1101 	max_iosize = dev_data->cdev->si_iosize_max;
1102 	if (max_iosize < PAGE_SIZE)
1103 		max_iosize = DFLTPHYS;
1105 	cur_offset = beio->io_offset;
1106 	for (i = 0; i < beio->num_segs; i++) {
1110 		cur_size = beio->sg_segs[i].len;
1111 		cur_ptr = beio->sg_segs[i].addr;
1113 		while (cur_size > 0) {
1114 			/* This can't fail, it's a blocking allocation. */
1115 			bio = g_alloc_bio();
1117 			KASSERT(bio != NULL, ("g_alloc_bio() failed!\n"));
1119 			bio->bio_cmd = beio->bio_cmd;
1120 			bio->bio_dev = dev_data->cdev;
1121 			bio->bio_caller1 = beio;
1122 			bio->bio_length = min(cur_size, max_iosize);
1123 			bio->bio_offset = cur_offset;
1124 			bio->bio_data = cur_ptr;
1125 			bio->bio_done = ctl_be_block_biodone;
1126 			bio->bio_pblkno = cur_offset / be_lun->blocksize;
1128 			cur_offset += bio->bio_length;
1129 			cur_ptr += bio->bio_length;
1130 			cur_size -= bio->bio_length;
1132 			TAILQ_INSERT_TAIL(&queue, bio, bio_queue);
1133 			beio->num_bios_sent++;
1136 	binuptime(&beio->ds_t0);
1137 	mtx_lock(&be_lun->io_lock);
1138 	devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);
1139 	beio->send_complete = 1;
1140 	mtx_unlock(&be_lun->io_lock);
1143 	 * Fire off all allocated requests!
1145 	while ((bio = TAILQ_FIRST(&queue)) != NULL) {
1146 		TAILQ_REMOVE(&queue, bio, bio_queue);
1147 		(*dev_data->csw->d_strategy)(bio);
/*
 * Attribute query for a device-backed LUN: forward the attribute name
 * to the backing cdev via the DIOCGATTR ioctl and return the off_t
 * value.  UINT64_MAX signals "attribute unavailable" (no ioctl entry
 * point, or the ioctl failed).
 */
1152 ctl_be_block_getattr_dev(struct ctl_be_block_lun *be_lun, const char *attrname)
1154 	struct ctl_be_block_devdata *dev_data = &be_lun->backend.dev;
1155 	struct diocgattr_arg	arg;
1158 	if (dev_data->csw == NULL || dev_data->csw->d_ioctl == NULL)
1159 		return (UINT64_MAX);
1160 	strlcpy(arg.name, attrname, sizeof(arg.name));
1161 	arg.len = sizeof(arg.value.off);
1162 	error = dev_data->csw->d_ioctl(dev_data->cdev,
1163 	    DIOCGATTR, (caddr_t)&arg, FREAD, curthread);
1165 		return (UINT64_MAX);
1166 	return (arg.value.off);
/*
 * Continuation callback for WRITE SAME: frees the beio, then either
 * finishes the config-write (on abort or any non-success status) or
 * re-enters ctl_be_block_config_write to schedule the next chunk of a
 * WRITE SAME that could not be done in a single run.
 */
1170 ctl_be_block_cw_done_ws(struct ctl_be_block_io *beio)
1175 	ctl_free_beio(beio);
1176 	if ((io->io_hdr.flags & CTL_FLAG_ABORT) ||
1177 	    ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
1178 	     (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) {
1179 		ctl_config_write_done(io);
1183 	ctl_be_block_config_write(io);
/*
 * Dispatch a WRITE SAME (10/16) config write.  Validates the service
 * action flags, then either converts the request to a BIO_DELETE
 * (UNMAP/ANCHOR with an unmap-capable backend) or synthesizes write
 * buffers by replicating the single-block pattern across S/G segments
 * and dispatching a BIO_WRITE.  Large ranges are split; the chunk
 * completion handler (ctl_be_block_cw_done_ws) reruns the remainder.
 * (Several original lines are missing from this view.)
 */
1187 ctl_be_block_cw_dispatch_ws(struct ctl_be_block_lun *be_lun,
1190 struct ctl_be_block_io *beio;
1191 struct ctl_be_block_softc *softc;
1192 struct ctl_lba_len_flags *lbalen;
1193 uint64_t len_left, lba;
1194 uint32_t pb, pbo, adj;
1198 DPRINTF("entered\n");
1200 beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
1201 softc = be_lun->softc;
1202 lbalen = ARGS(beio->io);
/*
 * Reject unsupported flag bits, and UNMAP/ANCHOR when the backend
 * has no unmap method; the command fails with INVALID FIELD IN CDB.
 */
1204 if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP | SWS_ANCHOR | SWS_NDOB) ||
1205 (lbalen->flags & (SWS_UNMAP | SWS_ANCHOR) && be_lun->unmap == NULL)) {
1206 ctl_free_beio(beio);
1207 ctl_set_invalid_field(&io->scsiio,
1213 ctl_config_write_done(io);
/* Map the SCSI task attribute onto the devstat tag type. */
1217 switch (io->scsiio.tag_type) {
1218 case CTL_TAG_ORDERED:
1219 beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
1221 case CTL_TAG_HEAD_OF_QUEUE:
1222 beio->ds_tag_type = DEVSTAT_TAG_HEAD;
1224 case CTL_TAG_UNTAGGED:
1225 case CTL_TAG_SIMPLE:
1228 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE;
/* UNMAP/ANCHOR: hand the whole range to the backend unmap method. */
1232 if (lbalen->flags & (SWS_UNMAP | SWS_ANCHOR)) {
1233 beio->io_offset = lbalen->lba * be_lun->blocksize;
1234 beio->io_len = (uint64_t)lbalen->len * be_lun->blocksize;
1235 beio->bio_cmd = BIO_DELETE;
1236 beio->ds_trans_type = DEVSTAT_FREE;
1238 be_lun->unmap(be_lun, beio);
1242 beio->bio_cmd = BIO_WRITE;
1243 beio->ds_trans_type = DEVSTAT_WRITE;
1245 DPRINTF("WRITE SAME at LBA %jx len %u\n",
1246 (uintmax_t)lbalen->lba, lbalen->len);
/* Physical-block geometry, used to align segment boundaries below. */
1248 pb = be_lun->blocksize << be_lun->pblockexp;
1249 if (be_lun->pblockoff > 0)
1250 pbo = pb - be_lun->blocksize * be_lun->pblockoff;
1253 len_left = (uint64_t)lbalen->len * be_lun->blocksize;
1254 for (i = 0, lba = 0; i < CTLBLK_MAX_SEGS && len_left > 0; i++) {
1257 * Setup the S/G entry for this chunk.
1259 seglen = MIN(CTLBLK_MAX_SEG, len_left);
/* Trim the segment so later chunks stay physical-block aligned. */
1260 if (pb > be_lun->blocksize) {
1261 adj = ((lbalen->lba + lba) * be_lun->blocksize +
1266 seglen -= seglen % be_lun->blocksize;
1268 seglen -= seglen % be_lun->blocksize;
1269 beio->sg_segs[i].len = seglen;
1270 beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK);
1272 DPRINTF("segment %d addr %p len %zd\n", i,
1273 beio->sg_segs[i].addr, beio->sg_segs[i].len);
/*
 * Replicate the caller-supplied pattern block into every logical
 * block of the segment; with SWS_LBDATA the first 4 bytes of each
 * block are overwritten with its LBA.
 */
1278 buf = beio->sg_segs[i].addr;
1280 for (; buf < end; buf += be_lun->blocksize) {
1281 memcpy(buf, io->scsiio.kern_data_ptr, be_lun->blocksize);
1282 if (lbalen->flags & SWS_LBDATA)
1283 scsi_ulto4b(lbalen->lba + lba, buf);
1288 beio->io_offset = lbalen->lba * be_lun->blocksize;
1289 beio->io_len = lba * be_lun->blocksize;
1291 /* We can not do all in one run. Correct and schedule rerun. */
1295 beio->beio_cont = ctl_be_block_cw_done_ws;
1298 be_lun->dispatch(be_lun, beio);
/*
 * Dispatch an UNMAP config write.  Validates the flags and that the
 * backend supports unmap, sets up the beio as a BIO_DELETE described
 * by the block-descriptor list (ptrlen), and hands it to the
 * backend's unmap method.  (Some original lines are missing here.)
 */
1302 ctl_be_block_cw_dispatch_unmap(struct ctl_be_block_lun *be_lun,
1305 struct ctl_be_block_io *beio;
1306 struct ctl_be_block_softc *softc;
1307 struct ctl_ptr_len_flags *ptrlen;
1309 DPRINTF("entered\n");
1311 beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
1312 softc = be_lun->softc;
1313 ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
/* Only SU_ANCHOR is an understood flag; backend must support unmap. */
1315 if ((ptrlen->flags & ~SU_ANCHOR) != 0 || be_lun->unmap == NULL) {
1316 ctl_free_beio(beio);
1317 ctl_set_invalid_field(&io->scsiio,
1323 ctl_config_write_done(io);
/* Map the SCSI task attribute onto the devstat tag type. */
1327 switch (io->scsiio.tag_type) {
1328 case CTL_TAG_ORDERED:
1329 beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
1331 case CTL_TAG_HEAD_OF_QUEUE:
1332 beio->ds_tag_type = DEVSTAT_TAG_HEAD;
1334 case CTL_TAG_UNTAGGED:
1335 case CTL_TAG_SIMPLE:
1338 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE;
/* Offset is unused for descriptor-list unmap; mark it invalid. */
1343 beio->io_offset = -1;
1345 beio->bio_cmd = BIO_DELETE;
1346 beio->ds_trans_type = DEVSTAT_FREE;
1350 be_lun->unmap(be_lun, beio);
/*
 * Completion callback for config reads: release the beio and finish
 * the config read back to CTL.
 */
1354 ctl_be_block_cr_done(struct ctl_be_block_io *beio)
1359 ctl_free_beio(beio);
1360 ctl_config_read_done(io);
/*
 * Dispatch a config read.  Allocates a beio, hooks up the completion
 * callback and routes by CDB opcode; currently only SERVICE ACTION IN
 * (GET LBA STATUS) is handled, via the backend's get_lba_status
 * method when present.
 */
1364 ctl_be_block_cr_dispatch(struct ctl_be_block_lun *be_lun,
1367 struct ctl_be_block_io *beio;
1368 struct ctl_be_block_softc *softc;
1370 DPRINTF("entered\n");
1372 softc = be_lun->softc;
1373 beio = ctl_alloc_beio(softc);
1376 beio->beio_cont = ctl_be_block_cr_done;
1377 PRIV(io)->ptr = (void *)beio;
1379 switch (io->scsiio.cdb[0]) {
1380 case SERVICE_ACTION_IN: /* GET LBA STATUS */
1382 beio->ds_trans_type = DEVSTAT_NO_DATA;
1383 beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
/* No backend method: complete immediately with what we have. */
1385 if (be_lun->get_lba_status)
1386 be_lun->get_lba_status(be_lun, beio);
1388 ctl_be_block_cr_done(beio);
/* Any other opcode reaching here is a programming error. */
1391 panic("Unhandled CDB type %#x", io->scsiio.cdb[0]);
/*
 * Completion callback for simple config writes (e.g. SYNCHRONIZE
 * CACHE): release the beio and finish the config write back to CTL.
 */
1397 ctl_be_block_cw_done(struct ctl_be_block_io *beio)
1402 ctl_free_beio(beio);
1403 ctl_config_write_done(io);
/*
 * Dispatch a config write by CDB opcode: SYNCHRONIZE CACHE becomes a
 * BIO_FLUSH via the backend flush method; WRITE SAME and UNMAP are
 * routed to their dedicated dispatchers.  Unknown opcodes panic.
 */
1407 ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
1410 struct ctl_be_block_io *beio;
1411 struct ctl_be_block_softc *softc;
1413 DPRINTF("entered\n");
1415 softc = be_lun->softc;
1416 beio = ctl_alloc_beio(softc);
1419 beio->beio_cont = ctl_be_block_cw_done;
1420 PRIV(io)->ptr = (void *)beio;
1422 switch (io->scsiio.cdb[0]) {
1423 case SYNCHRONIZE_CACHE:
1424 case SYNCHRONIZE_CACHE_16:
1425 beio->bio_cmd = BIO_FLUSH;
1426 beio->ds_trans_type = DEVSTAT_NO_DATA;
1427 beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
1429 be_lun->lun_flush(be_lun, beio);
1433 ctl_be_block_cw_dispatch_ws(be_lun, io);
1436 ctl_be_block_cw_dispatch_unmap(be_lun, io);
1439 panic("Unhandled CDB type %#x", io->scsiio.cdb[0]);
/*
 * DTrace static probes marking the start and the buffer-allocation-
 * complete points of the backend read and write paths.
 */
1444 SDT_PROBE_DEFINE1(cbb, kernel, read, start, "uint64_t");
1445 SDT_PROBE_DEFINE1(cbb, kernel, write, start, "uint64_t");
1446 SDT_PROBE_DEFINE1(cbb, kernel, read, alloc_done, "uint64_t");
1447 SDT_PROBE_DEFINE1(cbb, kernel, write, alloc_done, "uint64_t");
/*
 * Continuation for multi-chunk data I/O: frees the finished beio and,
 * unless the command was aborted or errored, clears the status and
 * requeues the ctl_io on the input queue so the worker runs the next
 * chunk.
 */
1450 ctl_be_block_next(struct ctl_be_block_io *beio)
1452 struct ctl_be_block_lun *be_lun;
1457 ctl_free_beio(beio);
1458 if ((io->io_hdr.flags & CTL_FLAG_ABORT) ||
1459 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
1460 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) {
1461 ctl_data_submit_done(io);
/* Reset status so the next chunk starts from a clean slate. */
1465 io->io_hdr.status &= ~CTL_STATUS_MASK;
1466 io->io_hdr.status |= CTL_STATUS_NONE;
1468 mtx_lock(&be_lun->queue_lock);
1470 * XXX KDM make sure that links is okay to use at this point.
1471 * Otherwise, we either need to add another field to ctl_io_hdr,
1472 * or deal with resource allocation here.
1474 STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links);
1475 mtx_unlock(&be_lun->queue_lock);
1477 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
/*
 * Main data-path dispatcher for READ/WRITE (and COMPARE) commands.
 * Allocates a beio, sizes one chunk (capped at CTLBLK_MAX_IO_SIZE, or
 * half that for COMPARE which needs two buffer sets), builds the S/G
 * list from the per-LUN UMA zone, then either dispatches the backend
 * read or starts the DMA from the initiator for writes.  If the full
 * transfer does not fit in one chunk, ctl_be_block_next reruns it.
 * (Several original lines are missing from this view.)
 */
1481 ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
1484 struct ctl_be_block_io *beio;
1485 struct ctl_be_block_softc *softc;
1486 struct ctl_lba_len_flags *lbalen;
1487 struct ctl_ptr_len_flags *bptrlen;
1488 uint64_t len_left, lbas;
1491 softc = be_lun->softc;
1493 DPRINTF("entered\n");
1496 if (lbalen->flags & CTL_LLF_WRITE) {
1497 SDT_PROBE(cbb, kernel, write, start, 0, 0, 0, 0, 0);
1499 SDT_PROBE(cbb, kernel, read, start, 0, 0, 0, 0, 0);
1502 beio = ctl_alloc_beio(softc);
1506 bptrlen->ptr = (void *)beio;
/* Map the SCSI task attribute onto the devstat tag type. */
1508 switch (io->scsiio.tag_type) {
1509 case CTL_TAG_ORDERED:
1510 beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
1512 case CTL_TAG_HEAD_OF_QUEUE:
1513 beio->ds_tag_type = DEVSTAT_TAG_HEAD;
1515 case CTL_TAG_UNTAGGED:
1516 case CTL_TAG_SIMPLE:
1519 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE;
1523 if (lbalen->flags & CTL_LLF_WRITE) {
1524 beio->bio_cmd = BIO_WRITE;
1525 beio->ds_trans_type = DEVSTAT_WRITE;
1527 beio->bio_cmd = BIO_READ;
1528 beio->ds_trans_type = DEVSTAT_READ;
1531 DPRINTF("%s at LBA %jx len %u @%ju\n",
1532 (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE",
1533 (uintmax_t)lbalen->lba, lbalen->len, bptrlen->len);
/* COMPARE keeps two copies, so each chunk can only be half size. */
1534 if (lbalen->flags & CTL_LLF_COMPARE)
1535 lbas = CTLBLK_HALF_IO_SIZE;
1537 lbas = CTLBLK_MAX_IO_SIZE;
/* bptrlen->len tracks LBAs already done by earlier chunks. */
1538 lbas = MIN(lbalen->len - bptrlen->len, lbas / be_lun->blocksize);
1539 beio->io_offset = (lbalen->lba + bptrlen->len) * be_lun->blocksize;
1540 beio->io_len = lbas * be_lun->blocksize;
1541 bptrlen->len += lbas;
1543 for (i = 0, len_left = beio->io_len; len_left > 0; i++) {
1544 KASSERT(i < CTLBLK_MAX_SEGS, ("Too many segs (%d >= %d)",
1545 i, CTLBLK_MAX_SEGS));
1548 * Setup the S/G entry for this chunk.
1550 beio->sg_segs[i].len = min(CTLBLK_MAX_SEG, len_left);
1551 beio->sg_segs[i].addr = uma_zalloc(be_lun->lun_zone, M_WAITOK);
1553 DPRINTF("segment %d addr %p len %zd\n", i,
1554 beio->sg_segs[i].addr, beio->sg_segs[i].len);
1556 /* Set up second segment for compare operation. */
1557 if (lbalen->flags & CTL_LLF_COMPARE) {
1558 beio->sg_segs[i + CTLBLK_HALF_SEGS].len =
1559 beio->sg_segs[i].len;
1560 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr =
1561 uma_zalloc(be_lun->lun_zone, M_WAITOK);
1565 len_left -= beio->sg_segs[i].len;
/* If this chunk does not cover the whole request, chain to next. */
1567 if (bptrlen->len < lbalen->len)
1568 beio->beio_cont = ctl_be_block_next;
1569 io->scsiio.be_move_done = ctl_be_block_move_done;
1570 /* For compare we have separate S/G lists for read and datamove. */
1571 if (lbalen->flags & CTL_LLF_COMPARE)
1572 io->scsiio.kern_data_ptr = (uint8_t *)&beio->sg_segs[CTLBLK_HALF_SEGS];
1574 io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
1575 io->scsiio.kern_data_len = beio->io_len;
1576 io->scsiio.kern_data_resid = 0;
1577 io->scsiio.kern_sg_entries = beio->num_segs;
1578 io->io_hdr.flags |= CTL_FLAG_ALLOCATED | CTL_FLAG_KDPTR_SGLIST;
1581 * For the read case, we need to read the data into our buffers and
1582 * then we can send it back to the user. For the write case, we
1583 * need to get the data from the user first.
1585 if (beio->bio_cmd == BIO_READ) {
1586 SDT_PROBE(cbb, kernel, read, alloc_done, 0, 0, 0, 0, 0);
1587 be_lun->dispatch(be_lun, beio);
1589 SDT_PROBE(cbb, kernel, write, alloc_done, 0, 0, 0, 0, 0);
1591 getbintime(&io->io_hdr.dma_start_bt);
/*
 * Taskqueue worker for a LUN.  Drains the four work queues in
 * priority order — datamove, config write, config read, then new
 * input — re-checking from the top after each item.  The queue lock
 * is dropped around each dispatch because the handlers may sleep.
 */
1598 ctl_be_block_worker(void *context, int pending)
1600 struct ctl_be_block_lun *be_lun;
1601 struct ctl_be_block_softc *softc;
1604 be_lun = (struct ctl_be_block_lun *)context;
1605 softc = be_lun->softc;
1607 DPRINTF("entered\n");
1609 mtx_lock(&be_lun->queue_lock);
/* Highest priority: I/O whose DMA phase has completed. */
1611 io = (union ctl_io *)STAILQ_FIRST(&be_lun->datamove_queue);
1613 struct ctl_be_block_io *beio;
1615 DPRINTF("datamove queue\n");
1617 STAILQ_REMOVE(&be_lun->datamove_queue, &io->io_hdr,
1620 mtx_unlock(&be_lun->queue_lock);
1622 beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
1624 be_lun->dispatch(be_lun, beio);
1626 mtx_lock(&be_lun->queue_lock);
1629 io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_write_queue);
1631 DPRINTF("config write queue\n");
1632 STAILQ_REMOVE(&be_lun->config_write_queue, &io->io_hdr,
1634 mtx_unlock(&be_lun->queue_lock);
1635 ctl_be_block_cw_dispatch(be_lun, io);
1636 mtx_lock(&be_lun->queue_lock);
1639 io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_read_queue);
1641 DPRINTF("config read queue\n");
1642 STAILQ_REMOVE(&be_lun->config_read_queue, &io->io_hdr,
1644 mtx_unlock(&be_lun->queue_lock);
1645 ctl_be_block_cr_dispatch(be_lun, io);
1646 mtx_lock(&be_lun->queue_lock);
/* Finally, newly submitted I/O from ctl_be_block_submit(). */
1649 io = (union ctl_io *)STAILQ_FIRST(&be_lun->input_queue);
1651 DPRINTF("input queue\n");
1653 STAILQ_REMOVE(&be_lun->input_queue, &io->io_hdr,
1655 mtx_unlock(&be_lun->queue_lock);
1658 * We must drop the lock, since this routine and
1659 * its children may sleep.
1661 ctl_be_block_dispatch(be_lun, io);
1663 mtx_lock(&be_lun->queue_lock);
1668 * If we get here, there is no work left in the queues, so
1669 * just break out and let the task queue go to sleep.
1673 mtx_unlock(&be_lun->queue_lock);
1677 * Entry point from CTL to the backend for I/O. We queue everything to a
1678 * work thread, so this just puts the I/O on a queue and wakes up the
1682 ctl_be_block_submit(union ctl_io *io)
1684 struct ctl_be_block_lun *be_lun;
1685 struct ctl_be_lun *ctl_be_lun;
1687 DPRINTF("entered\n");
/* Recover our per-LUN state from the CTL private pointer. */
1689 ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
1690 CTL_PRIV_BACKEND_LUN].ptr;
1691 be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun;
1694 * Make sure we only get SCSI I/O.
1696 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Non-SCSI I/O (type "
1697 "%#x) encountered", io->io_hdr.io_type));
1701 mtx_lock(&be_lun->queue_lock);
1703 * XXX KDM make sure that links is okay to use at this point.
1704 * Otherwise, we either need to add another field to ctl_io_hdr,
1705 * or deal with resource allocation here.
1707 STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links);
1708 mtx_unlock(&be_lun->queue_lock);
/* Kick the worker to process the queued I/O. */
1709 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
1711 return (CTL_RETVAL_COMPLETE);
/*
 * Character-device ioctl handler for the block backend control node.
 * Dispatches LUN requests (create / remove / modify) from userland
 * (ctladm) to the corresponding backend operations.  Unknown request
 * types are reported back through the request's error string.
 */
1715 ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
1716 int flag, struct thread *td)
1718 struct ctl_be_block_softc *softc;
1721 softc = &backend_block_softc;
1727 struct ctl_lun_req *lun_req;
1729 lun_req = (struct ctl_lun_req *)addr;
1731 switch (lun_req->reqtype) {
1732 case CTL_LUNREQ_CREATE:
1733 error = ctl_be_block_create(softc, lun_req);
1736 error = ctl_be_block_rm(softc, lun_req);
1738 case CTL_LUNREQ_MODIFY:
1739 error = ctl_be_block_modify(softc, lun_req);
1742 lun_req->status = CTL_LUN_ERROR;
1743 snprintf(lun_req->error_str, sizeof(lun_req->error_str),
1744 "invalid LUN request type %d",
/*
 * Configure a LUN backed by a regular file.  Installs the file-based
 * dispatch/flush/LBA-status/getattr methods, sizes the LUN from the
 * create parameters or the file's vattr, and derives logical,
 * physical and UNMAP block geometry from vattr.va_blocksize and the
 * pblocksize/pblockoffset/ublocksize/ublockoffset LUN options.
 * Expects be_lun->vn to be locked on entry; returns 0 on success or
 * an errno with req->error_str filled in.
 */
1759 ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
1761 struct ctl_be_block_filedata *file_data;
1762 struct ctl_lun_create_params *params;
1765 off_t ps, pss, po, pos, us, uss, uo, uos;
1769 file_data = &be_lun->backend.file;
1770 params = &be_lun->params;
1772 be_lun->dev_type = CTL_BE_BLOCK_FILE;
1773 be_lun->dispatch = ctl_be_block_dispatch_file;
1774 be_lun->lun_flush = ctl_be_block_flush_file;
1775 be_lun->get_lba_status = ctl_be_block_gls_file;
1776 be_lun->getattr = ctl_be_block_getattr_file;
1778 error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred);
1780 snprintf(req->error_str, sizeof(req->error_str),
1781 "error calling VOP_GETATTR() for file %s",
1787 * Verify that we have the ability to upgrade to exclusive
1788 * access on this file so we can trap errors at open instead
1789 * of reporting them during first access.
1791 if (VOP_ISLOCKED(be_lun->vn) != LK_EXCLUSIVE) {
1792 vn_lock(be_lun->vn, LK_UPGRADE | LK_RETRY);
/* Vnode may have been reclaimed while the lock was upgraded. */
1793 if (be_lun->vn->v_iflag & VI_DOOMED) {
1795 snprintf(req->error_str, sizeof(req->error_str),
1796 "error locking file %s", be_lun->dev_path);
/* Hold the opening thread's credentials for later file I/O. */
1802 file_data->cred = crhold(curthread->td_ucred);
1803 if (params->lun_size_bytes != 0)
1804 be_lun->size_bytes = params->lun_size_bytes;
1806 be_lun->size_bytes = vattr.va_size;
1808 * We set the multi thread flag for file operations because all
1809 * filesystems (in theory) are capable of allowing multiple readers
1810 * of a file at once. So we want to get the maximum possible
1813 be_lun->flags |= CTL_BE_BLOCK_LUN_MULTI_THREAD;
1816 * For files we can use any logical block size. Prefer 512 bytes
1817 * for compatibility reasons. If file's vattr.va_blocksize
1818 * (preferred I/O block size) is bigger and multiple to chosen
1819 * logical block size -- report it as physical block size.
1821 if (params->blocksize_bytes != 0)
1822 be_lun->blocksize = params->blocksize_bytes;
1824 be_lun->blocksize = 512;
1826 us = ps = vattr.va_blocksize;
/* LUN options may override the physical block size/offset hints. */
1829 value = ctl_get_opt(&be_lun->ctl_be_lun.options, "pblocksize");
1831 ctl_expand_number(value, &ps);
1832 value = ctl_get_opt(&be_lun->ctl_be_lun.options, "pblockoffset");
1834 ctl_expand_number(value, &po);
1835 pss = ps / be_lun->blocksize;
1836 pos = po / be_lun->blocksize;
/* Accept only power-of-2 multiples of the logical block size. */
1837 if ((pss > 0) && (pss * be_lun->blocksize == ps) && (pss >= pos) &&
1838 ((pss & (pss - 1)) == 0) && (pos * be_lun->blocksize == po)) {
1839 be_lun->pblockexp = fls(pss) - 1;
1840 be_lun->pblockoff = (pss - pos) % pss;
/* Same derivation for the UNMAP (deallocation) granularity. */
1843 value = ctl_get_opt(&be_lun->ctl_be_lun.options, "ublocksize");
1845 ctl_expand_number(value, &us);
1846 value = ctl_get_opt(&be_lun->ctl_be_lun.options, "ublockoffset");
1848 ctl_expand_number(value, &uo);
1849 uss = us / be_lun->blocksize;
1850 uos = uo / be_lun->blocksize;
1851 if ((uss > 0) && (uss * be_lun->blocksize == us) && (uss >= uos) &&
1852 ((uss & (uss - 1)) == 0) && (uos * be_lun->blocksize == uo)) {
1853 be_lun->ublockexp = fls(uss) - 1;
1854 be_lun->ublockoff = (uss - uos) % uss;
1858 * Sanity check. The media size has to be at least one
1861 if (be_lun->size_bytes < be_lun->blocksize) {
1863 snprintf(req->error_str, sizeof(req->error_str),
1864 "file %s size %ju < block size %u", be_lun->dev_path,
1865 (uintmax_t)be_lun->size_bytes, be_lun->blocksize);
1868 be_lun->opttxferlen = CTLBLK_MAX_IO_SIZE / be_lun->blocksize;
/*
 * Configure a LUN backed by a character (disk) device.  Installs the
 * device-based method vector (with zvol-specific dispatch when the
 * backing driver is "zvol"), queries sector size, media size and
 * stripe geometry via disk ioctls, validates any user-requested
 * blocksize/LUN size against the device, and probes UNMAP support
 * via the GEOM::candelete attribute (overridable by the "unmap" LUN
 * option).  Returns 0 on success or an errno with req->error_str
 * filled in.  (Several original lines are missing from this view.)
 */
1873 ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
1875 struct ctl_lun_create_params *params;
1878 struct cdevsw *devsw;
1880 int error, atomic, maxio, unmap;
1881 off_t ps, pss, po, pos, us, uss, uo, uos;
1883 params = &be_lun->params;
1885 be_lun->dev_type = CTL_BE_BLOCK_DEV;
1886 be_lun->backend.dev.cdev = be_lun->vn->v_rdev;
/* Hold a threadcount reference on the devsw for the LUN's lifetime. */
1887 be_lun->backend.dev.csw = dev_refthread(be_lun->backend.dev.cdev,
1888 &be_lun->backend.dev.dev_ref);
1889 if (be_lun->backend.dev.csw == NULL)
1890 panic("Unable to retrieve device switch");
/* ZFS zvols get a dedicated dispatch path and full-size atomic I/O. */
1891 if (strcmp(be_lun->backend.dev.csw->d_name, "zvol") == 0) {
1892 be_lun->dispatch = ctl_be_block_dispatch_zvol;
1893 be_lun->get_lba_status = ctl_be_block_gls_zvol;
1894 atomic = maxio = CTLBLK_MAX_IO_SIZE;
1896 be_lun->dispatch = ctl_be_block_dispatch_dev;
1898 maxio = be_lun->backend.dev.cdev->si_iosize_max;
1901 if (maxio > CTLBLK_MAX_IO_SIZE)
1902 maxio = CTLBLK_MAX_IO_SIZE;
1904 be_lun->lun_flush = ctl_be_block_flush_dev;
1905 be_lun->getattr = ctl_be_block_getattr_dev;
1907 error = VOP_GETATTR(be_lun->vn, &vattr, NOCRED);
1909 snprintf(req->error_str, sizeof(req->error_str),
1910 "error getting vnode attributes for device %s",
1915 dev = be_lun->vn->v_rdev;
1916 devsw = dev->si_devsw;
/* All geometry below is obtained through the device's ioctl path. */
1917 if (!devsw->d_ioctl) {
1918 snprintf(req->error_str, sizeof(req->error_str),
1919 "no d_ioctl for device %s!",
1924 error = devsw->d_ioctl(dev, DIOCGSECTORSIZE,
1925 (caddr_t)&be_lun->blocksize, FREAD,
1928 snprintf(req->error_str, sizeof(req->error_str),
1929 "error %d returned for DIOCGSECTORSIZE ioctl "
1930 "on %s!", error, be_lun->dev_path);
1935 * If the user has asked for a blocksize that is greater than the
1936 * backing device's blocksize, we can do it only if the blocksize
1937 * the user is asking for is an even multiple of the underlying
1938 * device's blocksize.
1940 if ((params->blocksize_bytes != 0)
1941 && (params->blocksize_bytes > be_lun->blocksize)) {
1942 uint32_t bs_multiple, tmp_blocksize;
1944 bs_multiple = params->blocksize_bytes / be_lun->blocksize;
1946 tmp_blocksize = bs_multiple * be_lun->blocksize;
1948 if (tmp_blocksize == params->blocksize_bytes) {
1949 be_lun->blocksize = params->blocksize_bytes;
1951 snprintf(req->error_str, sizeof(req->error_str),
1952 "requested blocksize %u is not an even "
1953 "multiple of backing device blocksize %u",
1954 params->blocksize_bytes,
/* A smaller-than-device blocksize can never work. */
1959 } else if ((params->blocksize_bytes != 0)
1960 && (params->blocksize_bytes != be_lun->blocksize)) {
1961 snprintf(req->error_str, sizeof(req->error_str),
1962 "requested blocksize %u < backing device "
1963 "blocksize %u", params->blocksize_bytes,
1968 error = devsw->d_ioctl(dev, DIOCGMEDIASIZE,
1969 (caddr_t)&be_lun->size_bytes, FREAD,
1972 snprintf(req->error_str, sizeof(req->error_str),
1973 "error %d returned for DIOCGMEDIASIZE "
1974 " ioctl on %s!", error,
/* User-requested LUN size may shrink, but never exceed, the media. */
1979 if (params->lun_size_bytes != 0) {
1980 if (params->lun_size_bytes > be_lun->size_bytes) {
1981 snprintf(req->error_str, sizeof(req->error_str),
1982 "requested LUN size %ju > backing device "
1984 (uintmax_t)params->lun_size_bytes,
1985 (uintmax_t)be_lun->size_bytes);
1989 be_lun->size_bytes = params->lun_size_bytes;
/* Default physical geometry from the device's stripe size/offset. */
1992 error = devsw->d_ioctl(dev, DIOCGSTRIPESIZE,
1993 (caddr_t)&ps, FREAD, curthread);
1997 error = devsw->d_ioctl(dev, DIOCGSTRIPEOFFSET,
1998 (caddr_t)&po, FREAD, curthread);
/* LUN options may override the probed physical geometry. */
2005 value = ctl_get_opt(&be_lun->ctl_be_lun.options, "pblocksize");
2007 ctl_expand_number(value, &ps);
2008 value = ctl_get_opt(&be_lun->ctl_be_lun.options, "pblockoffset");
2010 ctl_expand_number(value, &po);
2011 pss = ps / be_lun->blocksize;
2012 pos = po / be_lun->blocksize;
/* Accept only power-of-2 multiples of the logical block size. */
2013 if ((pss > 0) && (pss * be_lun->blocksize == ps) && (pss >= pos) &&
2014 ((pss & (pss - 1)) == 0) && (pos * be_lun->blocksize == po)) {
2015 be_lun->pblockexp = fls(pss) - 1;
2016 be_lun->pblockoff = (pss - pos) % pss;
2019 value = ctl_get_opt(&be_lun->ctl_be_lun.options, "ublocksize");
2021 ctl_expand_number(value, &us);
2022 value = ctl_get_opt(&be_lun->ctl_be_lun.options, "ublockoffset");
2024 ctl_expand_number(value, &uo);
2025 uss = us / be_lun->blocksize;
2026 uos = uo / be_lun->blocksize;
2027 if ((uss > 0) && (uss * be_lun->blocksize == us) && (uss >= uos) &&
2028 ((uss & (uss - 1)) == 0) && (uos * be_lun->blocksize == uo)) {
2029 be_lun->ublockexp = fls(uss) - 1;
2030 be_lun->ublockoff = (uss - uos) % uss;
2033 be_lun->atomicblock = atomic / be_lun->blocksize;
2034 be_lun->opttxferlen = maxio / be_lun->blocksize;
/* Probe BIO_DELETE support; only zvols use the dev unmap path here. */
2036 if (be_lun->dispatch == ctl_be_block_dispatch_zvol) {
2039 struct diocgattr_arg arg;
2041 strlcpy(arg.name, "GEOM::candelete", sizeof(arg.name));
2042 arg.len = sizeof(arg.value.i);
2043 error = devsw->d_ioctl(dev, DIOCGATTR,
2044 (caddr_t)&arg, FREAD, curthread);
2045 unmap = (error == 0) ? arg.value.i : 0;
2047 value = ctl_get_opt(&be_lun->ctl_be_lun.options, "unmap");
2049 unmap = (strcmp(value, "on") == 0);
2051 be_lun->unmap = ctl_be_block_unmap_dev;
/*
 * Tear down the backing store of a LUN: drop the devsw threadcount
 * reference (device LUNs), close the vnode, release the held file
 * credentials (file LUNs), and reset dev_type to NONE.  Safe to call
 * on a partially configured LUN.  (Some original lines are missing
 * from this view.)
 */
2057 ctl_be_block_close(struct ctl_be_block_lun *be_lun)
2061 int flags = FREAD | FWRITE;
2063 switch (be_lun->dev_type) {
2064 case CTL_BE_BLOCK_DEV:
2065 if (be_lun->backend.dev.csw) {
2066 dev_relthread(be_lun->backend.dev.cdev,
2067 be_lun->backend.dev.dev_ref);
2068 be_lun->backend.dev.csw = NULL;
2069 be_lun->backend.dev.cdev = NULL;
2072 case CTL_BE_BLOCK_FILE:
2074 case CTL_BE_BLOCK_NONE:
2077 panic("Unexpected backend type.");
2081 (void)vn_close(be_lun->vn, flags, NOCRED, curthread);
2084 switch (be_lun->dev_type) {
2085 case CTL_BE_BLOCK_DEV:
2087 case CTL_BE_BLOCK_FILE:
/* Release the credential held at open time for file I/O. */
2088 if (be_lun->backend.file.cred != NULL) {
2089 crfree(be_lun->backend.file.cred);
2090 be_lun->backend.file.cred = NULL;
2093 case CTL_BE_BLOCK_NONE:
2096 panic("Unexpected backend type.");
2099 be_lun->dev_type = CTL_BE_BLOCK_NONE;
/*
 * Open the backing store named by be_lun->dev_path and finish LUN
 * setup.  Seeds the kernel thread's cwd/root/jail dirs from rootvnode
 * if unset (needed for namei from a kernel context), retries the open
 * with a "/dev/" prefix for relative names, then branches to the
 * device or file open routine.  On success, computes blocksize_shift
 * and size_blocks.  Returns 0 or an errno with req->error_str set.
 */
2107 ctl_be_block_open(struct ctl_be_block_softc *softc,
2108 struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
2110 struct nameidata nd;
2115 * XXX KDM allow a read-only option?
2117 flags = FREAD | FWRITE;
2120 if (rootvnode == NULL) {
2121 snprintf(req->error_str, sizeof(req->error_str),
2122 "Root filesystem is not mounted");
/* Kernel threads may lack cwd/root/jail dirs; default to root. */
2126 if (!curthread->td_proc->p_fd->fd_cdir) {
2127 curthread->td_proc->p_fd->fd_cdir = rootvnode;
2130 if (!curthread->td_proc->p_fd->fd_rdir) {
2131 curthread->td_proc->p_fd->fd_rdir = rootvnode;
2134 if (!curthread->td_proc->p_fd->fd_jdir) {
2135 curthread->td_proc->p_fd->fd_jdir = rootvnode;
2140 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, be_lun->dev_path, curthread);
2141 error = vn_open(&nd, &flags, 0, NULL);
2144 * This is the only reasonable guess we can make as far as
2145 * path if the user doesn't give us a fully qualified path.
2146 * If they want to specify a file, they need to specify the
2149 if (be_lun->dev_path[0] != '/') {
2150 char *dev_path = "/dev/";
2153 /* Try adding device path at beginning of name */
2154 dev_name = malloc(strlen(be_lun->dev_path)
2155 + strlen(dev_path) + 1,
2156 M_CTLBLK, M_WAITOK);
2158 sprintf(dev_name, "%s%s", dev_path,
2160 free(be_lun->dev_path, M_CTLBLK);
2161 be_lun->dev_path = dev_name;
2165 snprintf(req->error_str, sizeof(req->error_str),
2166 "error opening %s: %d", be_lun->dev_path, error);
2170 NDFREE(&nd, NDF_ONLY_PNBUF);
2172 be_lun->vn = nd.ni_vp;
2174 /* We only support disks and files. */
2175 if (vn_isdisk(be_lun->vn, &error)) {
2176 error = ctl_be_block_open_dev(be_lun, req);
2177 } else if (be_lun->vn->v_type == VREG) {
2178 error = ctl_be_block_open_file(be_lun, req);
2181 snprintf(req->error_str, sizeof(req->error_str),
2182 "%s is not a disk or plain file", be_lun->dev_path);
2184 VOP_UNLOCK(be_lun->vn, 0);
/* Undo any partial setup if the type-specific open failed. */
2187 ctl_be_block_close(be_lun);
2191 be_lun->blocksize_shift = fls(be_lun->blocksize) - 1;
2192 be_lun->size_blocks = be_lun->size_bytes >> be_lun->blocksize_shift;
/*
 * Create a new block-backend LUN from a userland CTL_LUNREQ_CREATE
 * request.  Allocates and initializes the per-LUN structure (queues,
 * locks, options, UMA buffer zone), opens the backing store for
 * direct-access LUNs, populates the ctl_be_lun descriptor (geometry,
 * flags, serial number, device ID), creates the worker taskqueue,
 * registers the LUN with CTL and waits for configuration to finish,
 * then sets up devstat.  On any failure, everything allocated so far
 * is torn down and req->status is set to CTL_LUN_ERROR.  (Several
 * original lines, including error-path labels, are missing from this
 * view.)
 */
2198 ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
2200 struct ctl_be_block_lun *be_lun;
2201 struct ctl_lun_create_params *params;
2202 char num_thread_str[16];
2205 int retval, num_threads;
2206 int tmp_num_threads;
2208 params = &req->reqdata.create;
2210 req->status = CTL_LUN_OK;
2212 num_threads = cbb_num_threads;
2214 be_lun = malloc(sizeof(*be_lun), M_CTLBLK, M_ZERO | M_WAITOK);
2216 be_lun->params = req->reqdata.create;
2217 be_lun->softc = softc;
2218 STAILQ_INIT(&be_lun->input_queue);
2219 STAILQ_INIT(&be_lun->config_read_queue);
2220 STAILQ_INIT(&be_lun->config_write_queue);
2221 STAILQ_INIT(&be_lun->datamove_queue);
2222 sprintf(be_lun->lunname, "cblk%d", softc->num_luns);
2223 mtx_init(&be_lun->io_lock, "cblk io lock", NULL, MTX_DEF);
2224 mtx_init(&be_lun->queue_lock, "cblk queue lock", NULL, MTX_DEF);
2225 ctl_init_opts(&be_lun->ctl_be_lun.options,
2226 req->num_be_args, req->kern_be_args);
/* Per-LUN zone for fixed-size S/G data buffers. */
2228 be_lun->lun_zone = uma_zcreate(be_lun->lunname, CTLBLK_MAX_SEG,
2229 NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0);
2231 if (be_lun->lun_zone == NULL) {
2232 snprintf(req->error_str, sizeof(req->error_str),
2233 "error allocating UMA zone");
2237 if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
2238 be_lun->ctl_be_lun.lun_type = params->device_type;
2240 be_lun->ctl_be_lun.lun_type = T_DIRECT;
/* Direct-access LUNs need a backing file/device; open it now. */
2242 if (be_lun->ctl_be_lun.lun_type == T_DIRECT) {
2243 value = ctl_get_opt(&be_lun->ctl_be_lun.options, "file");
2244 if (value == NULL) {
2245 snprintf(req->error_str, sizeof(req->error_str),
2246 "no file argument specified");
2249 be_lun->dev_path = strdup(value, M_CTLBLK);
2250 be_lun->blocksize = 512;
2251 be_lun->blocksize_shift = fls(be_lun->blocksize) - 1;
2253 retval = ctl_be_block_open(softc, be_lun, req);
2256 req->status = CTL_LUN_WARNING;
2260 * For processor devices, we don't have any size.
2262 be_lun->blocksize = 0;
2263 be_lun->pblockexp = 0;
2264 be_lun->pblockoff = 0;
2265 be_lun->ublockexp = 0;
2266 be_lun->ublockoff = 0;
2267 be_lun->size_blocks = 0;
2268 be_lun->size_bytes = 0;
2269 be_lun->ctl_be_lun.maxlba = 0;
2272 * Default to just 1 thread for processor devices.
2278 * XXX This searching loop might be refactored to be combined with
2281 value = ctl_get_opt(&be_lun->ctl_be_lun.options, "num_threads");
2282 if (value != NULL) {
2283 tmp_num_threads = strtol(value, NULL, 0);
2286 * We don't let the user specify less than one
2287 * thread, but hope he's clueful enough not to
2288 * specify 1000 threads.
2290 if (tmp_num_threads < 1) {
2291 snprintf(req->error_str, sizeof(req->error_str),
2292 "invalid number of threads %s",
2296 num_threads = tmp_num_threads;
/* Fill in the descriptor CTL uses to present this LUN. */
2299 be_lun->flags = CTL_BE_BLOCK_LUN_UNCONFIGURED;
2300 be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY;
2301 if (be_lun->vn == NULL)
2302 be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_OFFLINE;
2303 if (be_lun->unmap != NULL)
2304 be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_UNMAP;
2305 if (be_lun->dispatch != ctl_be_block_dispatch_dev)
2306 be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_SERSEQ_READ;
2307 be_lun->ctl_be_lun.be_lun = be_lun;
2308 be_lun->ctl_be_lun.maxlba = (be_lun->size_blocks == 0) ?
2309 0 : (be_lun->size_blocks - 1);
2310 be_lun->ctl_be_lun.blocksize = be_lun->blocksize;
2311 be_lun->ctl_be_lun.pblockexp = be_lun->pblockexp;
2312 be_lun->ctl_be_lun.pblockoff = be_lun->pblockoff;
2313 be_lun->ctl_be_lun.ublockexp = be_lun->ublockexp;
2314 be_lun->ctl_be_lun.ublockoff = be_lun->ublockoff;
2315 be_lun->ctl_be_lun.atomicblock = be_lun->atomicblock;
2316 be_lun->ctl_be_lun.opttxferlen = be_lun->opttxferlen;
2317 /* Tell the user the blocksize we ended up using */
2318 params->lun_size_bytes = be_lun->size_bytes;
2319 params->blocksize_bytes = be_lun->blocksize;
2320 if (params->flags & CTL_LUN_FLAG_ID_REQ) {
2321 be_lun->ctl_be_lun.req_lun_id = params->req_lun_id;
2322 be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ;
2324 be_lun->ctl_be_lun.req_lun_id = 0;
2326 be_lun->ctl_be_lun.lun_shutdown = ctl_be_block_lun_shutdown;
2327 be_lun->ctl_be_lun.lun_config_status =
2328 ctl_be_block_lun_config_status;
2329 be_lun->ctl_be_lun.be = &ctl_be_block_driver;
/* Generate a serial number if the user did not supply one. */
2331 if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
2332 snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
2334 strncpy((char *)be_lun->ctl_be_lun.serial_num, tmpstr,
2335 MIN(sizeof(be_lun->ctl_be_lun.serial_num),
2338 /* Tell the user what we used for a serial number */
2339 strncpy((char *)params->serial_num, tmpstr,
2340 MIN(sizeof(params->serial_num), sizeof(tmpstr)));
2342 strncpy((char *)be_lun->ctl_be_lun.serial_num,
2344 MIN(sizeof(be_lun->ctl_be_lun.serial_num),
2345 sizeof(params->serial_num)));
/* Same for the device ID. */
2347 if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
2348 snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
2349 strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr,
2350 MIN(sizeof(be_lun->ctl_be_lun.device_id),
2353 /* Tell the user what we used for a device ID */
2354 strncpy((char *)params->device_id, tmpstr,
2355 MIN(sizeof(params->device_id), sizeof(tmpstr)));
2357 strncpy((char *)be_lun->ctl_be_lun.device_id,
2359 MIN(sizeof(be_lun->ctl_be_lun.device_id),
2360 sizeof(params->device_id)));
2363 TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_be_block_worker, be_lun);
2365 be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
2366 taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
2368 if (be_lun->io_taskqueue == NULL) {
2369 snprintf(req->error_str, sizeof(req->error_str),
2370 "unable to create taskqueue");
2375 * Note that we start the same number of threads by default for
2376 * both the file case and the block device case. For the file
2377 * case, we need multiple threads to allow concurrency, because the
2378 * vnode interface is designed to be a blocking interface. For the
2379 * block device case, ZFS zvols at least will block the caller's
2380 * context in many instances, and so we need multiple threads to
2381 * overcome that problem. Other block devices don't need as many
2382 * threads, but they shouldn't cause too many problems.
2384 * If the user wants to just have a single thread for a block
2385 * device, he can specify that when the LUN is created, or change
2386 * the tunable/sysctl to alter the default number of threads.
2388 retval = taskqueue_start_threads(&be_lun->io_taskqueue,
2389 /*num threads*/num_threads,
2392 "%s taskq", be_lun->lunname);
2397 be_lun->num_threads = num_threads;
2399 mtx_lock(&softc->lock);
2401 STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);
2403 mtx_unlock(&softc->lock);
/* Register with CTL; on failure, unlink from our LUN list. */
2405 retval = ctl_add_lun(&be_lun->ctl_be_lun);
2407 mtx_lock(&softc->lock);
2408 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun,
2411 mtx_unlock(&softc->lock);
2412 snprintf(req->error_str, sizeof(req->error_str),
2413 "ctl_add_lun() returned error %d, see dmesg for "
2419 mtx_lock(&softc->lock);
2422 * Tell the config_status routine that we're waiting so it won't
2423 * clean up the LUN in the event of an error.
2425 be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING;
/* Sleep until CTL finishes configuring the LUN (interruptible). */
2427 while (be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) {
2428 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0);
2429 if (retval == EINTR)
2432 be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING;
2434 if (be_lun->flags & CTL_BE_BLOCK_LUN_CONFIG_ERR) {
2435 snprintf(req->error_str, sizeof(req->error_str),
2436 "LUN configuration error, see dmesg for details");
2437 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun,
2440 mtx_unlock(&softc->lock);
2443 params->req_lun_id = be_lun->ctl_be_lun.lun_id;
2446 mtx_unlock(&softc->lock);
2448 be_lun->disk_stats = devstat_new_entry("cbb", params->req_lun_id,
2450 DEVSTAT_ALL_SUPPORTED,
2451 be_lun->ctl_be_lun.lun_type
2452 | DEVSTAT_TYPE_IF_OTHER,
2453 DEVSTAT_PRIORITY_OTHER);
/* Error path: unwind everything allocated above, newest first. */
2458 req->status = CTL_LUN_ERROR;
2460 if (be_lun->io_taskqueue != NULL)
2461 taskqueue_free(be_lun->io_taskqueue);
2462 ctl_be_block_close(be_lun);
2463 if (be_lun->dev_path != NULL)
2464 free(be_lun->dev_path, M_CTLBLK);
2465 if (be_lun->lun_zone != NULL)
2466 uma_zdestroy(be_lun->lun_zone);
2467 ctl_free_opts(&be_lun->ctl_be_lun.options);
2468 mtx_destroy(&be_lun->queue_lock);
2469 mtx_destroy(&be_lun->io_lock);
2470 free(be_lun, M_CTLBLK);
/*
 * Remove a LUN (CTL_LUNREQ_RM).  Looks up the LUN by id, disables and
 * invalidates it in CTL, waits (interruptibly) for CTL to finish
 * tearing it down, then unlinks it and frees all per-LUN resources.
 * On failure req->status is set to CTL_LUN_ERROR with error_str
 * filled in.  (Some original lines, including error-path labels, are
 * missing from this view.)
 */
2476 ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
2478 struct ctl_lun_rm_params *params;
2479 struct ctl_be_block_lun *be_lun;
2482 params = &req->reqdata.rm;
2484 mtx_lock(&softc->lock);
/* Find the LUN with the requested id on our list. */
2488 STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
2489 if (be_lun->ctl_be_lun.lun_id == params->lun_id)
2492 mtx_unlock(&softc->lock);
2494 if (be_lun == NULL) {
2495 snprintf(req->error_str, sizeof(req->error_str),
2496 "LUN %u is not managed by the block backend",
2501 retval = ctl_disable_lun(&be_lun->ctl_be_lun);
2504 snprintf(req->error_str, sizeof(req->error_str),
2505 "error %d returned from ctl_disable_lun() for "
2506 "LUN %d", retval, params->lun_id);
2511 retval = ctl_invalidate_lun(&be_lun->ctl_be_lun);
2513 snprintf(req->error_str, sizeof(req->error_str),
2514 "error %d returned from ctl_invalidate_lun() for "
2515 "LUN %d", retval, params->lun_id);
2519 mtx_lock(&softc->lock);
2521 be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING;
/* Wait until CTL's shutdown callback marks the LUN unconfigured. */
2523 while ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) {
2524 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblk", 0);
2525 if (retval == EINTR)
2529 be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING;
2531 if ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) {
2532 snprintf(req->error_str, sizeof(req->error_str),
2533 "interrupted waiting for LUN to be freed");
2534 mtx_unlock(&softc->lock);
2538 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_block_lun, links);
2541 mtx_unlock(&softc->lock);
/* Drain any in-flight work before freeing the taskqueue. */
2543 taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task);
2545 taskqueue_free(be_lun->io_taskqueue);
2547 ctl_be_block_close(be_lun);
2549 if (be_lun->disk_stats != NULL)
2550 devstat_remove_entry(be_lun->disk_stats);
2552 uma_zdestroy(be_lun->lun_zone);
2554 ctl_free_opts(&be_lun->ctl_be_lun.options);
2555 free(be_lun->dev_path, M_CTLBLK);
2556 mtx_destroy(&be_lun->queue_lock);
2557 mtx_destroy(&be_lun->io_lock);
2558 free(be_lun, M_CTLBLK);
2560 req->status = CTL_LUN_OK;
2566 req->status = CTL_LUN_ERROR;
/*
 * Refresh the size of a file-backed LUN for a modify request: use the
 * explicit size recorded in the LUN's create params if one was given,
 * otherwise re-read the backing file's current size via VOP_GETATTR().
 * (Declarations of "error"/"vattr" and the error/return lines are
 * elided in this listing.)
 */
2572 ctl_be_block_modify_file(struct ctl_be_block_lun *be_lun,
2573 struct ctl_lun_req *req)
2577 struct ctl_lun_create_params *params = &be_lun->params;
2579 if (params->lun_size_bytes != 0) {
2580 be_lun->size_bytes = params->lun_size_bytes;
/* No explicit size: take the on-disk file size under a shared vnode lock. */
2582 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);
2583 error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred);
2584 VOP_UNLOCK(be_lun->vn, 0);
2586 snprintf(req->error_str, sizeof(req->error_str),
2587 "error calling VOP_GETATTR() for file %s",
2592 be_lun->size_bytes = vattr.va_size;
/*
 * Refresh the size of a device-backed LUN for a modify request.
 * Queries the backing device's media size with the DIOCGMEDIASIZE
 * ioctl and rejects an explicitly requested size larger than the
 * device.  (Error/return lines are elided in this listing.)
 */
2599 ctl_be_block_modify_dev(struct ctl_be_block_lun *be_lun,
2600 struct ctl_lun_req *req)
2602 struct ctl_be_block_devdata *dev_data;
2604 struct ctl_lun_create_params *params = &be_lun->params;
2605 uint64_t size_bytes;
2607 dev_data = &be_lun->backend.dev;
/* The device must provide an ioctl entry point to query media size. */
2608 if (!dev_data->csw->d_ioctl) {
2609 snprintf(req->error_str, sizeof(req->error_str),
2610 "no d_ioctl for device %s!", be_lun->dev_path);
2614 error = dev_data->csw->d_ioctl(dev_data->cdev, DIOCGMEDIASIZE,
2615 (caddr_t)&size_bytes, FREAD,
2618 snprintf(req->error_str, sizeof(req->error_str),
2619 "error %d returned for DIOCGMEDIASIZE ioctl "
2620 "on %s!", error, be_lun->dev_path);
/* An explicit size must fit within the backing device. */
2624 if (params->lun_size_bytes != 0) {
2625 if (params->lun_size_bytes > size_bytes) {
2626 snprintf(req->error_str, sizeof(req->error_str),
2627 "requested LUN size %ju > backing device "
2629 (uintmax_t)params->lun_size_bytes,
2630 (uintmax_t)size_bytes);
2634 be_lun->size_bytes = params->lun_size_bytes;
/* Otherwise track the device's current media size. */
2636 be_lun->size_bytes = size_bytes;
/*
 * Handle a CTL_LUNREQ_MODIFY request: re-open or re-size an existing
 * LUN and, when the size changed, push the new capacity parameters
 * into CTL and notify it with ctl_lun_capacity_changed().
 * (Declarations of "error"/"oldsize", error gotos, and closing braces
 * are elided in this listing.)
 */
2643 ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
2645 struct ctl_lun_modify_params *params;
2646 struct ctl_be_block_lun *be_lun;
2650 params = &req->reqdata.modify;
/* Locate the target LUN by id under the softc lock. */
2652 mtx_lock(&softc->lock);
2654 STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
2655 if (be_lun->ctl_be_lun.lun_id == params->lun_id)
2658 mtx_unlock(&softc->lock);
2660 if (be_lun == NULL) {
2661 snprintf(req->error_str, sizeof(req->error_str),
2662 "LUN %u is not managed by the block backend",
/* Record the requested size in the LUN's saved create params. */
2667 be_lun->params.lun_size_bytes = params->lun_size_bytes;
/*
 * Re-probe the backing store.  The path taken depends on what backs
 * the LUN: nothing open yet, a regular file, or a character device.
 */
2669 oldsize = be_lun->size_bytes;
2670 if (be_lun->vn == NULL)
2671 error = ctl_be_block_open(softc, be_lun, req);
2672 else if (be_lun->vn->v_type == VREG)
2673 error = ctl_be_block_modify_file(be_lun, req);
2675 error = ctl_be_block_modify_dev(be_lun, req);
/* Size changed: recompute blocks and republish capacity to CTL. */
2677 if (error == 0 && be_lun->size_bytes != oldsize) {
2678 be_lun->size_blocks = be_lun->size_bytes >>
2679 be_lun->blocksize_shift;
2682 * The maximum LBA is the size - 1.
2684 * XXX: Note that this field is being updated without locking,
2685 * which might cause problems on 32-bit architectures.
2687 if (be_lun->unmap != NULL)
2688 be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_UNMAP;
2689 be_lun->ctl_be_lun.maxlba = (be_lun->size_blocks == 0) ?
2690 0 : (be_lun->size_blocks - 1);
2691 be_lun->ctl_be_lun.blocksize = be_lun->blocksize;
2692 be_lun->ctl_be_lun.pblockexp = be_lun->pblockexp;
2693 be_lun->ctl_be_lun.pblockoff = be_lun->pblockoff;
2694 be_lun->ctl_be_lun.ublockexp = be_lun->ublockexp;
2695 be_lun->ctl_be_lun.ublockoff = be_lun->ublockoff;
2696 be_lun->ctl_be_lun.atomicblock = be_lun->atomicblock;
2697 be_lun->ctl_be_lun.opttxferlen = be_lun->opttxferlen;
2698 ctl_lun_capacity_changed(&be_lun->ctl_be_lun);
/* A LUN that grew from zero size can now be brought online. */
2699 if (oldsize == 0 && be_lun->size_blocks != 0)
2700 ctl_lun_online(&be_lun->ctl_be_lun);
2703 /* Tell the user the exact size we ended up using */
2704 params->lun_size_bytes = be_lun->size_bytes;
/* A backend error after lookup is reported as a warning, not failure. */
2706 req->status = error ? CTL_LUN_WARNING : CTL_LUN_OK;
2711 req->status = CTL_LUN_ERROR;
/*
 * CTL shutdown callback for a block-backend LUN: mark the LUN
 * unconfigured under softc->lock and, if a create/rm thread is
 * sleeping on it, wake that thread.
 */
2717 ctl_be_block_lun_shutdown(void *be_lun)
2719 struct ctl_be_block_lun *lun;
2720 struct ctl_be_block_softc *softc;
2722 lun = (struct ctl_be_block_lun *)be_lun;
2726 mtx_lock(&softc->lock);
2727 lun->flags |= CTL_BE_BLOCK_LUN_UNCONFIGURED;
/* NOTE(review): the if-body (presumably a wakeup(lun)) is elided here. */
2728 if (lun->flags & CTL_BE_BLOCK_LUN_WAITING)
2730 mtx_unlock(&softc->lock);
/*
 * CTL configuration-status callback.  On CTL_LUN_CONFIG_OK, clear the
 * UNCONFIGURED flag, wake any waiter, and attempt to enable the LUN;
 * on failure, record CONFIG_ERR so the creating thread can see the
 * add failed.  (Several statements, including the wakeup calls and
 * closing braces, are elided in this listing.)
 */
2735 ctl_be_block_lun_config_status(void *be_lun, ctl_lun_config_status status)
2737 struct ctl_be_block_lun *lun;
2738 struct ctl_be_block_softc *softc;
2740 lun = (struct ctl_be_block_lun *)be_lun;
2743 if (status == CTL_LUN_CONFIG_OK) {
2744 mtx_lock(&softc->lock);
2745 lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED;
2746 if (lun->flags & CTL_BE_BLOCK_LUN_WAITING)
2748 mtx_unlock(&softc->lock);
2751 * We successfully added the LUN, attempt to enable it.
/* Enable failure is logged; then the LUN is invalidated as cleanup. */
2753 if (ctl_enable_lun(&lun->ctl_be_lun) != 0) {
2754 printf("%s: ctl_enable_lun() failed!\n", __func__);
2755 if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) {
2756 printf("%s: ctl_invalidate_lun() failed!\n",
/* Configuration failed: flag the error for the waiting creator. */
2765 mtx_lock(&softc->lock);
2766 lun->flags &= ~CTL_BE_BLOCK_LUN_UNCONFIGURED;
2767 lun->flags |= CTL_BE_BLOCK_LUN_CONFIG_ERR;
2769 mtx_unlock(&softc->lock);
/*
 * Backend entry point for configuration (non-data) write commands.
 * SYNCHRONIZE CACHE is queued to the LUN's task queue for the worker
 * thread; START STOP UNIT is handled inline via ctl_start_lun() /
 * ctl_stop_lun(); anything else is rejected as an invalid opcode.
 * (break statements, closing braces, and the declaration of "retval"
 * are elided in this listing.)
 */
2774 ctl_be_block_config_write(union ctl_io *io)
2776 struct ctl_be_block_lun *be_lun;
2777 struct ctl_be_lun *ctl_be_lun;
2782 DPRINTF("entered\n");
/* Recover the backend LUN from the io's private backend-LUN pointer. */
2784 ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
2785 CTL_PRIV_BACKEND_LUN].ptr;
2786 be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun;
2788 switch (io->scsiio.cdb[0]) {
2789 case SYNCHRONIZE_CACHE:
2790 case SYNCHRONIZE_CACHE_16:
2795 * The upper level CTL code will filter out any CDBs with
2796 * the immediate bit set and return the proper error.
2798 * We don't really need to worry about what LBA range the
2799 * user asked to be synced out. When they issue a sync
2800 * cache command, we'll sync out the whole thing.
/* Hand the request to the worker thread via the config write queue. */
2802 mtx_lock(&be_lun->queue_lock);
2803 STAILQ_INSERT_TAIL(&be_lun->config_write_queue, &io->io_hdr,
2805 mtx_unlock(&be_lun->queue_lock);
2806 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
2808 case START_STOP_UNIT: {
2809 struct scsi_start_stop_unit *cdb;
2811 cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
/* Start or stop the LUN depending on the SSS_START bit in the CDB. */
2813 if (cdb->how & SSS_START)
2814 retval = ctl_start_lun(ctl_be_lun);
2816 retval = ctl_stop_lun(ctl_be_lun);
2818 * XXX KDM Copan-specific offline behavior.
2819 * Figure out a reasonable way to port this?
2823 && (cdb->byte2 & SSS_ONOFFLINE))
2824 retval = ctl_lun_offline(ctl_be_lun);
2829 * In general, the above routines should not fail. They
2830 * just set state for the LUN. So we've got something
2831 * pretty wrong here if we can't start or stop the LUN.
2834 ctl_set_internal_failure(&io->scsiio,
2836 /*retry_count*/ 0xf051);
2837 retval = CTL_RETVAL_COMPLETE;
2839 ctl_set_success(&io->scsiio);
2841 ctl_config_write_done(io);
/* Unknown opcode: reject and complete immediately. */
2845 ctl_set_invalid_opcode(&io->scsiio);
2846 ctl_config_write_done(io);
2847 retval = CTL_RETVAL_COMPLETE;
/*
 * Backend entry point for configuration read commands.  A SERVICE
 * ACTION IN with the SGLS (GET LBA STATUS) service action is queued
 * to the worker thread; any other service action or opcode is
 * rejected and completed immediately.  (Some call arguments, breaks,
 * and braces are elided in this listing.)
 */
2855 ctl_be_block_config_read(union ctl_io *io)
2857 struct ctl_be_block_lun *be_lun;
2858 struct ctl_be_lun *ctl_be_lun;
2861 DPRINTF("entered\n");
/* Recover the backend LUN from the io's private backend-LUN pointer. */
2863 ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
2864 CTL_PRIV_BACKEND_LUN].ptr;
2865 be_lun = (struct ctl_be_block_lun *)ctl_be_lun->be_lun;
2867 switch (io->scsiio.cdb[0]) {
2868 case SERVICE_ACTION_IN:
2869 if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) {
/* Queue to the worker thread; completion happens asynchronously. */
2870 mtx_lock(&be_lun->queue_lock);
2871 STAILQ_INSERT_TAIL(&be_lun->config_read_queue,
2872 &io->io_hdr, links);
2873 mtx_unlock(&be_lun->queue_lock);
2874 taskqueue_enqueue(be_lun->io_taskqueue,
2876 retval = CTL_RETVAL_QUEUED;
/* Unsupported service action: flag the offending CDB field. */
2879 ctl_set_invalid_field(&io->scsiio,
2885 ctl_config_read_done(io);
2886 retval = CTL_RETVAL_COMPLETE;
2889 ctl_set_invalid_opcode(&io->scsiio);
2890 ctl_config_read_done(io);
2891 retval = CTL_RETVAL_COMPLETE;
/*
 * Emit backend-specific LUN information — the worker thread count —
 * as a <num_threads> XML element into the supplied sbuf.  Each
 * sbuf_printf() result is captured in "retval"; the error-check
 * branches (and retval's declaration) are elided in this listing.
 */
2899 ctl_be_block_lun_info(void *be_lun, struct sbuf *sb)
2901 struct ctl_be_block_lun *lun;
2904 lun = (struct ctl_be_block_lun *)be_lun;
2907 retval = sbuf_printf(sb, "\t<num_threads>");
2912 retval = sbuf_printf(sb, "%d", lun->num_threads);
2917 retval = sbuf_printf(sb, "</num_threads>\n");
/*
 * Return a named attribute of the LUN via the backing store's
 * getattr hook, or UINT64_MAX when the backing store provides no
 * such hook.
 */
2925 ctl_be_block_lun_attr(void *be_lun, const char *attrname)
2927 struct ctl_be_block_lun *lun = (struct ctl_be_block_lun *)be_lun;
2929 if (lun->getattr == NULL)
2930 return (UINT64_MAX);
2931 return (lun->getattr(lun, attrname));
2935 ctl_be_block_init(void)
2937 struct ctl_be_block_softc *softc;
2940 softc = &backend_block_softc;
2943 mtx_init(&softc->lock, "ctlblock", NULL, MTX_DEF);
2944 beio_zone = uma_zcreate("beio", sizeof(struct ctl_be_block_io),
2945 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
2946 STAILQ_INIT(&softc->disk_list);
2947 STAILQ_INIT(&softc->lun_list);