2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2003 Silicon Graphics International Corp.
5 * Copyright (c) 2009-2011 Spectra Logic Corporation
6 * Copyright (c) 2012 The FreeBSD Foundation
7 * Copyright (c) 2014-2015 Alexander Motin <mav@FreeBSD.org>
10 * Portions of this software were developed by Edward Tomasz Napierala
11 * under sponsorship from the FreeBSD Foundation.
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions, and the following disclaimer,
18 * without modification.
19 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
20 * substantially similar to the "NO WARRANTY" disclaimer below
21 * ("Disclaimer") and any redistribution must be conditioned upon
22 * including a substantially similar Disclaimer requirement for further
23 * binary redistribution.
26 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
27 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
28 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
29 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
30 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
34 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
35 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGES.
38 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_block.c#5 $
41 * CAM Target Layer driver backend for block devices.
43 * Author: Ken Merry <ken@FreeBSD.org>
45 #include <sys/cdefs.h>
46 __FBSDID("$FreeBSD$");
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/kernel.h>
51 #include <sys/types.h>
52 #include <sys/kthread.h>
54 #include <sys/fcntl.h>
55 #include <sys/limits.h>
57 #include <sys/mutex.h>
58 #include <sys/condvar.h>
59 #include <sys/malloc.h>
61 #include <sys/ioccom.h>
62 #include <sys/queue.h>
64 #include <sys/endian.h>
67 #include <sys/taskqueue.h>
68 #include <sys/vnode.h>
69 #include <sys/namei.h>
70 #include <sys/mount.h>
72 #include <sys/fcntl.h>
73 #include <sys/filedesc.h>
74 #include <sys/filio.h>
77 #include <sys/module.h>
79 #include <sys/devicestat.h>
80 #include <sys/sysctl.h>
85 #include <geom/geom.h>
88 #include <cam/scsi/scsi_all.h>
89 #include <cam/scsi/scsi_da.h>
90 #include <cam/ctl/ctl_io.h>
91 #include <cam/ctl/ctl.h>
92 #include <cam/ctl/ctl_backend.h>
93 #include <cam/ctl/ctl_ioctl.h>
94 #include <cam/ctl/ctl_ha.h>
95 #include <cam/ctl/ctl_scsi_all.h>
96 #include <cam/ctl/ctl_private.h>
97 #include <cam/ctl/ctl_error.h>
100 * The idea here is that we'll allocate enough S/G space to hold a 1MB
101 * I/O. If we get an I/O larger than that, we'll split it.
103 #define CTLBLK_HALF_IO_SIZE (512 * 1024)
104 #define CTLBLK_MAX_IO_SIZE (CTLBLK_HALF_IO_SIZE * 2)
105 #define CTLBLK_MIN_SEG (128 * 1024)
106 #define CTLBLK_MAX_SEG MIN(CTLBLK_HALF_IO_SIZE, maxphys)
107 #define CTLBLK_HALF_SEGS MAX(CTLBLK_HALF_IO_SIZE / CTLBLK_MIN_SEG, 1)
108 #define CTLBLK_MAX_SEGS (CTLBLK_HALF_SEGS * 2)
109 #define CTLBLK_NUM_SEGS (CTLBLK_MAX_IO_SIZE / CTLBLK_MAX_SEG)
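/*
 * Illustrative sizing, not from the original sources: assuming maxphys of
 * 1 MB, CTLBLK_MAX_SEG = MIN(512 KB, maxphys) = 512 KB, so a full 1 MB
 * CTLBLK_MAX_IO_SIZE transfer needs only CTLBLK_NUM_SEGS = 2 segments.
 * The S/G array itself holds CTLBLK_MAX_SEGS = 8 entries, enough to cover
 * 1 MB even when segments are only CTLBLK_MIN_SEG (128 KB) long, e.g. with
 * an older 128 KB maxphys.
 */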
112 #define DPRINTF(fmt, args...) \
113 printf("cbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args)
115 #define DPRINTF(fmt, args...) do {} while(0)
119 ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND])
121 ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN])
123 SDT_PROVIDER_DEFINE(cbb);
126 CTL_BE_BLOCK_LUN_UNCONFIGURED = 0x01,
127 CTL_BE_BLOCK_LUN_WAITING = 0x04,
128 } ctl_be_block_lun_flags;
136 struct ctl_be_block_filedata {
140 union ctl_be_block_bedata {
141 struct ctl_be_block_filedata file;
144 struct ctl_be_block_io;
145 struct ctl_be_block_lun;
147 typedef void (*cbb_dispatch_t)(struct ctl_be_block_lun *be_lun,
148 struct ctl_be_block_io *beio);
149 typedef uint64_t (*cbb_getattr_t)(struct ctl_be_block_lun *be_lun,
150 const char *attrname);
153 * Backend LUN structure. There is a 1:1 mapping between a block device
154 * and a backend block LUN, and between a backend block LUN and a CTL LUN.
156 struct ctl_be_block_lun {
157 struct ctl_be_lun cbe_lun; /* Must be first element. */
158 struct ctl_lun_create_params params;
160 ctl_be_block_type dev_type;
162 union ctl_be_block_bedata backend;
163 cbb_dispatch_t dispatch;
164 cbb_dispatch_t lun_flush;
165 cbb_dispatch_t unmap;
166 cbb_dispatch_t get_lba_status;
167 cbb_getattr_t getattr;
168 uint64_t size_blocks;
170 struct ctl_be_block_softc *softc;
171 struct devstat *disk_stats;
172 ctl_be_block_lun_flags flags;
173 SLIST_ENTRY(ctl_be_block_lun) links;
174 struct taskqueue *io_taskqueue;
177 STAILQ_HEAD(, ctl_io_hdr) input_queue;
178 STAILQ_HEAD(, ctl_io_hdr) config_read_queue;
179 STAILQ_HEAD(, ctl_io_hdr) config_write_queue;
180 STAILQ_HEAD(, ctl_io_hdr) datamove_queue;
181 struct mtx_padalign io_lock;
182 struct mtx_padalign queue_lock;
186 * Overall softc structure for the block backend module.
188 struct ctl_be_block_softc {
189 struct sx modify_lock;
192 SLIST_HEAD(, ctl_be_block_lun) lun_list;
193 uma_zone_t beio_zone;
194 uma_zone_t bufmin_zone;
195 uma_zone_t bufmax_zone;
198 static struct ctl_be_block_softc backend_block_softc;
201 * Per-I/O information.
203 struct ctl_be_block_io {
205 struct ctl_sg_entry sg_segs[CTLBLK_MAX_SEGS];
206 struct iovec xiovecs[CTLBLK_MAX_SEGS];
215 uint64_t first_error_offset;
216 struct bintime ds_t0;
217 devstat_tag_type ds_tag_type;
218 devstat_trans_flags ds_trans_type;
222 struct ctl_be_block_softc *softc;
223 struct ctl_be_block_lun *lun;
224 void (*beio_cont)(struct ctl_be_block_io *beio); /* to continue processing */
227 extern struct ctl_softc *control_softc;
229 static int cbb_num_threads = 14;
230 SYSCTL_NODE(_kern_cam_ctl, OID_AUTO, block, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
231 "CAM Target Layer Block Backend");
232 SYSCTL_INT(_kern_cam_ctl_block, OID_AUTO, num_threads, CTLFLAG_RWTUN,
233 &cbb_num_threads, 0, "Number of threads per backing file");
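/*
 * Example (illustrative, not from the original source): being declared with
 * CTLFLAG_RWTUN, the worker pool size can be set as a loader tunable or at
 * runtime, e.g. "sysctl kern.cam.ctl.block.num_threads=32".
 */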
235 static struct ctl_be_block_io *ctl_alloc_beio(struct ctl_be_block_softc *softc);
236 static void ctl_free_beio(struct ctl_be_block_io *beio);
237 static void ctl_complete_beio(struct ctl_be_block_io *beio);
238 static int ctl_be_block_move_done(union ctl_io *io);
239 static void ctl_be_block_biodone(struct bio *bio);
240 static void ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
241 struct ctl_be_block_io *beio);
242 static void ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
243 struct ctl_be_block_io *beio);
244 static void ctl_be_block_gls_file(struct ctl_be_block_lun *be_lun,
245 struct ctl_be_block_io *beio);
246 static uint64_t ctl_be_block_getattr_file(struct ctl_be_block_lun *be_lun,
247 const char *attrname);
248 static void ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
249 struct ctl_be_block_io *beio);
250 static void ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun,
251 struct ctl_be_block_io *beio);
252 static void ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
253 struct ctl_be_block_io *beio);
254 static uint64_t ctl_be_block_getattr_dev(struct ctl_be_block_lun *be_lun,
255 const char *attrname);
256 static void ctl_be_block_cr_dispatch(struct ctl_be_block_lun *be_lun,
258 static void ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
260 static void ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
262 static void ctl_be_block_worker(void *context, int pending);
263 static int ctl_be_block_submit(union ctl_io *io);
264 static int ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
265 int flag, struct thread *td);
266 static int ctl_be_block_open_file(struct ctl_be_block_lun *be_lun,
267 struct ctl_lun_req *req);
268 static int ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun,
269 struct ctl_lun_req *req);
270 static int ctl_be_block_close(struct ctl_be_block_lun *be_lun);
271 static int ctl_be_block_open(struct ctl_be_block_lun *be_lun,
272 struct ctl_lun_req *req);
273 static int ctl_be_block_create(struct ctl_be_block_softc *softc,
274 struct ctl_lun_req *req);
275 static int ctl_be_block_rm(struct ctl_be_block_softc *softc,
276 struct ctl_lun_req *req);
277 static int ctl_be_block_modify(struct ctl_be_block_softc *softc,
278 struct ctl_lun_req *req);
279 static void ctl_be_block_lun_shutdown(struct ctl_be_lun *cbe_lun);
280 static int ctl_be_block_config_write(union ctl_io *io);
281 static int ctl_be_block_config_read(union ctl_io *io);
282 static int ctl_be_block_lun_info(struct ctl_be_lun *cbe_lun, struct sbuf *sb);
283 static uint64_t ctl_be_block_lun_attr(struct ctl_be_lun *cbe_lun, const char *attrname);
284 static int ctl_be_block_init(void);
285 static int ctl_be_block_shutdown(void);
287 static struct ctl_backend_driver ctl_be_block_driver =
290 .flags = CTL_BE_FLAG_HAS_CONFIG,
291 .init = ctl_be_block_init,
292 .shutdown = ctl_be_block_shutdown,
293 .data_submit = ctl_be_block_submit,
294 .data_move_done = ctl_be_block_move_done,
295 .config_read = ctl_be_block_config_read,
296 .config_write = ctl_be_block_config_write,
297 .ioctl = ctl_be_block_ioctl,
298 .lun_info = ctl_be_block_lun_info,
299 .lun_attr = ctl_be_block_lun_attr
302 MALLOC_DEFINE(M_CTLBLK, "ctlblock", "Memory used for CTL block backend");
303 CTL_BACKEND_DECLARE(cbb, ctl_be_block_driver);
306 ctl_alloc_seg(struct ctl_be_block_softc *softc, struct ctl_sg_entry *sg,
310 if (len <= CTLBLK_MIN_SEG) {
311 sg->addr = uma_zalloc(softc->bufmin_zone, M_WAITOK);
313 KASSERT(len <= CTLBLK_MAX_SEG,
314 ("Too large alloc %zu > %lu", len, CTLBLK_MAX_SEG));
315 sg->addr = uma_zalloc(softc->bufmax_zone, M_WAITOK);
321 ctl_free_seg(struct ctl_be_block_softc *softc, struct ctl_sg_entry *sg)
324 if (sg->len <= CTLBLK_MIN_SEG) {
325 uma_zfree(softc->bufmin_zone, sg->addr);
327 KASSERT(sg->len <= CTLBLK_MAX_SEG,
328 ("Too large free %zu > %lu", sg->len, CTLBLK_MAX_SEG));
329 uma_zfree(softc->bufmax_zone, sg->addr);
333 static struct ctl_be_block_io *
334 ctl_alloc_beio(struct ctl_be_block_softc *softc)
336 struct ctl_be_block_io *beio;
338 beio = uma_zalloc(softc->beio_zone, M_WAITOK | M_ZERO);
345 ctl_real_free_beio(struct ctl_be_block_io *beio)
347 struct ctl_be_block_softc *softc = beio->softc;
350 for (i = 0; i < beio->num_segs; i++) {
351 ctl_free_seg(softc, &beio->sg_segs[i]);
353 /* For compare we had two equal S/G lists. */
354 if (beio->two_sglists) {
356 &beio->sg_segs[i + CTLBLK_HALF_SEGS]);
360 uma_zfree(softc->beio_zone, beio);
364 ctl_refcnt_beio(void *arg, int diff)
366 struct ctl_be_block_io *beio = arg;
368 if (atomic_fetchadd_int(&beio->refcnt, diff) + diff == 0)
369 ctl_real_free_beio(beio);
373 ctl_free_beio(struct ctl_be_block_io *beio)
376 ctl_refcnt_beio(beio, -1);
380 ctl_complete_beio(struct ctl_be_block_io *beio)
382 union ctl_io *io = beio->io;
384 if (beio->beio_cont != NULL) {
385 beio->beio_cont(beio);
388 ctl_data_submit_done(io);
393 cmp(uint8_t *a, uint8_t *b, size_t size)
397 for (i = 0; i < size; i++) {
405 ctl_be_block_compare(union ctl_io *io)
407 struct ctl_be_block_io *beio;
412 beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
414 for (i = 0; i < beio->num_segs; i++) {
415 res = cmp(beio->sg_segs[i].addr,
416 beio->sg_segs[i + CTLBLK_HALF_SEGS].addr,
417 beio->sg_segs[i].len);
419 if (res < beio->sg_segs[i].len)
422 if (i < beio->num_segs) {
423 scsi_u64to8b(off, info);
424 ctl_set_sense(&io->scsiio, /*current_error*/ 1,
425 /*sense_key*/ SSD_KEY_MISCOMPARE,
426 /*asc*/ 0x1D, /*ascq*/ 0x00,
427 /*type*/ SSD_ELEM_INFO,
428 /*size*/ sizeof(info), /*data*/ &info,
429 /*type*/ SSD_ELEM_NONE);
431 ctl_set_success(&io->scsiio);
435 ctl_be_block_move_done(union ctl_io *io)
437 struct ctl_be_block_io *beio;
438 struct ctl_be_block_lun *be_lun;
439 struct ctl_lba_len_flags *lbalen;
441 struct bintime cur_bt;
444 beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
447 DPRINTF("entered\n");
450 getbinuptime(&cur_bt);
451 bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
452 bintime_add(&io->io_hdr.dma_bt, &cur_bt);
454 io->io_hdr.num_dmas++;
455 io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
458 * We set status at this point for read commands, and write
459 * commands with errors.
461 if (io->io_hdr.flags & CTL_FLAG_ABORT) {
463 } else if ((io->io_hdr.port_status != 0) &&
464 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
465 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
466 ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1,
467 /*retry_count*/ io->io_hdr.port_status);
468 } else if (io->scsiio.kern_data_resid != 0 &&
469 (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT &&
470 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
471 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
472 ctl_set_invalid_field_ciu(&io->scsiio);
473 } else if ((io->io_hdr.port_status == 0) &&
474 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
475 lbalen = ARGS(beio->io);
476 if (lbalen->flags & CTL_LLF_READ) {
477 ctl_set_success(&io->scsiio);
478 } else if (lbalen->flags & CTL_LLF_COMPARE) {
479 /* We have two data blocks ready for comparison. */
480 ctl_be_block_compare(io);
485 * If this is a read, or a write with errors, it is done.
487 if ((beio->bio_cmd == BIO_READ)
488 || ((io->io_hdr.flags & CTL_FLAG_ABORT) != 0)
489 || ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE)) {
490 ctl_complete_beio(beio);
495 * At this point, we have a write and the DMA completed
496 * successfully. We now have to queue it to the task queue to
497 * execute the backend I/O. That is because we do blocking
498 * memory allocations, and in the file backing case, blocking I/O.
499 * This move done routine is generally called in the SIM's
500 * interrupt context, and therefore we cannot block.
502 mtx_lock(&be_lun->queue_lock);
503 STAILQ_INSERT_TAIL(&be_lun->datamove_queue, &io->io_hdr, links);
504 mtx_unlock(&be_lun->queue_lock);
505 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
511 ctl_be_block_biodone(struct bio *bio)
513 struct ctl_be_block_io *beio;
514 struct ctl_be_block_lun *be_lun;
518 beio = bio->bio_caller1;
522 DPRINTF("entered\n");
524 error = bio->bio_error;
525 mtx_lock(&be_lun->io_lock);
527 (beio->first_error == 0 ||
528 bio->bio_offset < beio->first_error_offset)) {
529 beio->first_error = error;
530 beio->first_error_offset = bio->bio_offset;
533 beio->num_bios_done++;
536 * XXX KDM will this cause WITNESS to complain? Holding a lock
537 * during the free might cause it to complain.
542 * If the send complete bit isn't set, or we aren't the last I/O to
543 * complete, then we're done.
545 if ((beio->send_complete == 0)
546 || (beio->num_bios_done < beio->num_bios_sent)) {
547 mtx_unlock(&be_lun->io_lock);
552 * At this point, we've verified that we are the last I/O to
553 * complete, so it's safe to drop the lock.
555 devstat_end_transaction(beio->lun->disk_stats, beio->io_len,
556 beio->ds_tag_type, beio->ds_trans_type,
557 /*now*/ NULL, /*then*/&beio->ds_t0);
558 mtx_unlock(&be_lun->io_lock);
561 * If there are any errors from the backing device, we fail the
562 * entire I/O with a medium error.
564 error = beio->first_error;
566 if (error == EOPNOTSUPP) {
567 ctl_set_invalid_opcode(&io->scsiio);
568 } else if (error == ENOSPC || error == EDQUOT) {
569 ctl_set_space_alloc_fail(&io->scsiio);
570 } else if (error == EROFS || error == EACCES) {
571 ctl_set_hw_write_protected(&io->scsiio);
572 } else if (beio->bio_cmd == BIO_FLUSH) {
573 /* XXX KDM is there a better error here? */
574 ctl_set_internal_failure(&io->scsiio,
576 /*retry_count*/ 0xbad2);
578 ctl_set_medium_error(&io->scsiio,
579 beio->bio_cmd == BIO_READ);
581 ctl_complete_beio(beio);
586 * If this is a write, a flush, a delete or verify, we're all done.
587 * If this is a read, we can now send the data to the user.
589 if ((beio->bio_cmd == BIO_WRITE)
590 || (beio->bio_cmd == BIO_FLUSH)
591 || (beio->bio_cmd == BIO_DELETE)
592 || (ARGS(io)->flags & CTL_LLF_VERIFY)) {
593 ctl_set_success(&io->scsiio);
594 ctl_complete_beio(beio);
596 if ((ARGS(io)->flags & CTL_LLF_READ) &&
597 beio->beio_cont == NULL) {
598 ctl_set_success(&io->scsiio);
602 getbinuptime(&io->io_hdr.dma_start_bt);
609 ctl_be_block_flush_file(struct ctl_be_block_lun *be_lun,
610 struct ctl_be_block_io *beio)
612 union ctl_io *io = beio->io;
613 struct mount *mountpoint;
614 int error, lock_flags;
616 DPRINTF("entered\n");
618 binuptime(&beio->ds_t0);
619 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
621 (void) vn_start_write(be_lun->vn, &mountpoint, V_WAIT);
623 if (MNT_SHARED_WRITES(mountpoint) ||
624 ((mountpoint == NULL) && MNT_SHARED_WRITES(be_lun->vn->v_mount)))
625 lock_flags = LK_SHARED;
627 lock_flags = LK_EXCLUSIVE;
628 vn_lock(be_lun->vn, lock_flags | LK_RETRY);
629 error = VOP_FSYNC(be_lun->vn, beio->io_arg ? MNT_NOWAIT : MNT_WAIT,
631 VOP_UNLOCK(be_lun->vn);
633 vn_finished_write(mountpoint);
635 mtx_lock(&be_lun->io_lock);
636 devstat_end_transaction(beio->lun->disk_stats, beio->io_len,
637 beio->ds_tag_type, beio->ds_trans_type,
638 /*now*/ NULL, /*then*/&beio->ds_t0);
639 mtx_unlock(&be_lun->io_lock);
642 ctl_set_success(&io->scsiio);
644 /* XXX KDM is there a better error here? */
645 ctl_set_internal_failure(&io->scsiio,
647 /*retry_count*/ 0xbad1);
650 ctl_complete_beio(beio);
653 SDT_PROBE_DEFINE1(cbb, , read, file_start, "uint64_t");
654 SDT_PROBE_DEFINE1(cbb, , write, file_start, "uint64_t");
655 SDT_PROBE_DEFINE1(cbb, , read, file_done, "uint64_t");
656 SDT_PROBE_DEFINE1(cbb, , write, file_done, "uint64_t");
659 ctl_be_block_dispatch_file(struct ctl_be_block_lun *be_lun,
660 struct ctl_be_block_io *beio)
662 struct ctl_be_block_filedata *file_data;
665 struct iovec *xiovec;
669 DPRINTF("entered\n");
671 file_data = &be_lun->backend.file;
674 if (ARGS(io)->flags & CTL_LLF_DPO)
676 if (beio->bio_cmd == BIO_WRITE && ARGS(io)->flags & CTL_LLF_FUA)
679 bzero(&xuio, sizeof(xuio));
680 if (beio->bio_cmd == BIO_READ) {
681 SDT_PROBE0(cbb, , read, file_start);
682 xuio.uio_rw = UIO_READ;
684 SDT_PROBE0(cbb, , write, file_start);
685 xuio.uio_rw = UIO_WRITE;
687 xuio.uio_offset = beio->io_offset;
688 xuio.uio_resid = beio->io_len;
689 xuio.uio_segflg = UIO_SYSSPACE;
690 xuio.uio_iov = beio->xiovecs;
691 xuio.uio_iovcnt = beio->num_segs;
692 xuio.uio_td = curthread;
694 for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) {
695 xiovec->iov_base = beio->sg_segs[i].addr;
696 xiovec->iov_len = beio->sg_segs[i].len;
699 binuptime(&beio->ds_t0);
700 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
702 if (beio->bio_cmd == BIO_READ) {
703 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);
706 * UFS pays attention to IO_DIRECT for reads. If the
707 * DIRECTIO option is configured into the kernel, it calls
708 * ffs_rawread(). But that only works for single-segment
709 * uios with user space addresses. In our case, with a
710 * kernel uio, it still reads into the buffer cache, but it
711 * will just try to release the buffer from the cache later
714 * ZFS does not pay attention to IO_DIRECT for reads.
716 * UFS does not pay attention to IO_SYNC for reads.
718 * ZFS pays attention to IO_SYNC (which translates into the
719 * Solaris define FRSYNC for zfs_read()) for reads. It
720 * attempts to sync the file before reading.
722 error = VOP_READ(be_lun->vn, &xuio, flags, file_data->cred);
724 VOP_UNLOCK(be_lun->vn);
725 SDT_PROBE0(cbb, , read, file_done);
726 if (error == 0 && xuio.uio_resid > 0) {
728 * If we read less than requested (EOF), then
729 * we should zero the rest of the buffer.
731 s = beio->io_len - xuio.uio_resid;
732 for (i = 0; i < beio->num_segs; i++) {
733 if (s >= beio->sg_segs[i].len) {
734 s -= beio->sg_segs[i].len;
737 bzero((uint8_t *)beio->sg_segs[i].addr + s,
738 beio->sg_segs[i].len - s);
743 struct mount *mountpoint;
746 (void)vn_start_write(be_lun->vn, &mountpoint, V_WAIT);
748 if (MNT_SHARED_WRITES(mountpoint) || ((mountpoint == NULL)
749 && MNT_SHARED_WRITES(be_lun->vn->v_mount)))
750 lock_flags = LK_SHARED;
752 lock_flags = LK_EXCLUSIVE;
753 vn_lock(be_lun->vn, lock_flags | LK_RETRY);
756 * UFS pays attention to IO_DIRECT for writes. The write
757 * is done asynchronously. (Normally the write would just
758 * get put into cache.)
760 * UFS pays attention to IO_SYNC for writes. It will
761 * attempt to write the buffer out synchronously if that
764 * ZFS does not pay attention to IO_DIRECT for writes.
766 * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC)
767 * for writes. It will flush the transaction from the
768 * cache before returning.
770 error = VOP_WRITE(be_lun->vn, &xuio, flags, file_data->cred);
771 VOP_UNLOCK(be_lun->vn);
773 vn_finished_write(mountpoint);
774 SDT_PROBE0(cbb, , write, file_done);
777 mtx_lock(&be_lun->io_lock);
778 devstat_end_transaction(beio->lun->disk_stats, beio->io_len,
779 beio->ds_tag_type, beio->ds_trans_type,
780 /*now*/ NULL, /*then*/&beio->ds_t0);
781 mtx_unlock(&be_lun->io_lock);
784 * If we got an error, set the sense data to "MEDIUM ERROR" and
785 * return the I/O to the user.
788 if (error == ENOSPC || error == EDQUOT) {
789 ctl_set_space_alloc_fail(&io->scsiio);
790 } else if (error == EROFS || error == EACCES) {
791 ctl_set_hw_write_protected(&io->scsiio);
793 ctl_set_medium_error(&io->scsiio,
794 beio->bio_cmd == BIO_READ);
796 ctl_complete_beio(beio);
801 * If this is a write or a verify, we're all done.
802 * If this is a read, we can now send the data to the user.
804 if ((beio->bio_cmd == BIO_WRITE) ||
805 (ARGS(io)->flags & CTL_LLF_VERIFY)) {
806 ctl_set_success(&io->scsiio);
807 ctl_complete_beio(beio);
809 if ((ARGS(io)->flags & CTL_LLF_READ) &&
810 beio->beio_cont == NULL) {
811 ctl_set_success(&io->scsiio);
815 getbinuptime(&io->io_hdr.dma_start_bt);
822 ctl_be_block_gls_file(struct ctl_be_block_lun *be_lun,
823 struct ctl_be_block_io *beio)
825 union ctl_io *io = beio->io;
826 struct ctl_lba_len_flags *lbalen = ARGS(io);
827 struct scsi_get_lba_status_data *data;
831 DPRINTF("entered\n");
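/*
 * Probe the backing file with FIOSEEKHOLE/FIOSEEKDATA: if the next hole
 * lies beyond the requested offset, the range up to it is mapped
 * (status 0); otherwise the next data offset bounds a deallocated range
 * (status 1); if neither probe advances, the rest of the LUN is reported
 * as mapped/unknown (status 0).
 */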
833 off = roff = ((off_t)lbalen->lba) * be_lun->cbe_lun.blocksize;
834 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);
835 error = VOP_IOCTL(be_lun->vn, FIOSEEKHOLE, &off,
836 0, curthread->td_ucred, curthread);
837 if (error == 0 && off > roff)
838 status = 0; /* mapped up to off */
840 error = VOP_IOCTL(be_lun->vn, FIOSEEKDATA, &off,
841 0, curthread->td_ucred, curthread);
842 if (error == 0 && off > roff)
843 status = 1; /* deallocated up to off */
845 status = 0; /* unknown up to the end */
846 off = be_lun->size_bytes;
849 VOP_UNLOCK(be_lun->vn);
851 data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr;
852 scsi_u64to8b(lbalen->lba, data->descr[0].addr);
853 scsi_ulto4b(MIN(UINT32_MAX, off / be_lun->cbe_lun.blocksize -
854 lbalen->lba), data->descr[0].length);
855 data->descr[0].status = status;
857 ctl_complete_beio(beio);
861 ctl_be_block_getattr_file(struct ctl_be_block_lun *be_lun, const char *attrname)
864 struct statfs statfs;
869 if (be_lun->vn == NULL)
871 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);
872 if (strcmp(attrname, "blocksused") == 0) {
873 error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred);
875 val = vattr.va_bytes / be_lun->cbe_lun.blocksize;
877 if (strcmp(attrname, "blocksavail") == 0 &&
878 !VN_IS_DOOMED(be_lun->vn)) {
879 error = VFS_STATFS(be_lun->vn->v_mount, &statfs);
881 val = statfs.f_bavail * statfs.f_bsize /
882 be_lun->cbe_lun.blocksize;
884 VOP_UNLOCK(be_lun->vn);
889 ctl_be_block_dispatch_zvol(struct ctl_be_block_lun *be_lun,
890 struct ctl_be_block_io *beio)
896 struct iovec *xiovec;
897 int error, flags, i, ref;
899 DPRINTF("entered\n");
903 if (ARGS(io)->flags & CTL_LLF_DPO)
905 if (beio->bio_cmd == BIO_WRITE && ARGS(io)->flags & CTL_LLF_FUA)
908 bzero(&xuio, sizeof(xuio));
909 if (beio->bio_cmd == BIO_READ) {
910 SDT_PROBE0(cbb, , read, file_start);
911 xuio.uio_rw = UIO_READ;
913 SDT_PROBE0(cbb, , write, file_start);
914 xuio.uio_rw = UIO_WRITE;
916 xuio.uio_offset = beio->io_offset;
917 xuio.uio_resid = beio->io_len;
918 xuio.uio_segflg = UIO_SYSSPACE;
919 xuio.uio_iov = beio->xiovecs;
920 xuio.uio_iovcnt = beio->num_segs;
921 xuio.uio_td = curthread;
923 for (i = 0, xiovec = xuio.uio_iov; i < xuio.uio_iovcnt; i++, xiovec++) {
924 xiovec->iov_base = beio->sg_segs[i].addr;
925 xiovec->iov_len = beio->sg_segs[i].len;
928 binuptime(&beio->ds_t0);
929 devstat_start_transaction(beio->lun->disk_stats, &beio->ds_t0);
931 csw = devvn_refthread(be_lun->vn, &dev, &ref);
933 if (beio->bio_cmd == BIO_READ)
934 error = csw->d_read(dev, &xuio, flags);
936 error = csw->d_write(dev, &xuio, flags);
937 dev_relthread(dev, ref);
941 if (beio->bio_cmd == BIO_READ)
942 SDT_PROBE0(cbb, , read, file_done);
944 SDT_PROBE0(cbb, , write, file_done);
946 mtx_lock(&be_lun->io_lock);
947 devstat_end_transaction(beio->lun->disk_stats, beio->io_len,
948 beio->ds_tag_type, beio->ds_trans_type,
949 /*now*/ NULL, /*then*/&beio->ds_t0);
950 mtx_unlock(&be_lun->io_lock);
953 * If we got an error, set the sense data to "MEDIUM ERROR" and
954 * return the I/O to the user.
957 if (error == ENOSPC || error == EDQUOT) {
958 ctl_set_space_alloc_fail(&io->scsiio);
959 } else if (error == EROFS || error == EACCES) {
960 ctl_set_hw_write_protected(&io->scsiio);
962 ctl_set_medium_error(&io->scsiio,
963 beio->bio_cmd == BIO_READ);
965 ctl_complete_beio(beio);
970 * If this is a write or a verify, we're all done.
971 * If this is a read, we can now send the data to the user.
973 if ((beio->bio_cmd == BIO_WRITE) ||
974 (ARGS(io)->flags & CTL_LLF_VERIFY)) {
975 ctl_set_success(&io->scsiio);
976 ctl_complete_beio(beio);
978 if ((ARGS(io)->flags & CTL_LLF_READ) &&
979 beio->beio_cont == NULL) {
980 ctl_set_success(&io->scsiio);
984 getbinuptime(&io->io_hdr.dma_start_bt);
991 ctl_be_block_gls_zvol(struct ctl_be_block_lun *be_lun,
992 struct ctl_be_block_io *beio)
994 union ctl_io *io = beio->io;
997 struct ctl_lba_len_flags *lbalen = ARGS(io);
998 struct scsi_get_lba_status_data *data;
1000 int error, ref, status;
1002 DPRINTF("entered\n");
1004 csw = devvn_refthread(be_lun->vn, &dev, &ref);
1006 status = 0; /* unknown up to the end */
1007 off = be_lun->size_bytes;
1010 off = roff = ((off_t)lbalen->lba) * be_lun->cbe_lun.blocksize;
1011 error = csw->d_ioctl(dev, FIOSEEKHOLE, (caddr_t)&off, FREAD,
1013 if (error == 0 && off > roff)
1014 status = 0; /* mapped up to off */
1016 error = csw->d_ioctl(dev, FIOSEEKDATA, (caddr_t)&off, FREAD,
1018 if (error == 0 && off > roff)
1019 status = 1; /* deallocated up to off */
1021 status = 0; /* unknown up to the end */
1022 off = be_lun->size_bytes;
1025 dev_relthread(dev, ref);
1028 data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr;
1029 scsi_u64to8b(lbalen->lba, data->descr[0].addr);
1030 scsi_ulto4b(MIN(UINT32_MAX, off / be_lun->cbe_lun.blocksize -
1031 lbalen->lba), data->descr[0].length);
1032 data->descr[0].status = status;
1034 ctl_complete_beio(beio);
1038 ctl_be_block_flush_dev(struct ctl_be_block_lun *be_lun,
1039 struct ctl_be_block_io *beio)
1046 DPRINTF("entered\n");
1048 /* This can't fail, it's a blocking allocation. */
1049 bio = g_alloc_bio();
1051 bio->bio_cmd = BIO_FLUSH;
1052 bio->bio_offset = 0;
1054 bio->bio_done = ctl_be_block_biodone;
1055 bio->bio_caller1 = beio;
1056 bio->bio_pblkno = 0;
1059 * We don't need to acquire the LUN lock here, because we are only
1060 * sending one bio, and so there is no other context to synchronize
1063 beio->num_bios_sent = 1;
1064 beio->send_complete = 1;
1066 binuptime(&beio->ds_t0);
1067 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);
1069 csw = devvn_refthread(be_lun->vn, &dev, &ref);
1072 csw->d_strategy(bio);
1073 dev_relthread(dev, ref);
1075 bio->bio_error = ENXIO;
1076 ctl_be_block_biodone(bio);
1081 ctl_be_block_unmap_dev_range(struct ctl_be_block_lun *be_lun,
1082 struct ctl_be_block_io *beio,
1083 uint64_t off, uint64_t len, int last)
1091 csw = devvn_refthread(be_lun->vn, &dev, &ref);
1092 maxlen = LONG_MAX - (LONG_MAX % be_lun->cbe_lun.blocksize);
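/*
 * The range is split into BIO_DELETE requests no larger than maxlen
 * (LONG_MAX rounded down to a block multiple), so bio_length stays within
 * range even for very large UNMAP descriptors.
 */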
1094 bio = g_alloc_bio();
1095 bio->bio_cmd = BIO_DELETE;
1097 bio->bio_offset = off;
1098 bio->bio_length = MIN(len, maxlen);
1100 bio->bio_done = ctl_be_block_biodone;
1101 bio->bio_caller1 = beio;
1102 bio->bio_pblkno = off / be_lun->cbe_lun.blocksize;
1104 off += bio->bio_length;
1105 len -= bio->bio_length;
1107 mtx_lock(&be_lun->io_lock);
1108 beio->num_bios_sent++;
1109 if (last && len == 0)
1110 beio->send_complete = 1;
1111 mtx_unlock(&be_lun->io_lock);
1114 csw->d_strategy(bio);
1116 bio->bio_error = ENXIO;
1117 ctl_be_block_biodone(bio);
1121 dev_relthread(dev, ref);
1125 ctl_be_block_unmap_dev(struct ctl_be_block_lun *be_lun,
1126 struct ctl_be_block_io *beio)
1129 struct ctl_ptr_len_flags *ptrlen;
1130 struct scsi_unmap_desc *buf, *end;
1135 DPRINTF("entered\n");
1137 binuptime(&beio->ds_t0);
1138 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);
1140 if (beio->io_offset == -1) {
1142 ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
1143 buf = (struct scsi_unmap_desc *)ptrlen->ptr;
1144 end = buf + ptrlen->len / sizeof(*buf);
1145 for (; buf < end; buf++) {
1146 len = (uint64_t)scsi_4btoul(buf->length) *
1147 be_lun->cbe_lun.blocksize;
1148 beio->io_len += len;
1149 ctl_be_block_unmap_dev_range(be_lun, beio,
1150 scsi_8btou64(buf->lba) * be_lun->cbe_lun.blocksize,
1151 len, (end - buf < 2) ? TRUE : FALSE);
1154 ctl_be_block_unmap_dev_range(be_lun, beio,
1155 beio->io_offset, beio->io_len, TRUE);
1159 ctl_be_block_dispatch_dev(struct ctl_be_block_lun *be_lun,
1160 struct ctl_be_block_io *beio)
1162 TAILQ_HEAD(, bio) queue = TAILQ_HEAD_INITIALIZER(queue);
1167 int i, max_iosize, ref;
1169 DPRINTF("entered\n");
1170 csw = devvn_refthread(be_lun->vn, &dev, &ref);
1173 * We have to limit our I/O size to the maximum supported by the
1177 max_iosize = dev->si_iosize_max;
1178 if (max_iosize < PAGE_SIZE)
1179 max_iosize = DFLTPHYS;
1181 max_iosize = DFLTPHYS;
1183 cur_offset = beio->io_offset;
1184 for (i = 0; i < beio->num_segs; i++) {
1188 cur_size = beio->sg_segs[i].len;
1189 cur_ptr = beio->sg_segs[i].addr;
1191 while (cur_size > 0) {
1192 /* This can't fail, it's a blocking allocation. */
1193 bio = g_alloc_bio();
1195 KASSERT(bio != NULL, ("g_alloc_bio() failed!\n"));
1197 bio->bio_cmd = beio->bio_cmd;
1199 bio->bio_caller1 = beio;
1200 bio->bio_length = min(cur_size, max_iosize);
1201 bio->bio_offset = cur_offset;
1202 bio->bio_data = cur_ptr;
1203 bio->bio_done = ctl_be_block_biodone;
1204 bio->bio_pblkno = cur_offset / be_lun->cbe_lun.blocksize;
1206 cur_offset += bio->bio_length;
1207 cur_ptr += bio->bio_length;
1208 cur_size -= bio->bio_length;
1210 TAILQ_INSERT_TAIL(&queue, bio, bio_queue);
1211 beio->num_bios_sent++;
1214 beio->send_complete = 1;
1215 binuptime(&beio->ds_t0);
1216 devstat_start_transaction(be_lun->disk_stats, &beio->ds_t0);
1219 * Fire off all allocated requests!
1221 while ((bio = TAILQ_FIRST(&queue)) != NULL) {
1222 TAILQ_REMOVE(&queue, bio, bio_queue);
1224 csw->d_strategy(bio);
1226 bio->bio_error = ENXIO;
1227 ctl_be_block_biodone(bio);
1231 dev_relthread(dev, ref);
1235 ctl_be_block_getattr_dev(struct ctl_be_block_lun *be_lun, const char *attrname)
1237 struct diocgattr_arg arg;
1242 csw = devvn_refthread(be_lun->vn, &dev, &ref);
1244 return (UINT64_MAX);
1245 strlcpy(arg.name, attrname, sizeof(arg.name));
1246 arg.len = sizeof(arg.value.off);
1248 error = csw->d_ioctl(dev, DIOCGATTR, (caddr_t)&arg, FREAD,
1252 dev_relthread(dev, ref);
1254 return (UINT64_MAX);
1255 return (arg.value.off);
1259 ctl_be_block_cw_dispatch_sync(struct ctl_be_block_lun *be_lun,
1262 struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
1263 struct ctl_be_block_io *beio;
1264 struct ctl_lba_len_flags *lbalen;
1266 DPRINTF("entered\n");
1267 beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
1268 lbalen = (struct ctl_lba_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
1270 beio->io_len = lbalen->len * cbe_lun->blocksize;
1271 beio->io_offset = lbalen->lba * cbe_lun->blocksize;
1272 beio->io_arg = (lbalen->flags & SSC_IMMED) != 0;
1273 beio->bio_cmd = BIO_FLUSH;
1274 beio->ds_trans_type = DEVSTAT_NO_DATA;
1276 be_lun->lun_flush(be_lun, beio);
1280 ctl_be_block_cw_done_ws(struct ctl_be_block_io *beio)
1285 ctl_free_beio(beio);
1286 if ((io->io_hdr.flags & CTL_FLAG_ABORT) ||
1287 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
1288 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) {
1289 ctl_config_write_done(io);
1293 ctl_be_block_config_write(io);
1297 ctl_be_block_cw_dispatch_ws(struct ctl_be_block_lun *be_lun,
1300 struct ctl_be_block_softc *softc = be_lun->softc;
1301 struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
1302 struct ctl_be_block_io *beio;
1303 struct ctl_lba_len_flags *lbalen;
1304 uint64_t len_left, lba;
1305 uint32_t pb, pbo, adj;
1309 DPRINTF("entered\n");
1311 beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
1312 lbalen = ARGS(beio->io);
1314 if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP | SWS_ANCHOR | SWS_NDOB) ||
1315 (lbalen->flags & (SWS_UNMAP | SWS_ANCHOR) && be_lun->unmap == NULL)) {
1316 ctl_free_beio(beio);
1317 ctl_set_invalid_field(&io->scsiio,
1323 ctl_config_write_done(io);
1327 if (lbalen->flags & (SWS_UNMAP | SWS_ANCHOR)) {
1328 beio->io_offset = lbalen->lba * cbe_lun->blocksize;
1329 beio->io_len = (uint64_t)lbalen->len * cbe_lun->blocksize;
1330 beio->bio_cmd = BIO_DELETE;
1331 beio->ds_trans_type = DEVSTAT_FREE;
1333 be_lun->unmap(be_lun, beio);
1337 beio->bio_cmd = BIO_WRITE;
1338 beio->ds_trans_type = DEVSTAT_WRITE;
1340 DPRINTF("WRITE SAME at LBA %jx len %u\n",
1341 (uintmax_t)lbalen->lba, lbalen->len);
1343 pb = cbe_lun->blocksize << be_lun->cbe_lun.pblockexp;
1344 if (be_lun->cbe_lun.pblockoff > 0)
1345 pbo = pb - cbe_lun->blocksize * be_lun->cbe_lun.pblockoff;
1348 len_left = (uint64_t)lbalen->len * cbe_lun->blocksize;
1349 for (i = 0, lba = 0; i < CTLBLK_NUM_SEGS && len_left > 0; i++) {
1351 * Setup the S/G entry for this chunk.
1353 seglen = MIN(CTLBLK_MAX_SEG, len_left);
1354 if (pb > cbe_lun->blocksize) {
1355 adj = ((lbalen->lba + lba) * cbe_lun->blocksize +
1360 seglen -= seglen % cbe_lun->blocksize;
1362 seglen -= seglen % cbe_lun->blocksize;
1363 ctl_alloc_seg(softc, &beio->sg_segs[i], seglen);
1365 DPRINTF("segment %d addr %p len %zd\n", i,
1366 beio->sg_segs[i].addr, beio->sg_segs[i].len);
1371 buf = beio->sg_segs[i].addr;
1373 for (; buf < end; buf += cbe_lun->blocksize) {
1374 if (lbalen->flags & SWS_NDOB) {
1375 memset(buf, 0, cbe_lun->blocksize);
1377 memcpy(buf, io->scsiio.kern_data_ptr,
1378 cbe_lun->blocksize);
1380 if (lbalen->flags & SWS_LBDATA)
1381 scsi_ulto4b(lbalen->lba + lba, buf);
1386 beio->io_offset = lbalen->lba * cbe_lun->blocksize;
1387 beio->io_len = lba * cbe_lun->blocksize;
1389 /* We cannot do it all in one run. Correct the length and schedule a rerun. */
1393 beio->beio_cont = ctl_be_block_cw_done_ws;
1396 be_lun->dispatch(be_lun, beio);
1400 ctl_be_block_cw_dispatch_unmap(struct ctl_be_block_lun *be_lun,
1403 struct ctl_be_block_io *beio;
1404 struct ctl_ptr_len_flags *ptrlen;
1406 DPRINTF("entered\n");
1408 beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
1409 ptrlen = (struct ctl_ptr_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
1411 if ((ptrlen->flags & ~SU_ANCHOR) != 0 || be_lun->unmap == NULL) {
1412 ctl_free_beio(beio);
1413 ctl_set_invalid_field(&io->scsiio,
1419 ctl_config_write_done(io);
1424 beio->io_offset = -1;
1425 beio->bio_cmd = BIO_DELETE;
1426 beio->ds_trans_type = DEVSTAT_FREE;
1428 be_lun->unmap(be_lun, beio);
1432 ctl_be_block_cr_done(struct ctl_be_block_io *beio)
1437 ctl_free_beio(beio);
1438 ctl_config_read_done(io);
1442 ctl_be_block_cr_dispatch(struct ctl_be_block_lun *be_lun,
1445 struct ctl_be_block_io *beio;
1446 struct ctl_be_block_softc *softc;
1448 DPRINTF("entered\n");
1450 softc = be_lun->softc;
1451 beio = ctl_alloc_beio(softc);
1454 beio->beio_cont = ctl_be_block_cr_done;
1455 PRIV(io)->ptr = (void *)beio;
1457 switch (io->scsiio.cdb[0]) {
1458 case SERVICE_ACTION_IN: /* GET LBA STATUS */
1460 beio->ds_trans_type = DEVSTAT_NO_DATA;
1461 beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
1463 if (be_lun->get_lba_status)
1464 be_lun->get_lba_status(be_lun, beio);
1466 ctl_be_block_cr_done(beio);
1469 panic("Unhandled CDB type %#x", io->scsiio.cdb[0]);
1475 ctl_be_block_cw_done(struct ctl_be_block_io *beio)
1480 ctl_free_beio(beio);
1481 ctl_config_write_done(io);
1485 ctl_be_block_cw_dispatch(struct ctl_be_block_lun *be_lun,
1488 struct ctl_be_block_io *beio;
1489 struct ctl_be_block_softc *softc;
1491 DPRINTF("entered\n");
1493 softc = be_lun->softc;
1494 beio = ctl_alloc_beio(softc);
1497 beio->beio_cont = ctl_be_block_cw_done;
1498 switch (io->scsiio.tag_type) {
1499 case CTL_TAG_ORDERED:
1500 beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
1502 case CTL_TAG_HEAD_OF_QUEUE:
1503 beio->ds_tag_type = DEVSTAT_TAG_HEAD;
1505 case CTL_TAG_UNTAGGED:
1506 case CTL_TAG_SIMPLE:
1509 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE;
1512 PRIV(io)->ptr = (void *)beio;
1514 switch (io->scsiio.cdb[0]) {
1515 case SYNCHRONIZE_CACHE:
1516 case SYNCHRONIZE_CACHE_16:
1517 ctl_be_block_cw_dispatch_sync(be_lun, io);
1521 ctl_be_block_cw_dispatch_ws(be_lun, io);
1524 ctl_be_block_cw_dispatch_unmap(be_lun, io);
1527 panic("Unhandled CDB type %#x", io->scsiio.cdb[0]);
1532 SDT_PROBE_DEFINE1(cbb, , read, start, "uint64_t");
1533 SDT_PROBE_DEFINE1(cbb, , write, start, "uint64_t");
1534 SDT_PROBE_DEFINE1(cbb, , read, alloc_done, "uint64_t");
1535 SDT_PROBE_DEFINE1(cbb, , write, alloc_done, "uint64_t");
1538 ctl_be_block_next(struct ctl_be_block_io *beio)
1540 struct ctl_be_block_lun *be_lun;
1545 ctl_free_beio(beio);
1546 if ((io->io_hdr.flags & CTL_FLAG_ABORT) ||
1547 ((io->io_hdr.status & CTL_STATUS_MASK) != CTL_STATUS_NONE &&
1548 (io->io_hdr.status & CTL_STATUS_MASK) != CTL_SUCCESS)) {
1549 ctl_data_submit_done(io);
1553 io->io_hdr.status &= ~CTL_STATUS_MASK;
1554 io->io_hdr.status |= CTL_STATUS_NONE;
1556 mtx_lock(&be_lun->queue_lock);
1557 STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links);
1558 mtx_unlock(&be_lun->queue_lock);
1559 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
1563 ctl_be_block_dispatch(struct ctl_be_block_lun *be_lun,
1566 struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
1567 struct ctl_be_block_io *beio;
1568 struct ctl_be_block_softc *softc;
1569 struct ctl_lba_len_flags *lbalen;
1570 struct ctl_ptr_len_flags *bptrlen;
1571 uint64_t len_left, lbas;
1574 softc = be_lun->softc;
1576 DPRINTF("entered\n");
1579 if (lbalen->flags & CTL_LLF_WRITE) {
1580 SDT_PROBE0(cbb, , write, start);
1582 SDT_PROBE0(cbb, , read, start);
1585 beio = ctl_alloc_beio(softc);
1589 bptrlen->ptr = (void *)beio;
1591 switch (io->scsiio.tag_type) {
1592 case CTL_TAG_ORDERED:
1593 beio->ds_tag_type = DEVSTAT_TAG_ORDERED;
1595 case CTL_TAG_HEAD_OF_QUEUE:
1596 beio->ds_tag_type = DEVSTAT_TAG_HEAD;
1598 case CTL_TAG_UNTAGGED:
1599 case CTL_TAG_SIMPLE:
1602 beio->ds_tag_type = DEVSTAT_TAG_SIMPLE;
1606 if (lbalen->flags & CTL_LLF_WRITE) {
1607 beio->bio_cmd = BIO_WRITE;
1608 beio->ds_trans_type = DEVSTAT_WRITE;
1610 beio->bio_cmd = BIO_READ;
1611 beio->ds_trans_type = DEVSTAT_READ;
1614 DPRINTF("%s at LBA %jx len %u @%ju\n",
1615 (beio->bio_cmd == BIO_READ) ? "READ" : "WRITE",
1616 (uintmax_t)lbalen->lba, lbalen->len, bptrlen->len);
1617 if (lbalen->flags & CTL_LLF_COMPARE) {
1618 beio->two_sglists = 1;
1619 lbas = CTLBLK_HALF_IO_SIZE;
1621 lbas = CTLBLK_MAX_IO_SIZE;
1623 lbas = MIN(lbalen->len - bptrlen->len, lbas / cbe_lun->blocksize);
1624 beio->io_offset = (lbalen->lba + bptrlen->len) * cbe_lun->blocksize;
1625 beio->io_len = lbas * cbe_lun->blocksize;
1626 bptrlen->len += lbas;
1628 for (i = 0, len_left = beio->io_len; len_left > 0; i++) {
1629 KASSERT(i < CTLBLK_MAX_SEGS, ("Too many segs (%d >= %d)",
1630 i, CTLBLK_MAX_SEGS));
1633 * Setup the S/G entry for this chunk.
1635 ctl_alloc_seg(softc, &beio->sg_segs[i],
1636 MIN(CTLBLK_MAX_SEG, len_left));
1638 DPRINTF("segment %d addr %p len %zd\n", i,
1639 beio->sg_segs[i].addr, beio->sg_segs[i].len);
1641 /* Set up second segment for compare operation. */
1642 if (beio->two_sglists) {
1643 ctl_alloc_seg(softc,
1644 &beio->sg_segs[i + CTLBLK_HALF_SEGS],
1645 beio->sg_segs[i].len);
1649 len_left -= beio->sg_segs[i].len;
1651 if (bptrlen->len < lbalen->len)
1652 beio->beio_cont = ctl_be_block_next;
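/*
 * Illustrative example: a 4 MB READ on a 512-byte-block LUN is processed
 * as four 1 MB (CTLBLK_MAX_IO_SIZE) passes; ctl_be_block_next() requeues
 * the remaining work on the input queue after each pass until bptrlen->len
 * reaches lbalen->len.
 */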
1653 io->scsiio.be_move_done = ctl_be_block_move_done;
1654 /* For compare we have separate S/G lists for read and datamove. */
1655 if (beio->two_sglists)
1656 io->scsiio.kern_data_ptr = (uint8_t *)&beio->sg_segs[CTLBLK_HALF_SEGS];
1658 io->scsiio.kern_data_ptr = (uint8_t *)beio->sg_segs;
1659 io->scsiio.kern_data_len = beio->io_len;
1660 io->scsiio.kern_sg_entries = beio->num_segs;
1661 io->scsiio.kern_data_ref = ctl_refcnt_beio;
1662 io->scsiio.kern_data_arg = beio;
1663 io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
1666 * For the read case, we need to read the data into our buffers and
1667 * then we can send it back to the user. For the write case, we
1668 * need to get the data from the user first.
1670 if (beio->bio_cmd == BIO_READ) {
1671 SDT_PROBE0(cbb, , read, alloc_done);
1672 be_lun->dispatch(be_lun, beio);
1674 SDT_PROBE0(cbb, , write, alloc_done);
1676 getbinuptime(&io->io_hdr.dma_start_bt);
1683 ctl_be_block_worker(void *context, int pending)
1685 struct ctl_be_block_lun *be_lun = (struct ctl_be_block_lun *)context;
1686 struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
1688 struct ctl_be_block_io *beio;
1690 DPRINTF("entered\n");
1692 * Fetch and process I/Os from all queues. If we detect the LUN
1693 * CTL_LUN_FLAG_NO_MEDIA flag here, it is the result of a race,
1694 * so make the response maximally opaque so as not to confuse the initiator.
1697 mtx_lock(&be_lun->queue_lock);
1698 io = (union ctl_io *)STAILQ_FIRST(&be_lun->datamove_queue);
1700 DPRINTF("datamove queue\n");
1701 STAILQ_REMOVE(&be_lun->datamove_queue, &io->io_hdr,
1703 mtx_unlock(&be_lun->queue_lock);
1704 beio = (struct ctl_be_block_io *)PRIV(io)->ptr;
1705 if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) {
1706 ctl_set_busy(&io->scsiio);
1707 ctl_complete_beio(beio);
1710 be_lun->dispatch(be_lun, beio);
1713 io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_write_queue);
1715 DPRINTF("config write queue\n");
1716 STAILQ_REMOVE(&be_lun->config_write_queue, &io->io_hdr,
1718 mtx_unlock(&be_lun->queue_lock);
1719 if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) {
1720 ctl_set_busy(&io->scsiio);
1721 ctl_config_write_done(io);
1724 ctl_be_block_cw_dispatch(be_lun, io);
1727 io = (union ctl_io *)STAILQ_FIRST(&be_lun->config_read_queue);
1729 DPRINTF("config read queue\n");
1730 STAILQ_REMOVE(&be_lun->config_read_queue, &io->io_hdr,
1732 mtx_unlock(&be_lun->queue_lock);
1733 if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) {
1734 ctl_set_busy(&io->scsiio);
1735 ctl_config_read_done(io);
1738 ctl_be_block_cr_dispatch(be_lun, io);
1741 io = (union ctl_io *)STAILQ_FIRST(&be_lun->input_queue);
1743 DPRINTF("input queue\n");
1744 STAILQ_REMOVE(&be_lun->input_queue, &io->io_hdr,
1746 mtx_unlock(&be_lun->queue_lock);
1747 if (cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) {
1748 ctl_set_busy(&io->scsiio);
1749 ctl_data_submit_done(io);
1752 ctl_be_block_dispatch(be_lun, io);
1757 * If we get here, there is no work left in the queues, so
1758 * just break out and let the task queue go to sleep.
1760 mtx_unlock(&be_lun->queue_lock);
1766 * Entry point from CTL to the backend for I/O. We queue everything to a
1767 * work thread, so this just puts the I/O on a queue and wakes up the
1771 ctl_be_block_submit(union ctl_io *io)
1773 struct ctl_be_block_lun *be_lun;
1775 DPRINTF("entered\n");
1777 be_lun = (struct ctl_be_block_lun *)CTL_BACKEND_LUN(io);
1780 * Make sure we only get SCSI I/O.
1782 KASSERT(io->io_hdr.io_type == CTL_IO_SCSI, ("Non-SCSI I/O (type "
1783 "%#x) encountered", io->io_hdr.io_type));
1787 mtx_lock(&be_lun->queue_lock);
1788 STAILQ_INSERT_TAIL(&be_lun->input_queue, &io->io_hdr, links);
1789 mtx_unlock(&be_lun->queue_lock);
1790 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
1792 return (CTL_RETVAL_COMPLETE);
1796 ctl_be_block_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
1797 int flag, struct thread *td)
1799 struct ctl_be_block_softc *softc = &backend_block_softc;
1805 struct ctl_lun_req *lun_req;
1807 lun_req = (struct ctl_lun_req *)addr;
1809 switch (lun_req->reqtype) {
1810 case CTL_LUNREQ_CREATE:
1811 error = ctl_be_block_create(softc, lun_req);
1814 error = ctl_be_block_rm(softc, lun_req);
1816 case CTL_LUNREQ_MODIFY:
1817 error = ctl_be_block_modify(softc, lun_req);
1820 lun_req->status = CTL_LUN_ERROR;
1821 snprintf(lun_req->error_str, sizeof(lun_req->error_str),
1822 "invalid LUN request type %d",
1837 ctl_be_block_open_file(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
1839 struct ctl_be_lun *cbe_lun;
1840 struct ctl_be_block_filedata *file_data;
1841 struct ctl_lun_create_params *params;
1844 off_t ps, pss, po, pos, us, uss, uo, uos;
1847 cbe_lun = &be_lun->cbe_lun;
1848 file_data = &be_lun->backend.file;
1849 params = &be_lun->params;
1851 be_lun->dev_type = CTL_BE_BLOCK_FILE;
1852 be_lun->dispatch = ctl_be_block_dispatch_file;
1853 be_lun->lun_flush = ctl_be_block_flush_file;
1854 be_lun->get_lba_status = ctl_be_block_gls_file;
1855 be_lun->getattr = ctl_be_block_getattr_file;
1856 be_lun->unmap = NULL;
1857 cbe_lun->flags &= ~CTL_LUN_FLAG_UNMAP;
1859 error = VOP_GETATTR(be_lun->vn, &vattr, curthread->td_ucred);
1861 snprintf(req->error_str, sizeof(req->error_str),
1862 "error calling VOP_GETATTR() for file %s",
1867 file_data->cred = crhold(curthread->td_ucred);
1868 if (params->lun_size_bytes != 0)
1869 be_lun->size_bytes = params->lun_size_bytes;
1871 be_lun->size_bytes = vattr.va_size;
1874 * For files we can use any logical block size. Prefer 512 bytes
1875 * for compatibility reasons. If the file's vattr.va_blocksize
1876 * (preferred I/O block size) is larger than and a multiple of the
1877 * chosen logical block size, report it as the physical block size.
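/*
 * Worked example (illustrative): with the default 512-byte logical block
 * size and vattr.va_blocksize of 32768, pss = 32768 / 512 = 64 below, so
 * pblockexp = fls(64) - 1 = 6 and the LUN advertises 2^6 = 64 logical
 * blocks per physical block.
 */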
1879 if (params->blocksize_bytes != 0)
1880 cbe_lun->blocksize = params->blocksize_bytes;
1881 else if (cbe_lun->lun_type == T_CDROM)
1882 cbe_lun->blocksize = 2048;
1884 cbe_lun->blocksize = 512;
1885 be_lun->size_blocks = be_lun->size_bytes / cbe_lun->blocksize;
1886 cbe_lun->maxlba = (be_lun->size_blocks == 0) ?
1887 0 : (be_lun->size_blocks - 1);
1889 us = ps = vattr.va_blocksize;
1892 value = dnvlist_get_string(cbe_lun->options, "pblocksize", NULL);
1894 ctl_expand_number(value, &ps);
1895 value = dnvlist_get_string(cbe_lun->options, "pblockoffset", NULL);
1897 ctl_expand_number(value, &po);
1898 pss = ps / cbe_lun->blocksize;
1899 pos = po / cbe_lun->blocksize;
1900 if ((pss > 0) && (pss * cbe_lun->blocksize == ps) && (pss >= pos) &&
1901 ((pss & (pss - 1)) == 0) && (pos * cbe_lun->blocksize == po)) {
1902 cbe_lun->pblockexp = fls(pss) - 1;
1903 cbe_lun->pblockoff = (pss - pos) % pss;
1906 value = dnvlist_get_string(cbe_lun->options, "ublocksize", NULL);
1908 ctl_expand_number(value, &us);
1909 value = dnvlist_get_string(cbe_lun->options, "ublockoffset", NULL);
1911 ctl_expand_number(value, &uo);
1912 uss = us / cbe_lun->blocksize;
1913 uos = uo / cbe_lun->blocksize;
1914 if ((uss > 0) && (uss * cbe_lun->blocksize == us) && (uss >= uos) &&
1915 ((uss & (uss - 1)) == 0) && (uos * cbe_lun->blocksize == uo)) {
1916 cbe_lun->ublockexp = fls(uss) - 1;
1917 cbe_lun->ublockoff = (uss - uos) % uss;
1921 * Sanity check. The media size has to be at least one
1924 if (be_lun->size_bytes < cbe_lun->blocksize) {
1926 snprintf(req->error_str, sizeof(req->error_str),
1927 "file %s size %ju < block size %u", be_lun->dev_path,
1928 (uintmax_t)be_lun->size_bytes, cbe_lun->blocksize);
1931 cbe_lun->opttxferlen = CTLBLK_MAX_IO_SIZE / cbe_lun->blocksize;
1936 ctl_be_block_open_dev(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
1938 struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
1939 struct ctl_lun_create_params *params;
1943 int error, atomic, maxio, ref, unmap, tmp;
1944 off_t ps, pss, po, pos, us, uss, uo, uos, otmp;
1946 params = &be_lun->params;
1948 be_lun->dev_type = CTL_BE_BLOCK_DEV;
1949 csw = devvn_refthread(be_lun->vn, &dev, &ref);
1952 if (strcmp(csw->d_name, "zvol") == 0) {
1953 be_lun->dispatch = ctl_be_block_dispatch_zvol;
1954 be_lun->get_lba_status = ctl_be_block_gls_zvol;
1955 atomic = maxio = CTLBLK_MAX_IO_SIZE;
1957 be_lun->dispatch = ctl_be_block_dispatch_dev;
1958 be_lun->get_lba_status = NULL;
1960 maxio = dev->si_iosize_max;
1963 if (maxio > CTLBLK_MAX_SEG)
1964 maxio = CTLBLK_MAX_SEG;
1966 be_lun->lun_flush = ctl_be_block_flush_dev;
1967 be_lun->getattr = ctl_be_block_getattr_dev;
1968 be_lun->unmap = ctl_be_block_unmap_dev;
1970 if (!csw->d_ioctl) {
1971 dev_relthread(dev, ref);
1972 snprintf(req->error_str, sizeof(req->error_str),
1973 "no d_ioctl for device %s!", be_lun->dev_path);
1977 error = csw->d_ioctl(dev, DIOCGSECTORSIZE, (caddr_t)&tmp, FREAD,
1980 dev_relthread(dev, ref);
1981 snprintf(req->error_str, sizeof(req->error_str),
1982 "error %d returned for DIOCGSECTORSIZE ioctl "
1983 "on %s!", error, be_lun->dev_path);
1988 * If the user has asked for a blocksize that is greater than the
1989 * backing device's blocksize, we can do it only if the blocksize
1990 * the user is asking for is an even multiple of the underlying
1991 * device's blocksize.
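/*
 * Example (illustrative): a requested 4096-byte block size over a 512-byte
 * sector device is accepted (4096 % 512 == 0), while a requested 512-byte
 * block size over a 4096-byte sector device is rejected as smaller than
 * the backing store's sector size.
 */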
1993 if ((params->blocksize_bytes != 0) &&
1994 (params->blocksize_bytes >= tmp)) {
1995 if (params->blocksize_bytes % tmp == 0) {
1996 cbe_lun->blocksize = params->blocksize_bytes;
1998 dev_relthread(dev, ref);
1999 snprintf(req->error_str, sizeof(req->error_str),
2000 "requested blocksize %u is not an even "
2001 "multiple of backing device blocksize %u",
2002 params->blocksize_bytes, tmp);
2005 } else if (params->blocksize_bytes != 0) {
2006 dev_relthread(dev, ref);
2007 snprintf(req->error_str, sizeof(req->error_str),
2008 "requested blocksize %u < backing device "
2009 "blocksize %u", params->blocksize_bytes, tmp);
2011 } else if (cbe_lun->lun_type == T_CDROM)
2012 cbe_lun->blocksize = MAX(tmp, 2048);
2014 cbe_lun->blocksize = tmp;
2016 error = csw->d_ioctl(dev, DIOCGMEDIASIZE, (caddr_t)&otmp, FREAD,
2019 dev_relthread(dev, ref);
2020 snprintf(req->error_str, sizeof(req->error_str),
2021 "error %d returned for DIOCGMEDIASIZE "
2022 " ioctl on %s!", error,
2027 if (params->lun_size_bytes != 0) {
2028 if (params->lun_size_bytes > otmp) {
2029 dev_relthread(dev, ref);
2030 snprintf(req->error_str, sizeof(req->error_str),
2031 "requested LUN size %ju > backing device "
2033 (uintmax_t)params->lun_size_bytes,
2038 be_lun->size_bytes = params->lun_size_bytes;
2040 be_lun->size_bytes = otmp;
2041 be_lun->size_blocks = be_lun->size_bytes / cbe_lun->blocksize;
2042 cbe_lun->maxlba = (be_lun->size_blocks == 0) ?
2043 0 : (be_lun->size_blocks - 1);
2045 error = csw->d_ioctl(dev, DIOCGSTRIPESIZE, (caddr_t)&ps, FREAD,
2050 error = csw->d_ioctl(dev, DIOCGSTRIPEOFFSET, (caddr_t)&po,
2058 value = dnvlist_get_string(cbe_lun->options, "pblocksize", NULL);
2060 ctl_expand_number(value, &ps);
2061 value = dnvlist_get_string(cbe_lun->options, "pblockoffset", NULL);
2063 ctl_expand_number(value, &po);
2064 pss = ps / cbe_lun->blocksize;
2065 pos = po / cbe_lun->blocksize;
2066 if ((pss > 0) && (pss * cbe_lun->blocksize == ps) && (pss >= pos) &&
2067 ((pss & (pss - 1)) == 0) && (pos * cbe_lun->blocksize == po)) {
2068 cbe_lun->pblockexp = fls(pss) - 1;
2069 cbe_lun->pblockoff = (pss - pos) % pss;
2072 value = dnvlist_get_string(cbe_lun->options, "ublocksize", NULL);
2074 ctl_expand_number(value, &us);
2075 value = dnvlist_get_string(cbe_lun->options, "ublockoffset", NULL);
2077 ctl_expand_number(value, &uo);
2078 uss = us / cbe_lun->blocksize;
2079 uos = uo / cbe_lun->blocksize;
2080 if ((uss > 0) && (uss * cbe_lun->blocksize == us) && (uss >= uos) &&
2081 ((uss & (uss - 1)) == 0) && (uos * cbe_lun->blocksize == uo)) {
2082 cbe_lun->ublockexp = fls(uss) - 1;
2083 cbe_lun->ublockoff = (uss - uos) % uss;
2086 cbe_lun->atomicblock = atomic / cbe_lun->blocksize;
2087 cbe_lun->opttxferlen = maxio / cbe_lun->blocksize;
2089 if (be_lun->dispatch == ctl_be_block_dispatch_zvol) {
2092 struct diocgattr_arg arg;
2094 strlcpy(arg.name, "GEOM::candelete", sizeof(arg.name));
2095 arg.len = sizeof(arg.value.i);
2096 error = csw->d_ioctl(dev, DIOCGATTR, (caddr_t)&arg, FREAD,
2098 unmap = (error == 0) ? arg.value.i : 0;
2100 value = dnvlist_get_string(cbe_lun->options, "unmap", NULL);
2102 unmap = (strcmp(value, "on") == 0);
2104 cbe_lun->flags |= CTL_LUN_FLAG_UNMAP;
2106 cbe_lun->flags &= ~CTL_LUN_FLAG_UNMAP;
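/*
 * Usage sketch (illustrative, exact ctladm syntax may differ): UNMAP
 * support detected via GEOM::candelete above can be overridden per LUN
 * with the "unmap" option, e.g.
 *   ctladm create -b block -o file=/dev/zvol/pool/vol -o unmap=on
 */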
2108 dev_relthread(dev, ref);
2113 ctl_be_block_close(struct ctl_be_block_lun *be_lun)
2115 struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
2120 if ((cbe_lun->flags & CTL_LUN_FLAG_READONLY) == 0)
2122 (void)vn_close(be_lun->vn, flags, NOCRED, curthread);
2125 switch (be_lun->dev_type) {
2126 case CTL_BE_BLOCK_DEV:
2128 case CTL_BE_BLOCK_FILE:
2129 if (be_lun->backend.file.cred != NULL) {
2130 crfree(be_lun->backend.file.cred);
2131 be_lun->backend.file.cred = NULL;
2134 case CTL_BE_BLOCK_NONE:
2137 panic("Unexpected backend type %d", be_lun->dev_type);
2140 be_lun->dev_type = CTL_BE_BLOCK_NONE;
2146 ctl_be_block_open(struct ctl_be_block_lun *be_lun, struct ctl_lun_req *req)
2148 struct ctl_be_lun *cbe_lun = &be_lun->cbe_lun;
2149 struct nameidata nd;
2154 if (rootvnode == NULL) {
2155 snprintf(req->error_str, sizeof(req->error_str),
2156 "Root filesystem is not mounted");
2161 value = dnvlist_get_string(cbe_lun->options, "file", NULL);
2162 if (value == NULL) {
2163 snprintf(req->error_str, sizeof(req->error_str),
2164 "no file argument specified");
2167 free(be_lun->dev_path, M_CTLBLK);
2168 be_lun->dev_path = strdup(value, M_CTLBLK);
2171 value = dnvlist_get_string(cbe_lun->options, "readonly", NULL);
2172 if (value != NULL) {
2173 if (strcmp(value, "on") != 0)
2175 } else if (cbe_lun->lun_type == T_DIRECT)
2179 NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, be_lun->dev_path, curthread);
2180 error = vn_open(&nd, &flags, 0, NULL);
2181 if ((error == EROFS || error == EACCES) && (flags & FWRITE)) {
2187 * This is the only reasonable guess we can make for the path
2188 * if the user doesn't give us a fully qualified path.
2189 * If they want to specify a file, they need to specify the full path.
2192 if (be_lun->dev_path[0] != '/') {
2195 asprintf(&dev_name, M_CTLBLK, "/dev/%s",
2197 free(be_lun->dev_path, M_CTLBLK);
2198 be_lun->dev_path = dev_name;
2201 snprintf(req->error_str, sizeof(req->error_str),
2202 "error opening %s: %d", be_lun->dev_path, error);
2206 cbe_lun->flags &= ~CTL_LUN_FLAG_READONLY;
2208 cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
2210 NDFREE(&nd, NDF_ONLY_PNBUF);
2211 be_lun->vn = nd.ni_vp;
2213 /* We only support disks and files. */
2214 if (vn_isdisk_error(be_lun->vn, &error)) {
2215 error = ctl_be_block_open_dev(be_lun, req);
2216 } else if (be_lun->vn->v_type == VREG) {
2217 error = ctl_be_block_open_file(be_lun, req);
2220 snprintf(req->error_str, sizeof(req->error_str),
2221 "%s is not a disk or plain file", be_lun->dev_path);
2223 VOP_UNLOCK(be_lun->vn);
2226 ctl_be_block_close(be_lun);
2227 cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;
2228 if (be_lun->dispatch != ctl_be_block_dispatch_dev)
2229 cbe_lun->serseq = CTL_LUN_SERSEQ_READ;
2230 value = dnvlist_get_string(cbe_lun->options, "serseq", NULL);
2231 if (value != NULL && strcmp(value, "on") == 0)
2232 cbe_lun->serseq = CTL_LUN_SERSEQ_ON;
2233 else if (value != NULL && strcmp(value, "read") == 0)
2234 cbe_lun->serseq = CTL_LUN_SERSEQ_READ;
2235 else if (value != NULL && strcmp(value, "off") == 0)
2236 cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;
2241 ctl_be_block_create(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
2243 struct ctl_be_lun *cbe_lun;
2244 struct ctl_be_block_lun *be_lun;
2245 struct ctl_lun_create_params *params;
2246 char num_thread_str[16];
2249 int retval, num_threads;
2250 int tmp_num_threads;
2252 params = &req->reqdata.create;
2254 req->status = CTL_LUN_OK;
2256 be_lun = malloc(sizeof(*be_lun), M_CTLBLK, M_ZERO | M_WAITOK);
2257 cbe_lun = &be_lun->cbe_lun;
2258 be_lun->params = req->reqdata.create;
2259 be_lun->softc = softc;
2260 STAILQ_INIT(&be_lun->input_queue);
2261 STAILQ_INIT(&be_lun->config_read_queue);
2262 STAILQ_INIT(&be_lun->config_write_queue);
2263 STAILQ_INIT(&be_lun->datamove_queue);
2264 mtx_init(&be_lun->io_lock, "ctlblock io", NULL, MTX_DEF);
2265 mtx_init(&be_lun->queue_lock, "ctlblock queue", NULL, MTX_DEF);
2266 cbe_lun->options = nvlist_clone(req->args_nvl);
2268 if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
2269 cbe_lun->lun_type = params->device_type;
2271 cbe_lun->lun_type = T_DIRECT;
2274 value = dnvlist_get_string(cbe_lun->options, "ha_role", NULL);
2275 if (value != NULL) {
2276 if (strcmp(value, "primary") == 0)
2277 cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
2278 } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
2279 cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
2281 if (cbe_lun->lun_type == T_DIRECT ||
2282 cbe_lun->lun_type == T_CDROM) {
2283 be_lun->size_bytes = params->lun_size_bytes;
2284 if (params->blocksize_bytes != 0)
2285 cbe_lun->blocksize = params->blocksize_bytes;
2286 else if (cbe_lun->lun_type == T_CDROM)
2287 cbe_lun->blocksize = 2048;
2289 cbe_lun->blocksize = 512;
2290 be_lun->size_blocks = be_lun->size_bytes / cbe_lun->blocksize;
2291 cbe_lun->maxlba = (be_lun->size_blocks == 0) ?
2292 0 : (be_lun->size_blocks - 1);
2294 if ((cbe_lun->flags & CTL_LUN_FLAG_PRIMARY) ||
2295 control_softc->ha_mode == CTL_HA_MODE_SER_ONLY) {
2296 retval = ctl_be_block_open(be_lun, req);
2299 req->status = CTL_LUN_WARNING;
2302 num_threads = cbb_num_threads;
2307 value = dnvlist_get_string(cbe_lun->options, "num_threads", NULL);
2308 if (value != NULL) {
2309 tmp_num_threads = strtol(value, NULL, 0);
2312 * We don't let the user specify fewer than one
2313 * thread, but we trust them not to request an
2314 * absurd number such as 1000 threads.
2316 if (tmp_num_threads < 1) {
2317 snprintf(req->error_str, sizeof(req->error_str),
2318 "invalid number of threads %s",
2322 num_threads = tmp_num_threads;
2325 if (be_lun->vn == NULL)
2326 cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA;
2327 /* Tell the user the blocksize we ended up using */
2328 params->lun_size_bytes = be_lun->size_bytes;
2329 params->blocksize_bytes = cbe_lun->blocksize;
2330 if (params->flags & CTL_LUN_FLAG_ID_REQ) {
2331 cbe_lun->req_lun_id = params->req_lun_id;
2332 cbe_lun->flags |= CTL_LUN_FLAG_ID_REQ;
2334 cbe_lun->req_lun_id = 0;
2336 cbe_lun->lun_shutdown = ctl_be_block_lun_shutdown;
2337 cbe_lun->be = &ctl_be_block_driver;
2339 if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
2340 snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%04d", softc->num_luns);
2342 strncpy((char *)cbe_lun->serial_num, tmpstr,
2343 MIN(sizeof(cbe_lun->serial_num), sizeof(tmpstr)));
2345 /* Tell the user what we used for a serial number */
2346 strncpy((char *)params->serial_num, tmpstr,
2347 MIN(sizeof(params->serial_num), sizeof(tmpstr)));
2349 strncpy((char *)cbe_lun->serial_num, params->serial_num,
2350 MIN(sizeof(cbe_lun->serial_num),
2351 sizeof(params->serial_num)));
2353 if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
2354 snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%04d", softc->num_luns);
2355 strncpy((char *)cbe_lun->device_id, tmpstr,
2356 MIN(sizeof(cbe_lun->device_id), sizeof(tmpstr)));
2358 /* Tell the user what we used for a device ID */
2359 strncpy((char *)params->device_id, tmpstr,
2360 MIN(sizeof(params->device_id), sizeof(tmpstr)));
2362 strncpy((char *)cbe_lun->device_id, params->device_id,
2363 MIN(sizeof(cbe_lun->device_id),
2364 sizeof(params->device_id)));
2367 TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_be_block_worker, be_lun);
2369 be_lun->io_taskqueue = taskqueue_create("ctlblocktq", M_WAITOK,
2370 taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
2372 if (be_lun->io_taskqueue == NULL) {
2373 snprintf(req->error_str, sizeof(req->error_str),
2374 "unable to create taskqueue");
2379 * Note that we start the same number of threads by default for
2380 * both the file case and the block device case. For the file
2381 * case, we need multiple threads to allow concurrency, because the
2382 * vnode interface is designed to be a blocking interface. For the
2383 * block device case, ZFS zvols at least will block the caller's
2384 * context in many instances, and so we need multiple threads to
2385 * overcome that problem. Other block devices don't need as many
2386 * threads, but they shouldn't cause too many problems.
2388 * If the user wants just a single thread for a block
2389 * device, they can specify that when the LUN is created, or change
2390 * the tunable/sysctl to alter the default number of threads.
2392 retval = taskqueue_start_threads_in_proc(&be_lun->io_taskqueue,
2393 /*num threads*/num_threads,
2395 /*proc*/control_softc->ctl_proc,
2396 /*thread name*/"block");
2401 be_lun->num_threads = num_threads;
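/*
 * Illustration only, not part of the driver: as the comment above says, the
 * thread count can be set per LUN at creation time or through the backend's
 * default.  Assuming the ctladm(8) "-o name=value" backend-option syntax and
 * that the default is exposed as a sysctl under kern.cam.ctl.block (both are
 * assumptions, not verified by this file), that might look like:
 *
 *   ctladm create -b block -o file=/dev/zvol/tank/vol0 -o num_threads=4
 *   sysctl kern.cam.ctl.block.num_threads=32
 *
 * The option names "file" and "num_threads" match the dnvlist_get_string()
 * lookups earlier in this function.
 */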
2403 retval = ctl_add_lun(&be_lun->cbe_lun);
2405 snprintf(req->error_str, sizeof(req->error_str),
2406 "ctl_add_lun() returned error %d, see dmesg for "
2412 be_lun->disk_stats = devstat_new_entry("cbb", cbe_lun->lun_id,
2414 DEVSTAT_ALL_SUPPORTED,
2416 | DEVSTAT_TYPE_IF_OTHER,
2417 DEVSTAT_PRIORITY_OTHER);
2419 mtx_lock(&softc->lock);
2421 SLIST_INSERT_HEAD(&softc->lun_list, be_lun, links);
2422 mtx_unlock(&softc->lock);
2424 params->req_lun_id = cbe_lun->lun_id;
2429 req->status = CTL_LUN_ERROR;
2431 if (be_lun->io_taskqueue != NULL)
2432 taskqueue_free(be_lun->io_taskqueue);
2433 ctl_be_block_close(be_lun);
2434 if (be_lun->dev_path != NULL)
2435 free(be_lun->dev_path, M_CTLBLK);
2436 nvlist_destroy(cbe_lun->options);
2437 mtx_destroy(&be_lun->queue_lock);
2438 mtx_destroy(&be_lun->io_lock);
2439 free(be_lun, M_CTLBLK);
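/*
 * Handle a LUN removal request: unlink the LUN from the backend's list,
 * close the backing store, ask CTL to remove the LUN, and sleep until
 * ctl_be_block_lun_shutdown() reports the LUN as fully unconfigured.
 */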
2445 ctl_be_block_rm(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
2447 struct ctl_lun_rm_params *params;
2448 struct ctl_be_block_lun *be_lun;
2449 struct ctl_be_lun *cbe_lun;
2452 params = &req->reqdata.rm;
2454 sx_xlock(&softc->modify_lock);
2455 mtx_lock(&softc->lock);
2456 SLIST_FOREACH(be_lun, &softc->lun_list, links) {
2457 if (be_lun->cbe_lun.lun_id == params->lun_id) {
2458 SLIST_REMOVE(&softc->lun_list, be_lun,
2459 ctl_be_block_lun, links);
2464 mtx_unlock(&softc->lock);
2465 sx_xunlock(&softc->modify_lock);
2466 if (be_lun == NULL) {
2467 snprintf(req->error_str, sizeof(req->error_str),
2468 "LUN %u is not managed by the block backend",
2472 cbe_lun = &be_lun->cbe_lun;
2474 if (be_lun->vn != NULL) {
2475 cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA;
2476 ctl_lun_no_media(cbe_lun);
2477 taskqueue_drain_all(be_lun->io_taskqueue);
2478 ctl_be_block_close(be_lun);
2481 mtx_lock(&softc->lock);
2482 be_lun->flags |= CTL_BE_BLOCK_LUN_WAITING;
2483 mtx_unlock(&softc->lock);
2485 retval = ctl_remove_lun(cbe_lun);
2487 snprintf(req->error_str, sizeof(req->error_str),
2488 "error %d returned from ctl_remove_lun() for "
2489 "LUN %d", retval, params->lun_id);
2490 mtx_lock(&softc->lock);
2491 be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING;
2492 mtx_unlock(&softc->lock);
2496 mtx_lock(&softc->lock);
2497 while ((be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) == 0) {
2498 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlblockrm", 0);
2499 if (retval == EINTR)
2502 be_lun->flags &= ~CTL_BE_BLOCK_LUN_WAITING;
2503 if (be_lun->flags & CTL_BE_BLOCK_LUN_UNCONFIGURED) {
2504 mtx_unlock(&softc->lock);
2505 free(be_lun, M_CTLBLK);
2507 mtx_unlock(&softc->lock);
2511 req->status = CTL_LUN_OK;
2515 req->status = CTL_LUN_ERROR;
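/*
 * Handle a LUN modification request: replace the option list if new
 * arguments were supplied, re-evaluate the HA role and requested size,
 * reopen or close the backing store as appropriate, and notify CTL about
 * media and capacity changes.
 */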
2520 ctl_be_block_modify(struct ctl_be_block_softc *softc, struct ctl_lun_req *req)
2522 struct ctl_lun_modify_params *params;
2523 struct ctl_be_block_lun *be_lun;
2524 struct ctl_be_lun *cbe_lun;
2529 params = &req->reqdata.modify;
2531 sx_xlock(&softc->modify_lock);
2532 mtx_lock(&softc->lock);
2533 SLIST_FOREACH(be_lun, &softc->lun_list, links) {
2534 if (be_lun->cbe_lun.lun_id == params->lun_id)
2537 mtx_unlock(&softc->lock);
2538 if (be_lun == NULL) {
2539 snprintf(req->error_str, sizeof(req->error_str),
2540 "LUN %u is not managed by the block backend",
2544 cbe_lun = &be_lun->cbe_lun;
2546 if (params->lun_size_bytes != 0)
2547 be_lun->params.lun_size_bytes = params->lun_size_bytes;
2549 if (req->args_nvl != NULL) {
2550 nvlist_destroy(cbe_lun->options);
2551 cbe_lun->options = nvlist_clone(req->args_nvl);
2554 wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY);
2555 value = dnvlist_get_string(cbe_lun->options, "ha_role", NULL);
2556 if (value != NULL) {
2557 if (strcmp(value, "primary") == 0)
2558 cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
2560 cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
2561 } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
2562 cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
2564 cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
2565 if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) {
2566 if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)
2567 ctl_lun_primary(cbe_lun);
2569 ctl_lun_secondary(cbe_lun);
2572 oldsize = be_lun->size_blocks;
2573 if ((cbe_lun->flags & CTL_LUN_FLAG_PRIMARY) ||
2574 control_softc->ha_mode == CTL_HA_MODE_SER_ONLY) {
2575 if (be_lun->vn == NULL)
2576 error = ctl_be_block_open(be_lun, req);
2577 else if (vn_isdisk_error(be_lun->vn, &error))
2578 error = ctl_be_block_open_dev(be_lun, req);
2579 else if (be_lun->vn->v_type == VREG) {
2580 vn_lock(be_lun->vn, LK_SHARED | LK_RETRY);
2581 error = ctl_be_block_open_file(be_lun, req);
2582 VOP_UNLOCK(be_lun->vn);
2585 if ((cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) &&
2586 be_lun->vn != NULL) {
2587 cbe_lun->flags &= ~CTL_LUN_FLAG_NO_MEDIA;
2588 ctl_lun_has_media(cbe_lun);
2589 } else if ((cbe_lun->flags & CTL_LUN_FLAG_NO_MEDIA) == 0 &&
2590 be_lun->vn == NULL) {
2591 cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA;
2592 ctl_lun_no_media(cbe_lun);
2594 cbe_lun->flags &= ~CTL_LUN_FLAG_EJECTED;
2596 if (be_lun->vn != NULL) {
2597 cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA;
2598 ctl_lun_no_media(cbe_lun);
2599 taskqueue_drain_all(be_lun->io_taskqueue);
2600 error = ctl_be_block_close(be_lun);
2604 if (be_lun->size_blocks != oldsize)
2605 ctl_lun_capacity_changed(cbe_lun);
2607 /* Tell the user the exact size we ended up using */
2608 params->lun_size_bytes = be_lun->size_bytes;
2610 sx_xunlock(&softc->modify_lock);
2611 req->status = error ? CTL_LUN_WARNING : CTL_LUN_OK;
2615 sx_xunlock(&softc->modify_lock);
2616 req->status = CTL_LUN_ERROR;
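/*
 * Final teardown callback, invoked once CTL has removed the LUN: drain and
 * free the taskqueue, release the devstat entry and the remaining per-LUN
 * resources, then wake up any thread waiting in ctl_be_block_rm() (or free
 * the LUN structure if nobody is waiting).
 */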
2621 ctl_be_block_lun_shutdown(struct ctl_be_lun *cbe_lun)
2623 struct ctl_be_block_lun *be_lun = (struct ctl_be_block_lun *)cbe_lun;
2624 struct ctl_be_block_softc *softc = be_lun->softc;
2626 taskqueue_drain_all(be_lun->io_taskqueue);
2627 taskqueue_free(be_lun->io_taskqueue);
2628 if (be_lun->disk_stats != NULL)
2629 devstat_remove_entry(be_lun->disk_stats);
2630 nvlist_destroy(be_lun->cbe_lun.options);
2631 free(be_lun->dev_path, M_CTLBLK);
2632 mtx_destroy(&be_lun->queue_lock);
2633 mtx_destroy(&be_lun->io_lock);
2635 mtx_lock(&softc->lock);
2636 be_lun->flags |= CTL_BE_BLOCK_LUN_UNCONFIGURED;
2637 if (be_lun->flags & CTL_BE_BLOCK_LUN_WAITING)
	wakeup(be_lun);
else
2640 	free(be_lun, M_CTLBLK);
2641 mtx_unlock(&softc->lock);
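/*
 * Handle configuration (non-data) write commands.  SYNCHRONIZE CACHE is
 * queued to the worker thread via the config write queue; START STOP UNIT
 * is handled inline, updating media and power state and opening or closing
 * the backing store for load/eject requests; unknown opcodes are rejected.
 */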
2645 ctl_be_block_config_write(union ctl_io *io)
2647 struct ctl_be_block_lun *be_lun;
2648 struct ctl_be_lun *cbe_lun;
2651 DPRINTF("entered\n");
2653 cbe_lun = CTL_BACKEND_LUN(io);
2654 be_lun = (struct ctl_be_block_lun *)cbe_lun;
2657 switch (io->scsiio.cdb[0]) {
2658 case SYNCHRONIZE_CACHE:
2659 case SYNCHRONIZE_CACHE_16:
2664 * The upper level CTL code will filter out any CDBs with
2665 * the immediate bit set and return the proper error.
2667 * We don't really need to worry about what LBA range the
2668 * user asked to be synced out. When they issue a sync
2669 * cache command, we'll sync out the whole thing.
2671 mtx_lock(&be_lun->queue_lock);
2672 STAILQ_INSERT_TAIL(&be_lun->config_write_queue, &io->io_hdr,
2674 mtx_unlock(&be_lun->queue_lock);
2675 taskqueue_enqueue(be_lun->io_taskqueue, &be_lun->io_task);
2677 case START_STOP_UNIT: {
2678 struct scsi_start_stop_unit *cdb;
2679 struct ctl_lun_req req;
2681 cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
2682 if ((cdb->how & SSS_PC_MASK) != 0) {
2683 ctl_set_success(&io->scsiio);
2684 ctl_config_write_done(io);
2687 if (cdb->how & SSS_START) {
2688 if ((cdb->how & SSS_LOEJ) && be_lun->vn == NULL) {
2689 retval = ctl_be_block_open(be_lun, &req);
2690 cbe_lun->flags &= ~CTL_LUN_FLAG_EJECTED;
2692 cbe_lun->flags &= ~CTL_LUN_FLAG_NO_MEDIA;
2693 ctl_lun_has_media(cbe_lun);
2695 cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA;
2696 ctl_lun_no_media(cbe_lun);
2699 ctl_start_lun(cbe_lun);
2701 ctl_stop_lun(cbe_lun);
2702 if (cdb->how & SSS_LOEJ) {
2703 cbe_lun->flags |= CTL_LUN_FLAG_NO_MEDIA;
2704 cbe_lun->flags |= CTL_LUN_FLAG_EJECTED;
2705 ctl_lun_ejected(cbe_lun);
2706 if (be_lun->vn != NULL)
2707 ctl_be_block_close(be_lun);
2711 ctl_set_success(&io->scsiio);
2712 ctl_config_write_done(io);
2716 ctl_set_success(&io->scsiio);
2717 ctl_config_write_done(io);
2720 ctl_set_invalid_opcode(&io->scsiio);
2721 ctl_config_write_done(io);
2722 retval = CTL_RETVAL_COMPLETE;
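/*
 * Handle configuration read commands.  GET LBA STATUS (SGLS_SERVICE_ACTION)
 * needs backing store information, so it is queued to the worker thread;
 * unsupported service actions and opcodes are rejected immediately.
 */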
2730 ctl_be_block_config_read(union ctl_io *io)
2732 struct ctl_be_block_lun *be_lun;
2735 DPRINTF("entered\n");
2737 be_lun = (struct ctl_be_block_lun *)CTL_BACKEND_LUN(io);
2739 switch (io->scsiio.cdb[0]) {
2740 case SERVICE_ACTION_IN:
2741 if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) {
2742 mtx_lock(&be_lun->queue_lock);
2743 STAILQ_INSERT_TAIL(&be_lun->config_read_queue,
2744 &io->io_hdr, links);
2745 mtx_unlock(&be_lun->queue_lock);
2746 taskqueue_enqueue(be_lun->io_taskqueue,
2748 retval = CTL_RETVAL_QUEUED;
2751 ctl_set_invalid_field(&io->scsiio,
2757 ctl_config_read_done(io);
2758 retval = CTL_RETVAL_COMPLETE;
2761 ctl_set_invalid_opcode(&io->scsiio);
2762 ctl_config_read_done(io);
2763 retval = CTL_RETVAL_COMPLETE;
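/*
 * Emit backend-specific LUN information (currently the worker thread count)
 * as XML elements into the caller's sbuf for LUN list reporting.
 */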
2771 ctl_be_block_lun_info(struct ctl_be_lun *cbe_lun, struct sbuf *sb)
2773 struct ctl_be_block_lun *lun = (struct ctl_be_block_lun *)cbe_lun;
2776 retval = sbuf_printf(sb, "\t<num_threads>");
2779 retval = sbuf_printf(sb, "%d", lun->num_threads);
2782 retval = sbuf_printf(sb, "</num_threads>\n");
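/*
 * Return a backend-specific LUN attribute through the per-LUN getattr hook,
 * or UINT64_MAX if the backing store does not provide one.
 */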
2789 ctl_be_block_lun_attr(struct ctl_be_lun *cbe_lun, const char *attrname)
2791 struct ctl_be_block_lun *lun = (struct ctl_be_block_lun *)cbe_lun;
2793 if (lun->getattr == NULL)
2794 return (UINT64_MAX);
2795 return (lun->getattr(lun, attrname));
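/*
 * Backend initialization: set up the softc locks, the UMA zones used for
 * I/O descriptors and data buffer segments, and the LUN list.
 */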
2799 ctl_be_block_init(void)
2801 struct ctl_be_block_softc *softc = &backend_block_softc;
2803 sx_init(&softc->modify_lock, "ctlblock modify");
2804 mtx_init(&softc->lock, "ctlblock", NULL, MTX_DEF);
2805 softc->beio_zone = uma_zcreate("beio", sizeof(struct ctl_be_block_io),
2806 NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
2807 softc->bufmin_zone = uma_zcreate("ctlblockmin", CTLBLK_MIN_SEG,
2808 NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0);
2809 if (CTLBLK_MIN_SEG < CTLBLK_MAX_SEG)
2810 softc->bufmax_zone = uma_zcreate("ctlblockmax", CTLBLK_MAX_SEG,
2811 NULL, NULL, NULL, NULL, /*align*/ 0, /*flags*/0);
2812 SLIST_INIT(&softc->lun_list);
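/*
 * Backend shutdown: remove every remaining LUN, dropping the softc lock
 * around ctl_remove_lun() to avoid recursing on it, then destroy the UMA
 * zones and locks.
 */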
2817 ctl_be_block_shutdown(void)
2819 struct ctl_be_block_softc *softc = &backend_block_softc;
2820 struct ctl_be_block_lun *lun;
2822 mtx_lock(&softc->lock);
2823 while ((lun = SLIST_FIRST(&softc->lun_list)) != NULL) {
2824 SLIST_REMOVE_HEAD(&softc->lun_list, links);
2827 * Drop our lock here. Since ctl_remove_lun() can call
2828 * back into us, this could potentially lead to a recursive
2829 * lock of the same mutex, which would cause a hang.
2831 mtx_unlock(&softc->lock);
2832 ctl_remove_lun(&lun->cbe_lun);
2833 mtx_lock(&softc->lock);
2835 mtx_unlock(&softc->lock);
2836 uma_zdestroy(softc->bufmin_zone);
2837 if (CTLBLK_MIN_SEG < CTLBLK_MAX_SEG)
2838 uma_zdestroy(softc->bufmax_zone);
2839 uma_zdestroy(softc->beio_zone);
2840 mtx_destroy(&softc->lock);
2841 sx_destroy(&softc->modify_lock);