2 * Copyright (c) 2003, 2008 Silicon Graphics International Corp.
3 * Copyright (c) 2012 The FreeBSD Foundation
6 * Portions of this software were developed by Edward Tomasz Napierala
7 * under sponsorship from the FreeBSD Foundation.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions, and the following disclaimer,
14 * without modification.
15 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
16 * substantially similar to the "NO WARRANTY" disclaimer below
17 * ("Disclaimer") and any redistribution must be conditioned upon
18 * including a substantially similar Disclaimer requirement for further
19 * binary redistribution.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
30 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
31 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGES.
34 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $
37 * CAM Target Layer backend for a "fake" ramdisk.
39 * Author: Ken Merry <ken@FreeBSD.org>
42 #include <sys/cdefs.h>
43 __FBSDID("$FreeBSD$");
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/kernel.h>
48 #include <sys/condvar.h>
49 #include <sys/types.h>
51 #include <sys/mutex.h>
52 #include <sys/malloc.h>
53 #include <sys/taskqueue.h>
55 #include <sys/queue.h>
57 #include <sys/ioccom.h>
58 #include <sys/module.h>
60 #include <cam/scsi/scsi_all.h>
61 #include <cam/ctl/ctl_io.h>
62 #include <cam/ctl/ctl.h>
63 #include <cam/ctl/ctl_util.h>
64 #include <cam/ctl/ctl_backend.h>
65 #include <cam/ctl/ctl_frontend_internal.h>
66 #include <cam/ctl/ctl_debug.h>
67 #include <cam/ctl/ctl_ioctl.h>
68 #include <cam/ctl/ctl_error.h>
/* Per-LUN lifecycle flags (bit mask stored in ctl_be_ramdisk_lun.flags). */
CTL_BE_RAMDISK_LUN_UNCONFIGURED = 0x01,	/* set at create, cleared once CTL reports config OK */
CTL_BE_RAMDISK_LUN_CONFIG_ERR = 0x02,	/* config_status callback reported an error */
CTL_BE_RAMDISK_LUN_WAITING = 0x04	/* a thread is msleep()ing on this LUN for (un)configure */
} ctl_be_ramdisk_lun_flags;
/*
 * Per-LUN state.  Linked on the softc's lun_list (softc->lock); the
 * continue queue has its own lock so the I/O path need not take the
 * global softc lock.
 */
struct ctl_be_ramdisk_lun {
struct ctl_be_ramdisk_softc *softc;	/* back-pointer to the global softc */
ctl_be_ramdisk_lun_flags flags;	/* lifecycle flags, protected by softc->lock */
STAILQ_ENTRY(ctl_be_ramdisk_lun) links;	/* linkage on softc->lun_list */
struct ctl_be_lun ctl_be_lun;	/* generic CTL backend LUN descriptor */
struct taskqueue *io_taskqueue;	/* runs ctl_backend_ramdisk_worker for this LUN */
STAILQ_HEAD(, ctl_io_hdr) cont_queue;	/* I/Os needing another data-move pass */
struct mtx_padalign queue_lock;	/* protects cont_queue */
/*
 * Global backend state.  The backing store (page array or flat buffer)
 * is shared by all LUNs — this is a "fake" ramdisk, so per-LUN data is
 * not actually retained.
 */
struct ctl_be_ramdisk_softc {
#ifdef CTL_RAMDISK_PAGES
uint8_t **ramdisk_pages;	/* num_pages PAGE_SIZE pages backing all I/O */
uint8_t *ramdisk_buffer;	/* single contiguous rd_size buffer (non-PAGES build) */
STAILQ_HEAD(, ctl_be_ramdisk_lun) lun_list;	/* all LUNs, protected by the softc lock */
/* The single, global instance of this backend's state. */
static struct ctl_be_ramdisk_softc rd_softc;

/* Forward declarations for the backend entry points below. */
int ctl_backend_ramdisk_init(void);
void ctl_backend_ramdisk_shutdown(void);
static int ctl_backend_ramdisk_move_done(union ctl_io *io);
static int ctl_backend_ramdisk_submit(union ctl_io *io);
static void ctl_backend_ramdisk_continue(union ctl_io *io);
static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd,
caddr_t addr, int flag, struct thread *td);
static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
struct ctl_lun_req *req);
static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
struct ctl_lun_req *req, int do_wait);
static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
struct ctl_lun_req *req);
static void ctl_backend_ramdisk_worker(void *context, int pending);
static void ctl_backend_ramdisk_lun_shutdown(void *be_lun);
static void ctl_backend_ramdisk_lun_config_status(void *be_lun,
ctl_lun_config_status status);
static int ctl_backend_ramdisk_config_write(union ctl_io *io);
static int ctl_backend_ramdisk_config_read(union ctl_io *io);

/* Method table registered with CTL via CTL_BACKEND_DECLARE() below. */
static struct ctl_backend_driver ctl_be_ramdisk_driver =
.flags = CTL_BE_FLAG_HAS_CONFIG,
.init = ctl_backend_ramdisk_init,
.data_submit = ctl_backend_ramdisk_submit,
.data_move_done = ctl_backend_ramdisk_move_done,
.config_read = ctl_backend_ramdisk_config_read,
.config_write = ctl_backend_ramdisk_config_write,
.ioctl = ctl_backend_ramdisk_ioctl

MALLOC_DEFINE(M_RAMDISK, "ramdisk", "Memory used for CTL RAMdisk");
CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver);
/*
 * Module init: zero the global softc, set up its mutex and LUN list, and
 * allocate the shared backing store — either an array of PAGE_SIZE pages
 * (CTL_RAMDISK_PAGES) or one contiguous rd_size buffer.
 */
ctl_backend_ramdisk_init(void)
struct ctl_be_ramdisk_softc *softc;
#ifdef CTL_RAMDISK_PAGES
memset(softc, 0, sizeof(*softc));
mtx_init(&softc->lock, "ctlramdisk", NULL, MTX_DEF);
STAILQ_INIT(&softc->lun_list);
softc->rd_size = 1024 * 1024;	/* 1 MB of shared backing store */
#ifdef CTL_RAMDISK_PAGES
softc->num_pages = softc->rd_size / PAGE_SIZE;
softc->ramdisk_pages = (uint8_t **)malloc(sizeof(uint8_t *) *
softc->num_pages, M_RAMDISK,
for (i = 0; i < softc->num_pages; i++)
softc->ramdisk_pages[i] = malloc(PAGE_SIZE, M_RAMDISK,M_WAITOK);
softc->ramdisk_buffer = (uint8_t *)malloc(softc->rd_size, M_RAMDISK,
/*
 * Module shutdown: disable and invalidate every LUN on the list, free the
 * shared backing store, then deregister the driver from CTL.
 */
ctl_backend_ramdisk_shutdown(void)
struct ctl_be_ramdisk_softc *softc;
struct ctl_be_ramdisk_lun *lun, *next_lun;
#ifdef CTL_RAMDISK_PAGES
mtx_lock(&softc->lock);
for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){
/*
 * Grab the next LUN.  The current LUN may get removed by
 * ctl_invalidate_lun(), which will call our LUN shutdown
 * routine, if there is no outstanding I/O for this LUN.
 */
next_lun = STAILQ_NEXT(lun, links);
/*
 * Drop our lock here.  Since ctl_invalidate_lun() can call
 * back into us, this could potentially lead to a recursive
 * lock of the same mutex, which would cause a hang.
 */
mtx_unlock(&softc->lock);
ctl_disable_lun(&lun->ctl_be_lun);
ctl_invalidate_lun(&lun->ctl_be_lun);
mtx_lock(&softc->lock);
mtx_unlock(&softc->lock);
#ifdef CTL_RAMDISK_PAGES
for (i = 0; i < softc->num_pages; i++)
free(softc->ramdisk_pages[i], M_RAMDISK);
free(softc->ramdisk_pages, M_RAMDISK);
free(softc->ramdisk_buffer, M_RAMDISK);
if (ctl_backend_deregister(&ctl_be_ramdisk_driver) != 0) {
printf("ctl_backend_ramdisk_shutdown: "
"ctl_backend_deregister() failed!\n");
/*
 * Data-move completion callback (installed as be_move_done by
 * ctl_backend_ramdisk_continue()).  Frees any S/G list allocated for the
 * pass, accounts DMA time, and either queues the I/O for another pass
 * (bytes remain in the CTL_PRIV_BACKEND counter) or completes it.
 */
ctl_backend_ramdisk_move_done(union ctl_io *io)
struct ctl_be_lun *ctl_be_lun;
struct ctl_be_ramdisk_lun *be_lun;
struct bintime cur_bt;
CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n"));
ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
CTL_PRIV_BACKEND_LUN].ptr;
be_lun = (struct ctl_be_ramdisk_lun *)ctl_be_lun->be_lun;
bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
bintime_add(&io->io_hdr.dma_bt, &cur_bt);
io->io_hdr.num_dmas++;
/* kern_data_ptr was malloc'ed only when an S/G list was built. */
if (io->scsiio.kern_sg_entries > 0)
free(io->scsiio.kern_data_ptr, M_RAMDISK);
io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
if (io->io_hdr.flags & CTL_FLAG_ABORT) {
} else if ((io->io_hdr.port_status == 0) &&
((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
if (io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer > 0) {
/* More to transfer: hand back to the per-LUN worker. */
mtx_lock(&be_lun->queue_lock);
STAILQ_INSERT_TAIL(&be_lun->cont_queue,
mtx_unlock(&be_lun->queue_lock);
taskqueue_enqueue(be_lun->io_taskqueue,
ctl_set_success(&io->scsiio);
} else if ((io->io_hdr.port_status != 0) &&
((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
(io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
/*
 * For hardware error sense keys, the sense key
 * specific value is defined to be a retry count,
 * but we use it to pass back an internal FETD
 * error code.  XXX KDM Hopefully the FETD is only
 * using 16 bits for an error code, since that's
 * all the space we have in the sks field.
 */
ctl_set_internal_failure(&io->scsiio,
io->io_hdr.port_status);
ctl_data_submit_done(io);
/*
 * Data I/O entry point.  VERIFY completes immediately (no stored data to
 * compare against); otherwise the total transfer size in bytes is stashed
 * in the CTL_PRIV_BACKEND counter and the first data-move pass started.
 */
ctl_backend_ramdisk_submit(union ctl_io *io)
struct ctl_be_lun *ctl_be_lun;
struct ctl_lba_len_flags *lbalen;
ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
CTL_PRIV_BACKEND_LUN].ptr;
lbalen = (struct ctl_lba_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
if (lbalen->flags & CTL_LLF_VERIFY) {
ctl_set_success(&io->scsiio);
ctl_data_submit_done(io);
return (CTL_RETVAL_COMPLETE);
/* Remaining byte count; decremented by each continue() pass. */
io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer =
lbalen->len * ctl_be_lun->blocksize;
ctl_backend_ramdisk_continue(io);
return (CTL_RETVAL_COMPLETE);
/*
 * Set up one data-move pass: point the I/O at the shared backing store
 * (an S/G list over the page array, or the flat buffer), capped at
 * rd_size bytes, decrement the remaining-byte counter, and hand the I/O
 * to CTL for the actual move.  move_done() re-queues if bytes remain.
 */
ctl_backend_ramdisk_continue(union ctl_io *io)
struct ctl_be_ramdisk_softc *softc;
int len, len_filled, sg_filled;
#ifdef CTL_RAMDISK_PAGES
struct ctl_sg_entry *sg_entries;
len = io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer;
#ifdef CTL_RAMDISK_PAGES
sg_filled = min(btoc(len), softc->num_pages);
io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) *
sg_filled, M_RAMDISK,
sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
for (i = 0, len_filled = 0; i < sg_filled; i++) {
sg_entries[i].addr = softc->ramdisk_pages[i];
sg_entries[i].len = MIN(PAGE_SIZE, len - len_filled);
len_filled += sg_entries[i].len;
io->io_hdr.flags |= CTL_FLAG_KDPTR_SGLIST;
io->scsiio.kern_data_ptr = softc->ramdisk_pages[0];
len_filled = min(len, softc->rd_size);
io->scsiio.kern_data_ptr = softc->ramdisk_buffer;
#endif /* CTL_RAMDISK_PAGES */
io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
io->scsiio.kern_data_resid = 0;
io->scsiio.kern_data_len = len_filled;
io->scsiio.kern_sg_entries = sg_filled;
io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer -= len_filled;
getbintime(&io->io_hdr.dma_start_bt);
/*
 * Taskqueue handler for a LUN: drain cont_queue, running another
 * data-move pass for each queued I/O.  The queue lock is dropped around
 * ctl_backend_ramdisk_continue() and re-taken for the next dequeue.
 */
ctl_backend_ramdisk_worker(void *context, int pending)
struct ctl_be_ramdisk_softc *softc;
struct ctl_be_ramdisk_lun *be_lun;
be_lun = (struct ctl_be_ramdisk_lun *)context;
softc = be_lun->softc;
mtx_lock(&be_lun->queue_lock);
io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue);
STAILQ_REMOVE(&be_lun->cont_queue, &io->io_hdr,
mtx_unlock(&be_lun->queue_lock);
ctl_backend_ramdisk_continue(io);
mtx_lock(&be_lun->queue_lock);
/*
 * If we get here, there is no work left in the queues, so
 * just break out and let the task queue go to sleep.
 */
mtx_unlock(&be_lun->queue_lock);
/*
 * Character-device ioctl handler: dispatches CTL LUN requests
 * (create / remove / modify) to the corresponding backend routine and
 * reports unknown request types back through lun_req->error_str.
 */
ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
int flag, struct thread *td)
struct ctl_be_ramdisk_softc *softc;
struct ctl_lun_req *lun_req;
lun_req = (struct ctl_lun_req *)addr;
switch (lun_req->reqtype) {
case CTL_LUNREQ_CREATE:
retval = ctl_backend_ramdisk_create(softc, lun_req,
retval = ctl_backend_ramdisk_rm(softc, lun_req);
case CTL_LUNREQ_MODIFY:
retval = ctl_backend_ramdisk_modify(softc, lun_req);
lun_req->status = CTL_LUN_ERROR;
snprintf(lun_req->error_str, sizeof(lun_req->error_str),
"%s: invalid LUN request type %d", __func__,
/*
 * Remove a LUN: look it up by lun_id, disable it, invalidate it, then
 * sleep until the shutdown callback marks it UNCONFIGURED.  Only on a
 * clean (uninterrupted) wait is the LUN unlinked and freed here;
 * otherwise the shutdown routine owns the cleanup.
 */
ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
struct ctl_lun_req *req)
struct ctl_be_ramdisk_lun *be_lun;
struct ctl_lun_rm_params *params;
params = &req->reqdata.rm;
mtx_lock(&softc->lock);
STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
if (be_lun->ctl_be_lun.lun_id == params->lun_id)
mtx_unlock(&softc->lock);
if (be_lun == NULL) {
snprintf(req->error_str, sizeof(req->error_str),
"%s: LUN %u is not managed by the ramdisk backend",
__func__, params->lun_id);
retval = ctl_disable_lun(&be_lun->ctl_be_lun);
snprintf(req->error_str, sizeof(req->error_str),
"%s: error %d returned from ctl_disable_lun() for "
"LUN %d", __func__, retval, params->lun_id);
/*
 * Set the waiting flag before we invalidate the LUN.  Our shutdown
 * routine can be called any time after we invalidate the LUN,
 * and can be called from our context.
 *
 * This tells the shutdown routine that we're waiting, or we're
 * going to wait for the shutdown to happen.
 */
mtx_lock(&softc->lock);
be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
mtx_unlock(&softc->lock);
retval = ctl_invalidate_lun(&be_lun->ctl_be_lun);
snprintf(req->error_str, sizeof(req->error_str),
"%s: error %d returned from ctl_invalidate_lun() for "
"LUN %d", __func__, retval, params->lun_id);
mtx_lock(&softc->lock);
be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
mtx_unlock(&softc->lock);
mtx_lock(&softc->lock);
/* PCATCH: the wait is interruptible by a signal. */
while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) {
retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
/*
 * We only remove this LUN from the list and free it (below) if
 * retval == 0.  If the user interrupted the wait, we just bail out
 * without actually freeing the LUN.  We let the shutdown routine
 * free the LUN if that happens.
 */
STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
mtx_unlock(&softc->lock);
taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task);
taskqueue_free(be_lun->io_taskqueue);
ctl_free_opts(&be_lun->ctl_be_lun.options);
mtx_destroy(&be_lun->queue_lock);
free(be_lun, M_RAMDISK);
req->status = CTL_LUN_OK;
req->status = CTL_LUN_ERROR;
524 ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
525 struct ctl_lun_req *req, int do_wait)
527 struct ctl_be_ramdisk_lun *be_lun;
528 struct ctl_lun_create_params *params;
535 params = &req->reqdata.create;
536 if (params->blocksize_bytes != 0)
537 blocksize = params->blocksize_bytes;
541 be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | (do_wait ?
542 M_WAITOK : M_NOWAIT));
544 if (be_lun == NULL) {
545 snprintf(req->error_str, sizeof(req->error_str),
546 "%s: error allocating %zd bytes", __func__,
550 sprintf(be_lun->lunname, "cram%d", softc->num_luns);
551 ctl_init_opts(&be_lun->ctl_be_lun.options,
552 req->num_be_args, req->kern_be_args);
554 if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
555 be_lun->ctl_be_lun.lun_type = params->device_type;
557 be_lun->ctl_be_lun.lun_type = T_DIRECT;
559 if (be_lun->ctl_be_lun.lun_type == T_DIRECT) {
561 if (params->lun_size_bytes < blocksize) {
562 snprintf(req->error_str, sizeof(req->error_str),
563 "%s: LUN size %ju < blocksize %u", __func__,
564 params->lun_size_bytes, blocksize);
568 be_lun->size_blocks = params->lun_size_bytes / blocksize;
569 be_lun->size_bytes = be_lun->size_blocks * blocksize;
571 be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
572 be_lun->ctl_be_lun.atomicblock = UINT32_MAX;
573 be_lun->ctl_be_lun.opttxferlen = softc->rd_size / blocksize;
575 be_lun->ctl_be_lun.maxlba = 0;
577 be_lun->size_bytes = 0;
578 be_lun->size_blocks = 0;
581 be_lun->ctl_be_lun.blocksize = blocksize;
583 /* Tell the user the blocksize we ended up using */
584 params->blocksize_bytes = blocksize;
586 /* Tell the user the exact size we ended up using */
587 params->lun_size_bytes = be_lun->size_bytes;
589 be_lun->softc = softc;
592 value = ctl_get_opt(&be_lun->ctl_be_lun.options, "unmap");
593 if (value != NULL && strcmp(value, "on") == 0)
594 unmap = (strcmp(value, "on") == 0);
596 be_lun->flags = CTL_BE_RAMDISK_LUN_UNCONFIGURED;
597 be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY;
599 be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_UNMAP;
600 be_lun->ctl_be_lun.be_lun = be_lun;
602 if (params->flags & CTL_LUN_FLAG_ID_REQ) {
603 be_lun->ctl_be_lun.req_lun_id = params->req_lun_id;
604 be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ;
606 be_lun->ctl_be_lun.req_lun_id = 0;
608 be_lun->ctl_be_lun.lun_shutdown = ctl_backend_ramdisk_lun_shutdown;
609 be_lun->ctl_be_lun.lun_config_status =
610 ctl_backend_ramdisk_lun_config_status;
611 be_lun->ctl_be_lun.be = &ctl_be_ramdisk_driver;
612 if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
613 snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
615 strncpy((char *)be_lun->ctl_be_lun.serial_num, tmpstr,
616 MIN(sizeof(be_lun->ctl_be_lun.serial_num),
619 /* Tell the user what we used for a serial number */
620 strncpy((char *)params->serial_num, tmpstr,
621 MIN(sizeof(params->serial_num), sizeof(tmpstr)));
623 strncpy((char *)be_lun->ctl_be_lun.serial_num,
625 MIN(sizeof(be_lun->ctl_be_lun.serial_num),
626 sizeof(params->serial_num)));
628 if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
629 snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
630 strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr,
631 MIN(sizeof(be_lun->ctl_be_lun.device_id),
634 /* Tell the user what we used for a device ID */
635 strncpy((char *)params->device_id, tmpstr,
636 MIN(sizeof(params->device_id), sizeof(tmpstr)));
638 strncpy((char *)be_lun->ctl_be_lun.device_id,
640 MIN(sizeof(be_lun->ctl_be_lun.device_id),
641 sizeof(params->device_id)));
644 STAILQ_INIT(&be_lun->cont_queue);
645 mtx_init(&be_lun->queue_lock, "cram queue lock", NULL, MTX_DEF);
646 TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker,
649 be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
650 taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
651 if (be_lun->io_taskqueue == NULL) {
652 snprintf(req->error_str, sizeof(req->error_str),
653 "%s: Unable to create taskqueue", __func__);
657 retval = taskqueue_start_threads(&be_lun->io_taskqueue,
661 "%s taskq", be_lun->lunname);
665 mtx_lock(&softc->lock);
667 STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);
669 mtx_unlock(&softc->lock);
671 retval = ctl_add_lun(&be_lun->ctl_be_lun);
673 mtx_lock(&softc->lock);
674 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
677 mtx_unlock(&softc->lock);
678 snprintf(req->error_str, sizeof(req->error_str),
679 "%s: ctl_add_lun() returned error %d, see dmesg for "
680 "details", __func__, retval);
688 mtx_lock(&softc->lock);
691 * Tell the config_status routine that we're waiting so it won't
692 * clean up the LUN in the event of an error.
694 be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
696 while (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) {
697 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
701 be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
703 if (be_lun->flags & CTL_BE_RAMDISK_LUN_CONFIG_ERR) {
704 snprintf(req->error_str, sizeof(req->error_str),
705 "%s: LUN configuration error, see dmesg for details",
707 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
710 mtx_unlock(&softc->lock);
713 params->req_lun_id = be_lun->ctl_be_lun.lun_id;
715 mtx_unlock(&softc->lock);
717 req->status = CTL_LUN_OK;
722 req->status = CTL_LUN_ERROR;
723 if (be_lun != NULL) {
724 if (be_lun->io_taskqueue != NULL) {
725 taskqueue_free(be_lun->io_taskqueue);
727 ctl_free_opts(&be_lun->ctl_be_lun.options);
728 mtx_destroy(&be_lun->queue_lock);
729 free(be_lun, M_RAMDISK);
/*
 * Resize an existing LUN: look it up by lun_id, validate the requested
 * byte size against the LUN's blocksize, recompute block/byte sizes and
 * maxlba, then tell CTL the capacity changed.
 */
ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
struct ctl_lun_req *req)
struct ctl_be_ramdisk_lun *be_lun;
struct ctl_lun_modify_params *params;
params = &req->reqdata.modify;
mtx_lock(&softc->lock);
STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
if (be_lun->ctl_be_lun.lun_id == params->lun_id)
mtx_unlock(&softc->lock);
if (be_lun == NULL) {
snprintf(req->error_str, sizeof(req->error_str),
"%s: LUN %u is not managed by the ramdisk backend",
__func__, params->lun_id);
if (params->lun_size_bytes == 0) {
snprintf(req->error_str, sizeof(req->error_str),
"%s: LUN size \"auto\" not supported "
"by the ramdisk backend", __func__);
blocksize = be_lun->ctl_be_lun.blocksize;
if (params->lun_size_bytes < blocksize) {
snprintf(req->error_str, sizeof(req->error_str),
"%s: LUN size %ju < blocksize %u", __func__,
params->lun_size_bytes, blocksize);
be_lun->size_blocks = params->lun_size_bytes / blocksize;
be_lun->size_bytes = be_lun->size_blocks * blocksize;
/*
 * The maximum LBA is the size - 1.
 *
 * XXX: Note that this field is being updated without locking,
 * which might cause problems on 32-bit architectures.
 */
be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
ctl_lun_capacity_changed(&be_lun->ctl_be_lun);
/* Tell the user the exact size we ended up using */
params->lun_size_bytes = be_lun->size_bytes;
req->status = CTL_LUN_OK;
req->status = CTL_LUN_ERROR;
/*
 * CTL lun_shutdown callback: mark the LUN UNCONFIGURED and wake any
 * thread waiting in rm/create; the free here appears to be the
 * no-waiter path (NOTE(review): surrounding control flow is not fully
 * visible in this view — confirm against the waiter check above it).
 */
ctl_backend_ramdisk_lun_shutdown(void *be_lun)
struct ctl_be_ramdisk_lun *lun;
struct ctl_be_ramdisk_softc *softc;
lun = (struct ctl_be_ramdisk_lun *)be_lun;
mtx_lock(&softc->lock);
lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED;
if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
mtx_unlock(&softc->lock);
free(be_lun, M_RAMDISK);
/*
 * CTL lun_config_status callback: on success, clear UNCONFIGURED, wake
 * any waiter, and enable the LUN.  On error, either flag CONFIG_ERR for
 * a waiting thread to clean up, or unlink and free the LUN here.
 */
ctl_backend_ramdisk_lun_config_status(void *be_lun,
ctl_lun_config_status status)
struct ctl_be_ramdisk_lun *lun;
struct ctl_be_ramdisk_softc *softc;
lun = (struct ctl_be_ramdisk_lun *)be_lun;
if (status == CTL_LUN_CONFIG_OK) {
mtx_lock(&softc->lock);
lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;
if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING)
mtx_unlock(&softc->lock);
/*
 * We successfully added the LUN, attempt to enable it.
 */
if (ctl_enable_lun(&lun->ctl_be_lun) != 0) {
printf("%s: ctl_enable_lun() failed!\n", __func__);
if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) {
printf("%s: ctl_invalidate_lun() failed!\n",
mtx_lock(&softc->lock);
lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;
/*
 * If we have a user waiting, let him handle the cleanup.  If not,
 * clean things up here.
 */
if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
lun->flags |= CTL_BE_RAMDISK_LUN_CONFIG_ERR;
STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
free(lun, M_RAMDISK);
mtx_unlock(&softc->lock);
/*
 * Handle non-data "config write" CDBs: SYNCHRONIZE CACHE is a stub
 * success, START STOP UNIT toggles LUN start/stop (and on/offline)
 * state, anything unrecognized gets INVALID OPCODE sense.
 */
ctl_backend_ramdisk_config_write(union ctl_io *io)
struct ctl_be_ramdisk_softc *softc;
switch (io->scsiio.cdb[0]) {
case SYNCHRONIZE_CACHE:
case SYNCHRONIZE_CACHE_16:
/*
 * The upper level CTL code will filter out any CDBs with
 * the immediate bit set and return the proper error.  It
 * will also not allow a sync cache command to go to a LUN
 * that is powered down.
 *
 * We don't really need to worry about what LBA range the
 * user asked to be synced out.  When they issue a sync
 * cache command, we'll sync out the whole thing.
 *
 * This is obviously just a stubbed out implementation.
 * The real implementation will be in the RAIDCore/CTL
 * interface, and can only really happen when RAIDCore
 * implements a per-array cache sync.
 */
ctl_set_success(&io->scsiio);
ctl_config_write_done(io);
case START_STOP_UNIT: {
struct scsi_start_stop_unit *cdb;
struct ctl_be_lun *ctl_be_lun;
struct ctl_be_ramdisk_lun *be_lun;
cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
CTL_PRIV_BACKEND_LUN].ptr;
be_lun = (struct ctl_be_ramdisk_lun *)ctl_be_lun->be_lun;
if (cdb->how & SSS_START)
retval = ctl_start_lun(ctl_be_lun);
retval = ctl_stop_lun(ctl_be_lun);
&& (cdb->byte2 & SSS_ONOFFLINE))
retval = ctl_lun_offline(ctl_be_lun);
/*
 * In general, the above routines should not fail.  They
 * just set state for the LUN.  So we've got something
 * pretty wrong here if we can't start or stop the LUN.
 */
ctl_set_internal_failure(&io->scsiio,
/*retry_count*/ 0xf051);
retval = CTL_RETVAL_COMPLETE;
ctl_set_success(&io->scsiio);
ctl_config_write_done(io);
ctl_set_success(&io->scsiio);
ctl_config_write_done(io);
ctl_set_invalid_opcode(&io->scsiio);
ctl_config_write_done(io);
retval = CTL_RETVAL_COMPLETE;
968 ctl_backend_ramdisk_config_read(union ctl_io *io)
972 switch (io->scsiio.cdb[0]) {
973 case SERVICE_ACTION_IN:
974 if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) {
975 /* We have nothing to tell, leave default data. */
976 ctl_config_read_done(io);
977 retval = CTL_RETVAL_COMPLETE;
980 ctl_set_invalid_field(&io->scsiio,
986 ctl_config_read_done(io);
987 retval = CTL_RETVAL_COMPLETE;
990 ctl_set_invalid_opcode(&io->scsiio);
991 ctl_config_read_done(io);
992 retval = CTL_RETVAL_COMPLETE;