2 * Copyright (c) 2003, 2008 Silicon Graphics International Corp.
3 * Copyright (c) 2012 The FreeBSD Foundation
6 * Portions of this software were developed by Edward Tomasz Napierala
7 * under sponsorship from the FreeBSD Foundation.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions, and the following disclaimer,
14 * without modification.
15 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
16 * substantially similar to the "NO WARRANTY" disclaimer below
17 * ("Disclaimer") and any redistribution must be conditioned upon
18 * including a substantially similar Disclaimer requirement for further
19 * binary redistribution.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
25 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
26 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
30 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
31 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGES.
34 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $
37 * CAM Target Layer backend for a "fake" ramdisk.
39 * Author: Ken Merry <ken@FreeBSD.org>
42 #include <sys/cdefs.h>
43 __FBSDID("$FreeBSD$");
45 #include <sys/param.h>
46 #include <sys/systm.h>
47 #include <sys/kernel.h>
48 #include <sys/condvar.h>
49 #include <sys/types.h>
51 #include <sys/mutex.h>
52 #include <sys/malloc.h>
53 #include <sys/taskqueue.h>
55 #include <sys/queue.h>
57 #include <sys/ioccom.h>
58 #include <sys/module.h>
59 #include <sys/sysctl.h>
61 #include <cam/scsi/scsi_all.h>
62 #include <cam/scsi/scsi_da.h>
63 #include <cam/ctl/ctl_io.h>
64 #include <cam/ctl/ctl.h>
65 #include <cam/ctl/ctl_util.h>
66 #include <cam/ctl/ctl_backend.h>
67 #include <cam/ctl/ctl_debug.h>
68 #include <cam/ctl/ctl_ioctl.h>
69 #include <cam/ctl/ctl_ha.h>
70 #include <cam/ctl/ctl_private.h>
71 #include <cam/ctl/ctl_error.h>
/*
 * Per-LUN state flags, kept as a bitmask in be_lun->flags.
 * NOTE(review): the "typedef enum {" opener is missing from this chunk,
 * and residual line numbers from a paste are embedded in each line.
 */
74 CTL_BE_RAMDISK_LUN_UNCONFIGURED = 0x01, /* LUN not (or no longer) configured in CTL */
75 CTL_BE_RAMDISK_LUN_CONFIG_ERR = 0x02, /* configuration failed; creator must clean up */
76 CTL_BE_RAMDISK_LUN_WAITING = 0x04 /* a thread is sleeping on this LUN's state change */
77 } ctl_be_ramdisk_lun_flags;
/*
 * Per-LUN backend state.  One of these is allocated for each ramdisk LUN
 * created through the ioctl interface; linked into the softc's lun_list.
 * NOTE(review): some member lines (and the closing brace) are missing from
 * this view of the file.
 */
79 struct ctl_be_ramdisk_lun {
80 struct ctl_lun_create_params params; /* creation parameters, kept for modify */
84 struct ctl_be_ramdisk_softc *softc; /* back pointer to the backend softc */
85 ctl_be_ramdisk_lun_flags flags; /* UNCONFIGURED/CONFIG_ERR/WAITING, under softc->lock */
86 STAILQ_ENTRY(ctl_be_ramdisk_lun) links; /* linkage on softc->lun_list */
87 struct ctl_be_lun cbe_lun; /* the generic CTL LUN, registered with ctl_add_lun() */
88 struct taskqueue *io_taskqueue; /* per-LUN worker queue for continued I/O */
90 STAILQ_HEAD(, ctl_io_hdr) cont_queue; /* I/Os waiting for another data-move pass */
91 struct mtx_padalign queue_lock; /* protects cont_queue */
/*
 * Backend-wide state.  The backing store is shared by all LUNs: either an
 * array of PAGE_SIZE pages (CTL_RAMDISK_PAGES) or one contiguous buffer.
 * Data written is discarded; reads return whatever is in the shared store,
 * which is why this is a "fake" ramdisk.
 * NOTE(review): several members (lock, num_luns, rd_size, ...) are on lines
 * missing from this chunk; they are referenced by the functions below.
 */
94 struct ctl_be_ramdisk_softc {
97 #ifdef CTL_RAMDISK_PAGES
98 uint8_t **ramdisk_pages; /* num_pages page-sized buffers */
101 uint8_t *ramdisk_buffer; /* single rd_size-byte buffer (non-PAGES build) */
104 STAILQ_HEAD(, ctl_be_ramdisk_lun) lun_list; /* all LUNs owned by this backend */
/*
 * Single global softc instance for this backend, plus forward declarations
 * for the driver entry points and internal helpers below.  init/shutdown
 * are intentionally non-static: they are referenced through the backend
 * registration machinery (CTL_BACKEND_DECLARE).
 */
107 static struct ctl_be_ramdisk_softc rd_softc;
108 extern struct ctl_softc *control_softc;
110 int ctl_backend_ramdisk_init(void);
111 void ctl_backend_ramdisk_shutdown(void);
112 static int ctl_backend_ramdisk_move_done(union ctl_io *io);
113 static int ctl_backend_ramdisk_submit(union ctl_io *io);
114 static void ctl_backend_ramdisk_continue(union ctl_io *io);
115 static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd,
116 caddr_t addr, int flag, struct thread *td);
117 static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
118 struct ctl_lun_req *req);
119 static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
120 struct ctl_lun_req *req);
121 static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
122 struct ctl_lun_req *req);
123 static void ctl_backend_ramdisk_worker(void *context, int pending);
124 static void ctl_backend_ramdisk_lun_shutdown(void *be_lun);
125 static void ctl_backend_ramdisk_lun_config_status(void *be_lun,
126 ctl_lun_config_status status);
127 static int ctl_backend_ramdisk_config_write(union ctl_io *io);
128 static int ctl_backend_ramdisk_config_read(union ctl_io *io);
/*
 * Backend driver descriptor handed to CTL.  CTL calls these hooks for
 * data-path I/O (data_submit/data_move_done), config commands
 * (config_read/config_write), and userland LUN management (ioctl).
 * NOTE(review): the ".name"/".version" initializer lines are missing from
 * this view of the file.
 */
130 static struct ctl_backend_driver ctl_be_ramdisk_driver =
133 .flags = CTL_BE_FLAG_HAS_CONFIG,
134 .init = ctl_backend_ramdisk_init,
135 .data_submit = ctl_backend_ramdisk_submit,
136 .data_move_done = ctl_backend_ramdisk_move_done,
137 .config_read = ctl_backend_ramdisk_config_read,
138 .config_write = ctl_backend_ramdisk_config_write,
139 .ioctl = ctl_backend_ramdisk_ioctl
/* Malloc type for all allocations made by this backend, and registration
 * of the driver with CTL at module load time. */
142 MALLOC_DEFINE(M_RAMDISK, "ramdisk", "Memory used for CTL RAMdisk");
143 CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver);
/*
 * One-time backend initialization: zero the softc, set up its lock and
 * LUN list, and allocate the shared 1 MB backing store (as individual
 * pages when CTL_RAMDISK_PAGES is defined, otherwise one buffer).
 * NOTE(review): the function's return-type line, `softc = &rd_softc;`
 * style assignment, loop-variable declaration, #else/#endif lines and the
 * return statement are missing from this view — do not assume their exact
 * form.
 */
146 ctl_backend_ramdisk_init(void)
148 struct ctl_be_ramdisk_softc *softc;
149 #ifdef CTL_RAMDISK_PAGES
156 memset(softc, 0, sizeof(*softc));
158 mtx_init(&softc->lock, "ctlramdisk", NULL, MTX_DEF);
160 STAILQ_INIT(&softc->lun_list);
161 softc->rd_size = 1024 * 1024; /* 1 MB shared backing store */
162 #ifdef CTL_RAMDISK_PAGES
163 softc->num_pages = softc->rd_size / PAGE_SIZE;
164 softc->ramdisk_pages = (uint8_t **)malloc(sizeof(uint8_t *) *
165 softc->num_pages, M_RAMDISK,
/* M_WAITOK allocations cannot fail, so no NULL checks are needed here. */
167 for (i = 0; i < softc->num_pages; i++)
168 softc->ramdisk_pages[i] = malloc(PAGE_SIZE, M_RAMDISK,M_WAITOK);
170 softc->ramdisk_buffer = (uint8_t *)malloc(softc->rd_size, M_RAMDISK,
/*
 * Backend teardown at module unload: disable and invalidate every LUN on
 * the list, free the shared backing store, then deregister the driver.
 * Uses a saved next pointer because the current LUN may be removed from
 * the list (via our LUN shutdown routine) during ctl_invalidate_lun().
 */
178 ctl_backend_ramdisk_shutdown(void)
180 struct ctl_be_ramdisk_softc *softc;
181 struct ctl_be_ramdisk_lun *lun, *next_lun;
182 #ifdef CTL_RAMDISK_PAGES
188 mtx_lock(&softc->lock);
189 for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun){
191 * Grab the next LUN. The current LUN may get removed by
192 * ctl_invalidate_lun(), which will call our LUN shutdown
193 * routine, if there is no outstanding I/O for this LUN.
195 next_lun = STAILQ_NEXT(lun, links);
198 * Drop our lock here. Since ctl_invalidate_lun() can call
199 * back into us, this could potentially lead to a recursive
200 * lock of the same mutex, which would cause a hang.
202 mtx_unlock(&softc->lock);
203 ctl_disable_lun(&lun->cbe_lun);
204 ctl_invalidate_lun(&lun->cbe_lun);
205 mtx_lock(&softc->lock);
207 mtx_unlock(&softc->lock);
209 #ifdef CTL_RAMDISK_PAGES
210 for (i = 0; i < softc->num_pages; i++)
211 free(softc->ramdisk_pages[i], M_RAMDISK);
213 free(softc->ramdisk_pages, M_RAMDISK);
215 free(softc->ramdisk_buffer, M_RAMDISK);
/* Deregistration failure is only reported; there is no recovery path. */
218 if (ctl_backend_deregister(&ctl_be_ramdisk_driver) != 0) {
219 printf("ctl_backend_ramdisk_shutdown: "
220 "ctl_backend_deregister() failed!\n");
/*
 * Data-move completion callback.  Frees the S/G list (if one was
 * allocated), advances the transfer offset, and either requeues the I/O
 * on the LUN's continue queue (more bytes left), completes it with
 * success, or converts a FETD port error into an internal-failure sense.
 * NOTE(review): several lines (the CTL_TIME_IO conditional block opener,
 * abort handling, STAILQ/taskqueue argument continuations, closing braces
 * and the return) are missing from this view.
 */
225 ctl_backend_ramdisk_move_done(union ctl_io *io)
227 struct ctl_be_lun *cbe_lun;
228 struct ctl_be_ramdisk_lun *be_lun;
230 struct bintime cur_bt;
233 CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n"));
234 cbe_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
235 CTL_PRIV_BACKEND_LUN].ptr;
236 be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun->be_lun;
/* Accumulate DMA time statistics for this I/O. */
239 bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
240 bintime_add(&io->io_hdr.dma_bt, &cur_bt);
241 io->io_hdr.num_dmas++;
/* kern_sg_entries > 0 means we malloc'ed an S/G list in _continue(). */
243 if (io->scsiio.kern_sg_entries > 0)
244 free(io->scsiio.kern_data_ptr, M_RAMDISK);
245 io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
246 if (io->io_hdr.flags & CTL_FLAG_ABORT) {
248 } else if ((io->io_hdr.port_status == 0) &&
249 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
/* Residual byte count > 0: push the I/O back to the worker for
 * another pass through ctl_backend_ramdisk_continue(). */
250 if (io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer > 0) {
251 mtx_lock(&be_lun->queue_lock);
252 STAILQ_INSERT_TAIL(&be_lun->cont_queue,
254 mtx_unlock(&be_lun->queue_lock);
255 taskqueue_enqueue(be_lun->io_taskqueue,
259 ctl_set_success(&io->scsiio);
260 } else if ((io->io_hdr.port_status != 0) &&
261 ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
262 (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
264 * For hardware error sense keys, the sense key
265 * specific value is defined to be a retry count,
266 * but we use it to pass back an internal FETD
267 * error code. XXX KDM Hopefully the FETD is only
268 * using 16 bits for an error code, since that's
269 * all the space we have in the sks field.
271 ctl_set_internal_failure(&io->scsiio,
274 io->io_hdr.port_status);
276 ctl_data_submit_done(io);
/*
 * Data-path entry point for READ/WRITE-class commands.  VERIFY requests
 * complete immediately (there is no real media to verify against).
 * Otherwise the total transfer length in bytes is stashed in the
 * CTL_PRIV_BACKEND private integer as a residual counter, and the first
 * data-move pass is kicked off via ctl_backend_ramdisk_continue().
 */
281 ctl_backend_ramdisk_submit(union ctl_io *io)
283 struct ctl_be_lun *cbe_lun;
284 struct ctl_lba_len_flags *lbalen;
286 cbe_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
287 CTL_PRIV_BACKEND_LUN].ptr;
288 lbalen = (struct ctl_lba_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
289 if (lbalen->flags & CTL_LLF_VERIFY) {
290 ctl_set_success(&io->scsiio);
291 ctl_data_submit_done(io);
292 return (CTL_RETVAL_COMPLETE);
/* lbalen->len is in blocks; convert to bytes for the residual counter. */
294 io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer =
295 lbalen->len * cbe_lun->blocksize;
296 ctl_backend_ramdisk_continue(io);
297 return (CTL_RETVAL_COMPLETE);
/*
 * Set up one data-move pass for an I/O: point kern_data_ptr at the shared
 * backing store (an S/G list of pages in the CTL_RAMDISK_PAGES build, a
 * slice of the flat buffer otherwise), cap the pass at the store's size,
 * decrement the residual counter, and hand the I/O to ctl_datamove().
 * The same backing memory is reused for every I/O — data is not retained,
 * which is the "fake" part of this ramdisk.
 * NOTE(review): lines including the #else branches, the datamove call and
 * the closing brace are missing from this view.
 */
301 ctl_backend_ramdisk_continue(union ctl_io *io)
303 struct ctl_be_ramdisk_softc *softc;
304 int len, len_filled, sg_filled;
305 #ifdef CTL_RAMDISK_PAGES
306 struct ctl_sg_entry *sg_entries;
/* Remaining bytes for this I/O, set in _submit()/previous passes. */
311 len = io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer;
312 #ifdef CTL_RAMDISK_PAGES
313 sg_filled = min(btoc(len), softc->num_pages);
315 io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) *
316 sg_filled, M_RAMDISK,
318 sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
319 for (i = 0, len_filled = 0; i < sg_filled; i++) {
320 sg_entries[i].addr = softc->ramdisk_pages[i];
/* Last entry may be shorter than a full page. */
321 sg_entries[i].len = MIN(PAGE_SIZE, len - len_filled);
322 len_filled += sg_entries[i].len;
/* Single-page case: no S/G list, point directly at page 0. */
327 io->scsiio.kern_data_ptr = softc->ramdisk_pages[0];
331 len_filled = min(len, softc->rd_size);
332 io->scsiio.kern_data_ptr = softc->ramdisk_buffer;
333 #endif /* CTL_RAMDISK_PAGES */
335 io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
336 io->scsiio.kern_data_resid = 0;
337 io->scsiio.kern_data_len = len_filled;
338 io->scsiio.kern_sg_entries = sg_filled;
339 io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
340 io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer -= len_filled;
342 getbintime(&io->io_hdr.dma_start_bt);
/*
 * Taskqueue worker for a LUN: drains cont_queue, running another
 * data-move pass for each queued I/O.  The queue lock is dropped around
 * ctl_backend_ramdisk_continue() since that path can sleep/re-enqueue.
 * NOTE(review): the loop construct, queue-empty test and closing braces
 * are on lines missing from this view.
 */
348 ctl_backend_ramdisk_worker(void *context, int pending)
350 struct ctl_be_ramdisk_softc *softc;
351 struct ctl_be_ramdisk_lun *be_lun;
354 be_lun = (struct ctl_be_ramdisk_lun *)context;
355 softc = be_lun->softc;
357 mtx_lock(&be_lun->queue_lock);
359 io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue);
361 STAILQ_REMOVE(&be_lun->cont_queue, &io->io_hdr,
364 mtx_unlock(&be_lun->queue_lock);
366 ctl_backend_ramdisk_continue(io);
368 mtx_lock(&be_lun->queue_lock);
373 * If we get here, there is no work left in the queues, so
374 * just break out and let the task queue go to sleep.
378 mtx_unlock(&be_lun->queue_lock);
/*
 * Userland management entry point: dispatches CTL_LUN_REQ requests
 * (create / remove / modify) to the matching handler; unknown request
 * types are rejected with CTL_LUN_ERROR and a message in error_str.
 * NOTE(review): the cmd-switch wrapper, `break;` lines, default-case tail
 * and return are missing from this view.
 */
382 ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
383 int flag, struct thread *td)
385 struct ctl_be_ramdisk_softc *softc;
393 struct ctl_lun_req *lun_req;
395 lun_req = (struct ctl_lun_req *)addr;
397 switch (lun_req->reqtype) {
398 case CTL_LUNREQ_CREATE:
399 retval = ctl_backend_ramdisk_create(softc, lun_req);
402 retval = ctl_backend_ramdisk_rm(softc, lun_req);
404 case CTL_LUNREQ_MODIFY:
405 retval = ctl_backend_ramdisk_modify(softc, lun_req);
408 lun_req->status = CTL_LUN_ERROR;
409 snprintf(lun_req->error_str, sizeof(lun_req->error_str),
410 "%s: invalid LUN request type %d", __func__,
/*
 * Remove a LUN: look it up by lun_id, disable it, mark ourselves WAITING,
 * invalidate it, then sleep (interruptibly, PCATCH) until the LUN shutdown
 * callback marks it UNCONFIGURED.  Only if the wait completes normally is
 * the LUN unlinked and freed here; if the user interrupts the wait, the
 * shutdown routine owns the cleanup instead (see the comment at 495-498).
 * On any error, req->status/error_str report the failure to userland.
 * NOTE(review): `break;`/`goto bailout_error;` lines, brace lines and the
 * return are missing from this view.
 */
425 ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
426 struct ctl_lun_req *req)
428 struct ctl_be_ramdisk_lun *be_lun;
429 struct ctl_lun_rm_params *params;
434 params = &req->reqdata.rm;
438 mtx_lock(&softc->lock);
440 STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
441 if (be_lun->cbe_lun.lun_id == params->lun_id)
444 mtx_unlock(&softc->lock);
446 if (be_lun == NULL) {
447 snprintf(req->error_str, sizeof(req->error_str),
448 "%s: LUN %u is not managed by the ramdisk backend",
449 __func__, params->lun_id);
453 retval = ctl_disable_lun(&be_lun->cbe_lun);
456 snprintf(req->error_str, sizeof(req->error_str),
457 "%s: error %d returned from ctl_disable_lun() for "
458 "LUN %d", __func__, retval, params->lun_id);
463 * Set the waiting flag before we invalidate the LUN. Our shutdown
464 * routine can be called any time after we invalidate the LUN,
465 * and can be called from our context.
467 * This tells the shutdown routine that we're waiting, or we're
468 * going to wait for the shutdown to happen.
470 mtx_lock(&softc->lock);
471 be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
472 mtx_unlock(&softc->lock);
474 retval = ctl_invalidate_lun(&be_lun->cbe_lun);
/* Invalidate failed: clear WAITING so the shutdown callback (if it
 * ever runs) does not expect a sleeper. */
476 snprintf(req->error_str, sizeof(req->error_str),
477 "%s: error %d returned from ctl_invalidate_lun() for "
478 "LUN %d", __func__, retval, params->lun_id);
479 mtx_lock(&softc->lock);
480 be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
481 mtx_unlock(&softc->lock);
485 mtx_lock(&softc->lock);
/* Sleep until the shutdown callback flags us UNCONFIGURED; PCATCH lets
 * a signal interrupt the wait (retval != 0 in that case). */
487 while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) {
488 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
492 be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
495 * We only remove this LUN from the list and free it (below) if
496 * retval == 0. If the user interrupted the wait, we just bail out
497 * without actually freeing the LUN. We let the shutdown routine
498 * free the LUN if that happens.
501 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
506 mtx_unlock(&softc->lock);
/* Fully tear down the per-LUN resources now that it is off the list. */
509 taskqueue_drain_all(be_lun->io_taskqueue);
510 taskqueue_free(be_lun->io_taskqueue);
511 ctl_free_opts(&be_lun->cbe_lun.options);
512 mtx_destroy(&be_lun->queue_lock);
513 free(be_lun, M_RAMDISK);
516 req->status = CTL_LUN_OK;
521 req->status = CTL_LUN_ERROR;
/*
 * Create a new ramdisk LUN from a userland request: allocate and fill in
 * the per-LUN structure, apply options (ha_role, unmap, readonly,
 * serseq), size the LUN, set up its taskqueue, register it with
 * ctl_add_lun(), then sleep until the config-status callback reports
 * success or failure.  On success the chosen blocksize/size/LUN id are
 * copied back into the request for userland; on failure partially
 * constructed state is torn down under the bailout path at the end.
 * NOTE(review): many lines (the `retval`/`tmpstr`/`value` declarations,
 * else-branch keywords, goto targets, closing braces and the return) are
 * missing from this view of the file.
 */
527 ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
528 struct ctl_lun_req *req)
530 struct ctl_be_ramdisk_lun *be_lun;
531 struct ctl_be_lun *cbe_lun;
532 struct ctl_lun_create_params *params;
538 params = &req->reqdata.create;
540 be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | M_WAITOK);
541 cbe_lun = &be_lun->cbe_lun;
542 cbe_lun->be_lun = be_lun;
543 be_lun->params = req->reqdata.create;
544 be_lun->softc = softc;
/* NOTE(review): unbounded sprintf into lunname — presumably the buffer
 * is sized for "cram" + any int, but snprintf would be safer; confirm
 * against the struct definition. */
545 sprintf(be_lun->lunname, "cram%d", softc->num_luns);
546 ctl_init_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args);
548 if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
549 cbe_lun->lun_type = params->device_type;
551 cbe_lun->lun_type = T_DIRECT;
552 be_lun->flags = CTL_BE_RAMDISK_LUN_UNCONFIGURED;
/* "ha_role" option (or the active-shelf flag) decides HA primary role. */
554 value = ctl_get_opt(&cbe_lun->options, "ha_role");
556 if (strcmp(value, "primary") == 0)
557 cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
558 } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
559 cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
561 if (cbe_lun->lun_type == T_DIRECT) {
562 if (params->blocksize_bytes != 0)
563 cbe_lun->blocksize = params->blocksize_bytes;
565 cbe_lun->blocksize = 512; /* default sector size */
566 if (params->lun_size_bytes < cbe_lun->blocksize) {
567 snprintf(req->error_str, sizeof(req->error_str),
568 "%s: LUN size %ju < blocksize %u", __func__,
569 params->lun_size_bytes, cbe_lun->blocksize);
/* Round the size down to whole blocks; maxlba is zero-based. */
572 be_lun->size_blocks = params->lun_size_bytes / cbe_lun->blocksize;
573 be_lun->size_bytes = be_lun->size_blocks * cbe_lun->blocksize;
574 cbe_lun->maxlba = be_lun->size_blocks - 1;
575 cbe_lun->atomicblock = UINT32_MAX;
576 cbe_lun->opttxferlen = softc->rd_size / cbe_lun->blocksize;
579 /* Tell the user the blocksize we ended up using */
580 params->blocksize_bytes = cbe_lun->blocksize;
581 params->lun_size_bytes = be_lun->size_bytes;
583 value = ctl_get_opt(&cbe_lun->options, "unmap");
584 if (value != NULL && strcmp(value, "on") == 0)
585 cbe_lun->flags |= CTL_LUN_FLAG_UNMAP;
586 value = ctl_get_opt(&cbe_lun->options, "readonly");
587 if (value != NULL && strcmp(value, "on") == 0)
588 cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
/* Command-serialization behavior: off (default), on, or read-only. */
589 cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;
590 value = ctl_get_opt(&cbe_lun->options, "serseq");
591 if (value != NULL && strcmp(value, "on") == 0)
592 cbe_lun->serseq = CTL_LUN_SERSEQ_ON;
593 else if (value != NULL && strcmp(value, "read") == 0)
594 cbe_lun->serseq = CTL_LUN_SERSEQ_READ;
595 else if (value != NULL && strcmp(value, "off") == 0)
596 cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;
598 if (params->flags & CTL_LUN_FLAG_ID_REQ) {
599 cbe_lun->req_lun_id = params->req_lun_id;
600 cbe_lun->flags |= CTL_LUN_FLAG_ID_REQ;
602 cbe_lun->req_lun_id = 0;
604 cbe_lun->lun_shutdown = ctl_backend_ramdisk_lun_shutdown;
605 cbe_lun->lun_config_status = ctl_backend_ramdisk_lun_config_status;
606 cbe_lun->be = &ctl_be_ramdisk_driver;
/* Synthesize a serial number if the caller didn't supply one.
 * NOTE(review): strncpy may leave the destination unterminated when the
 * source fills it — presumably these fields are fixed-width, not C
 * strings; confirm before "fixing". */
607 if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
608 snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
610 strncpy((char *)cbe_lun->serial_num, tmpstr,
611 MIN(sizeof(cbe_lun->serial_num), sizeof(tmpstr)));
613 /* Tell the user what we used for a serial number */
614 strncpy((char *)params->serial_num, tmpstr,
615 MIN(sizeof(params->serial_num), sizeof(tmpstr)));
617 strncpy((char *)cbe_lun->serial_num, params->serial_num,
618 MIN(sizeof(cbe_lun->serial_num),
619 sizeof(params->serial_num)));
/* Same pattern for the device ID. */
621 if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
622 snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
623 strncpy((char *)cbe_lun->device_id, tmpstr,
624 MIN(sizeof(cbe_lun->device_id), sizeof(tmpstr)));
626 /* Tell the user what we used for a device ID */
627 strncpy((char *)params->device_id, tmpstr,
628 MIN(sizeof(params->device_id), sizeof(tmpstr)));
630 strncpy((char *)cbe_lun->device_id, params->device_id,
631 MIN(sizeof(cbe_lun->device_id),
632 sizeof(params->device_id)));
/* Per-LUN continuation queue and its dedicated taskqueue thread. */
635 STAILQ_INIT(&be_lun->cont_queue);
636 mtx_init(&be_lun->queue_lock, "cram queue lock", NULL, MTX_DEF);
637 TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker,
640 be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
641 taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
642 if (be_lun->io_taskqueue == NULL) {
643 snprintf(req->error_str, sizeof(req->error_str),
644 "%s: Unable to create taskqueue", __func__);
648 retval = taskqueue_start_threads(&be_lun->io_taskqueue,
652 "%s taskq", be_lun->lunname);
656 mtx_lock(&softc->lock);
658 STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);
660 mtx_unlock(&softc->lock);
662 retval = ctl_add_lun(&be_lun->cbe_lun);
/* ctl_add_lun() failed: unlink the LUN again before bailing out. */
664 mtx_lock(&softc->lock);
665 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
668 mtx_unlock(&softc->lock);
669 snprintf(req->error_str, sizeof(req->error_str),
670 "%s: ctl_add_lun() returned error %d, see dmesg for "
671 "details", __func__, retval);
676 mtx_lock(&softc->lock);
679 * Tell the config_status routine that we're waiting so it won't
680 * clean up the LUN in the event of an error.
682 be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
684 while (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) {
685 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
689 be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
691 if (be_lun->flags & CTL_BE_RAMDISK_LUN_CONFIG_ERR) {
692 snprintf(req->error_str, sizeof(req->error_str),
693 "%s: LUN configuration error, see dmesg for details",
695 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
698 mtx_unlock(&softc->lock);
/* Success: report the LUN id CTL actually assigned. */
701 params->req_lun_id = cbe_lun->lun_id;
703 mtx_unlock(&softc->lock);
705 req->status = CTL_LUN_OK;
/* Bailout path: undo whatever was constructed before the failure. */
710 req->status = CTL_LUN_ERROR;
711 if (be_lun != NULL) {
712 if (be_lun->io_taskqueue != NULL) {
713 taskqueue_free(be_lun->io_taskqueue);
715 ctl_free_opts(&cbe_lun->options);
716 mtx_destroy(&be_lun->queue_lock);
717 free(be_lun, M_RAMDISK);
/*
 * Modify an existing LUN: look it up by lun_id, update its stored
 * creation parameters and options, re-evaluate the HA primary role
 * (notifying CTL if it flipped), then recompute the LUN size and tell
 * CTL the capacity changed.  The final size is reported back to
 * userland via params->lun_size_bytes.
 * NOTE(review): `break;`/brace/else lines, the error-goto targets and
 * the return are missing from this view.
 */
724 ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
725 struct ctl_lun_req *req)
727 struct ctl_be_ramdisk_lun *be_lun;
728 struct ctl_be_lun *cbe_lun;
729 struct ctl_lun_modify_params *params;
734 params = &req->reqdata.modify;
736 mtx_lock(&softc->lock);
737 STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
738 if (be_lun->cbe_lun.lun_id == params->lun_id)
741 mtx_unlock(&softc->lock);
743 if (be_lun == NULL) {
744 snprintf(req->error_str, sizeof(req->error_str),
745 "%s: LUN %u is not managed by the ramdisk backend",
746 __func__, params->lun_id);
749 cbe_lun = &be_lun->cbe_lun;
/* A zero size means "keep the current size". */
751 if (params->lun_size_bytes != 0)
752 be_lun->params.lun_size_bytes = params->lun_size_bytes;
753 ctl_update_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args);
/* Recompute the HA role from the (possibly updated) ha_role option and
 * notify CTL only when the role actually changed. */
755 wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY);
756 value = ctl_get_opt(&cbe_lun->options, "ha_role");
758 if (strcmp(value, "primary") == 0)
759 cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
761 cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
762 } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
763 cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
765 cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
766 if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) {
767 if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)
768 ctl_lun_primary(cbe_lun);
770 ctl_lun_secondary(cbe_lun);
773 blocksize = be_lun->cbe_lun.blocksize;
774 if (be_lun->params.lun_size_bytes < blocksize) {
775 snprintf(req->error_str, sizeof(req->error_str),
776 "%s: LUN size %ju < blocksize %u", __func__,
777 be_lun->params.lun_size_bytes, blocksize);
780 be_lun->size_blocks = be_lun->params.lun_size_bytes / blocksize;
781 be_lun->size_bytes = be_lun->size_blocks * blocksize;
782 be_lun->cbe_lun.maxlba = be_lun->size_blocks - 1;
783 ctl_lun_capacity_changed(&be_lun->cbe_lun);
785 /* Tell the user the exact size we ended up using */
786 params->lun_size_bytes = be_lun->size_bytes;
788 req->status = CTL_LUN_OK;
793 req->status = CTL_LUN_ERROR;
/*
 * CTL callback invoked when a LUN has fully shut down.  Marks the LUN
 * UNCONFIGURED; if a thread is WAITING (rm/create), it is woken and owns
 * the cleanup, otherwise this routine unlinks and frees the LUN itself.
 * NOTE(review): the wakeup() call, the do_free conditional structure and
 * closing braces are on lines missing from this view — the exact
 * free-vs-wake flow cannot be fully confirmed here.
 */
799 ctl_backend_ramdisk_lun_shutdown(void *be_lun)
801 struct ctl_be_ramdisk_lun *lun;
802 struct ctl_be_ramdisk_softc *softc;
805 lun = (struct ctl_be_ramdisk_lun *)be_lun;
809 mtx_lock(&softc->lock);
811 lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED;
813 if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
816 STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
822 mtx_unlock(&softc->lock);
825 free(be_lun, M_RAMDISK);
/*
 * CTL callback reporting the result of ctl_add_lun().  On success the
 * UNCONFIGURED flag is cleared, any waiter is woken, and the LUN is
 * enabled.  On failure the LUN is either handed back to the waiting
 * creator (CONFIG_ERR set, waiter woken) or cleaned up here.
 * NOTE(review): wakeup() calls, the error-path entry and closing braces
 * are on lines missing from this view.
 */
829 ctl_backend_ramdisk_lun_config_status(void *be_lun,
830 ctl_lun_config_status status)
832 struct ctl_be_ramdisk_lun *lun;
833 struct ctl_be_ramdisk_softc *softc;
835 lun = (struct ctl_be_ramdisk_lun *)be_lun;
838 if (status == CTL_LUN_CONFIG_OK) {
839 mtx_lock(&softc->lock);
840 lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;
841 if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING)
843 mtx_unlock(&softc->lock);
846 * We successfully added the LUN, attempt to enable it.
/* Enable failure is logged; invalidating triggers the normal LUN
 * shutdown path for cleanup. */
848 if (ctl_enable_lun(&lun->cbe_lun) != 0) {
849 printf("%s: ctl_enable_lun() failed!\n", __func__);
850 if (ctl_invalidate_lun(&lun->cbe_lun) != 0) {
851 printf("%s: ctl_invalidate_lun() failed!\n",
/* Configuration-error path from here down. */
860 mtx_lock(&softc->lock);
861 lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;
864 * If we have a user waiting, let him handle the cleanup. If not,
865 * clean things up here.
867 if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
868 lun->flags |= CTL_BE_RAMDISK_LUN_CONFIG_ERR;
871 STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
874 free(lun, M_RAMDISK);
876 mtx_unlock(&softc->lock);
/*
 * Handle configuration (non-data) write commands.  SYNCHRONIZE CACHE is
 * a successful no-op (there is nothing persistent to sync); START STOP
 * UNIT toggles LUN start/stop (and on/offline) state via CTL helpers;
 * WRITE SAME / UNMAP-style opcodes presumably fall into the succeed-
 * silently case near the end; anything else is an invalid opcode.
 * NOTE(review): the retval declaration, `break;` lines, some case labels
 * and the final return are missing from this view.
 */
880 ctl_backend_ramdisk_config_write(union ctl_io *io)
882 struct ctl_be_ramdisk_softc *softc;
888 switch (io->scsiio.cdb[0]) {
889 case SYNCHRONIZE_CACHE:
890 case SYNCHRONIZE_CACHE_16:
892 * The upper level CTL code will filter out any CDBs with
893 * the immediate bit set and return the proper error. It
894 * will also not allow a sync cache command to go to a LUN
895 * that is powered down.
897 * We don't really need to worry about what LBA range the
898 * user asked to be synced out. When they issue a sync
899 * cache command, we'll sync out the whole thing.
901 * This is obviously just a stubbed out implementation.
902 * The real implementation will be in the RAIDCore/CTL
903 * interface, and can only really happen when RAIDCore
904 * implements a per-array cache sync.
906 ctl_set_success(&io->scsiio);
907 ctl_config_write_done(io);
909 case START_STOP_UNIT: {
910 struct scsi_start_stop_unit *cdb;
911 struct ctl_be_lun *cbe_lun;
912 struct ctl_be_ramdisk_lun *be_lun;
914 cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
916 cbe_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
917 CTL_PRIV_BACKEND_LUN].ptr;
918 be_lun = (struct ctl_be_ramdisk_lun *)cbe_lun->be_lun;
920 if (cdb->how & SSS_START)
921 retval = ctl_start_lun(cbe_lun);
923 retval = ctl_stop_lun(cbe_lun);
/* Stop with the on/offline bit additionally takes the LUN offline. */
926 && (cdb->byte2 & SSS_ONOFFLINE))
927 retval = ctl_lun_offline(cbe_lun);
932 * In general, the above routines should not fail. They
933 * just set state for the LUN. So we've got something
934 * pretty wrong here if we can't start or stop the LUN.
/* 0xf051 is an arbitrary internal marker passed in the retry-count
 * field — see the matching convention in move_done(). */
937 ctl_set_internal_failure(&io->scsiio,
939 /*retry_count*/ 0xf051);
940 retval = CTL_RETVAL_COMPLETE;
942 ctl_set_success(&io->scsiio);
944 ctl_config_write_done(io);
950 ctl_set_success(&io->scsiio);
951 ctl_config_write_done(io);
954 ctl_set_invalid_opcode(&io->scsiio);
955 ctl_config_write_done(io);
956 retval = CTL_RETVAL_COMPLETE;
964 ctl_backend_ramdisk_config_read(union ctl_io *io)
968 switch (io->scsiio.cdb[0]) {
969 case SERVICE_ACTION_IN:
970 if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) {
971 /* We have nothing to tell, leave default data. */
972 ctl_config_read_done(io);
973 retval = CTL_RETVAL_COMPLETE;
976 ctl_set_invalid_field(&io->scsiio,
982 ctl_config_read_done(io);
983 retval = CTL_RETVAL_COMPLETE;
986 ctl_set_invalid_opcode(&io->scsiio);
987 ctl_config_read_done(io);
988 retval = CTL_RETVAL_COMPLETE;