/*-
 * Copyright (c) 2003, 2008 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $
 */
/*
 * CAM Target Layer backend for a "fake" ramdisk.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */
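
/*
 * Illustrative usage (an assumption for this note, not taken from this file;
 * exact ctladm(8) syntax may vary by FreeBSD version):
 *
 *   ctladm create -b ramdisk -s 10485760
 *
 * would create a 10 MB "fake" ramdisk LUN.  Note that this backend does not
 * actually store data per LBA: writes land in a single shared buffer and
 * reads return its current contents, so it is only useful for testing and
 * benchmarking the CTL data path.
 */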

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/condvar.h>
#include <sys/types.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/module.h>

#include <cam/scsi/scsi_all.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_error.h>

typedef enum {
        CTL_BE_RAMDISK_LUN_UNCONFIGURED = 0x01,
        CTL_BE_RAMDISK_LUN_CONFIG_ERR   = 0x02,
        CTL_BE_RAMDISK_LUN_WAITING      = 0x04
} ctl_be_ramdisk_lun_flags;

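/*
 * Per-LUN state for the ramdisk backend.  Each LUN has its own taskqueue
 * and continuation queue so that transfers larger than the shared backing
 * store can be continued, one chunk at a time, from a sleepable context.
 */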
struct ctl_be_ramdisk_lun {
        char lunname[32];
        uint64_t size_bytes;
        uint64_t size_blocks;
        struct ctl_be_ramdisk_softc *softc;
        ctl_be_ramdisk_lun_flags flags;
        STAILQ_ENTRY(ctl_be_ramdisk_lun) links;
        struct ctl_be_lun ctl_be_lun;
        struct taskqueue *io_taskqueue;
        struct task io_task;
        STAILQ_HEAD(, ctl_io_hdr) cont_queue;
        struct mtx_padalign queue_lock;
};

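/*
 * Global softc for the backend.  All LUNs share a single rd_size byte
 * backing store: either one contiguous buffer, or an array of pages when
 * CTL_RAMDISK_PAGES is defined.  Per-LBA data is not preserved.
 */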
struct ctl_be_ramdisk_softc {
        struct mtx lock;
        int rd_size;
#ifdef CTL_RAMDISK_PAGES
        uint8_t **ramdisk_pages;
        int num_pages;
#else
        uint8_t *ramdisk_buffer;
#endif
        int num_luns;
        STAILQ_HEAD(, ctl_be_ramdisk_lun) lun_list;
};

static struct ctl_be_ramdisk_softc rd_softc;

int ctl_backend_ramdisk_init(void);
void ctl_backend_ramdisk_shutdown(void);
static int ctl_backend_ramdisk_move_done(union ctl_io *io);
static int ctl_backend_ramdisk_submit(union ctl_io *io);
static void ctl_backend_ramdisk_continue(union ctl_io *io);
static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd,
                                     caddr_t addr, int flag, struct thread *td);
static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
                                  struct ctl_lun_req *req);
static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
                                      struct ctl_lun_req *req, int do_wait);
static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
                                  struct ctl_lun_req *req);
static void ctl_backend_ramdisk_worker(void *context, int pending);
static void ctl_backend_ramdisk_lun_shutdown(void *be_lun);
static void ctl_backend_ramdisk_lun_config_status(void *be_lun,
                                                  ctl_lun_config_status status);
static int ctl_backend_ramdisk_config_write(union ctl_io *io);
static int ctl_backend_ramdisk_config_read(union ctl_io *io);

static struct ctl_backend_driver ctl_be_ramdisk_driver =
{
        .name = "ramdisk",
        .flags = CTL_BE_FLAG_HAS_CONFIG,
        .init = ctl_backend_ramdisk_init,
        .data_submit = ctl_backend_ramdisk_submit,
        .data_move_done = ctl_backend_ramdisk_move_done,
        .config_read = ctl_backend_ramdisk_config_read,
        .config_write = ctl_backend_ramdisk_config_write,
        .ioctl = ctl_backend_ramdisk_ioctl
};

MALLOC_DEFINE(M_RAMDISK, "ramdisk", "Memory used for CTL RAMdisk");
CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver);

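/*
 * Backend initialization, invoked via the CTL backend registration path.
 * Allocates the shared 1 MB backing store used by every ramdisk LUN.
 */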
int
ctl_backend_ramdisk_init(void)
{
        struct ctl_be_ramdisk_softc *softc;
#ifdef CTL_RAMDISK_PAGES
        int i;
#endif

        softc = &rd_softc;

        memset(softc, 0, sizeof(*softc));

        mtx_init(&softc->lock, "ctlramdisk", NULL, MTX_DEF);

        STAILQ_INIT(&softc->lun_list);
        softc->rd_size = 1024 * 1024;
#ifdef CTL_RAMDISK_PAGES
        softc->num_pages = softc->rd_size / PAGE_SIZE;
        softc->ramdisk_pages = (uint8_t **)malloc(sizeof(uint8_t *) *
                                                  softc->num_pages, M_RAMDISK,
                                                  M_WAITOK);
        for (i = 0; i < softc->num_pages; i++)
                softc->ramdisk_pages[i] = malloc(PAGE_SIZE, M_RAMDISK, M_WAITOK);
#else
        softc->ramdisk_buffer = (uint8_t *)malloc(softc->rd_size, M_RAMDISK,
                                                  M_WAITOK);
#endif

        return (0);
}

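/*
 * Module shutdown: disable and invalidate every LUN we still own, release
 * the shared backing store, and deregister the backend driver.
 */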
void
ctl_backend_ramdisk_shutdown(void)
{
        struct ctl_be_ramdisk_softc *softc;
        struct ctl_be_ramdisk_lun *lun, *next_lun;
#ifdef CTL_RAMDISK_PAGES
        int i;
#endif

        softc = &rd_softc;

        mtx_lock(&softc->lock);
        for (lun = STAILQ_FIRST(&softc->lun_list); lun != NULL; lun = next_lun) {
                /*
                 * Grab the next LUN.  The current LUN may get removed by
                 * ctl_invalidate_lun(), which will call our LUN shutdown
                 * routine, if there is no outstanding I/O for this LUN.
                 */
                next_lun = STAILQ_NEXT(lun, links);

                /*
                 * Drop our lock here.  Since ctl_invalidate_lun() can call
                 * back into us, this could potentially lead to a recursive
                 * lock of the same mutex, which would cause a hang.
                 */
                mtx_unlock(&softc->lock);
                ctl_disable_lun(&lun->ctl_be_lun);
                ctl_invalidate_lun(&lun->ctl_be_lun);
                mtx_lock(&softc->lock);
        }
        mtx_unlock(&softc->lock);

#ifdef CTL_RAMDISK_PAGES
        for (i = 0; i < softc->num_pages; i++)
                free(softc->ramdisk_pages[i], M_RAMDISK);

        free(softc->ramdisk_pages, M_RAMDISK);
#else
        free(softc->ramdisk_buffer, M_RAMDISK);
#endif

        if (ctl_backend_deregister(&ctl_be_ramdisk_driver) != 0) {
                printf("ctl_backend_ramdisk_shutdown: "
                       "ctl_backend_deregister() failed!\n");
        }
}

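/*
 * DMA completion callback.  If the request still has data left to move
 * (tracked in the CTL_PRIV_BACKEND integer), requeue it on the LUN's
 * continuation queue; otherwise report final status back to CTL.
 */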
static int
ctl_backend_ramdisk_move_done(union ctl_io *io)
{
        struct ctl_be_lun *ctl_be_lun;
        struct ctl_be_ramdisk_lun *be_lun;
#ifdef CTL_TIME_IO
        struct bintime cur_bt;
#endif

        CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n"));
        ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
                CTL_PRIV_BACKEND_LUN].ptr;
        be_lun = (struct ctl_be_ramdisk_lun *)ctl_be_lun->be_lun;
#ifdef CTL_TIME_IO
        getbintime(&cur_bt);
        bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
        bintime_add(&io->io_hdr.dma_bt, &cur_bt);
        io->io_hdr.num_dmas++;
#endif
        if (io->scsiio.kern_sg_entries > 0)
                free(io->scsiio.kern_data_ptr, M_RAMDISK);
        io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
        if (io->io_hdr.flags & CTL_FLAG_ABORT) {
                ;
        } else if ((io->io_hdr.port_status == 0) &&
            ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
                if (io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer > 0) {
                        mtx_lock(&be_lun->queue_lock);
                        STAILQ_INSERT_TAIL(&be_lun->cont_queue,
                            &io->io_hdr, links);
                        mtx_unlock(&be_lun->queue_lock);
                        taskqueue_enqueue(be_lun->io_taskqueue,
                            &be_lun->io_task);
                        return (0);
                }
                ctl_set_success(&io->scsiio);
        } else if ((io->io_hdr.port_status != 0) &&
            ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
             (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
                /*
                 * For hardware error sense keys, the sense key
                 * specific value is defined to be a retry count,
                 * but we use it to pass back an internal FETD
                 * error code.  XXX KDM  Hopefully the FETD is only
                 * using 16 bits for an error code, since that's
                 * all the space we have in the sks field.
                 */
                ctl_set_internal_failure(&io->scsiio,
                                         /*sks_valid*/ 1,
                                         /*retry_count*/
                                         io->io_hdr.port_status);
        }
        ctl_data_submit_done(io);
        return (0);
}

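/*
 * Entry point for READ/WRITE/VERIFY data requests.  VERIFY completes
 * immediately (there is nothing to check against); everything else is
 * handed to the chunked data-move loop below.
 */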
static int
ctl_backend_ramdisk_submit(union ctl_io *io)
{
        struct ctl_be_lun *ctl_be_lun;
        struct ctl_lba_len_flags *lbalen;

        ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
                CTL_PRIV_BACKEND_LUN].ptr;
        lbalen = (struct ctl_lba_len_flags *)&io->io_hdr.ctl_private[CTL_PRIV_LBA_LEN];
        if (lbalen->flags & CTL_LLF_VERIFY) {
                ctl_set_success(&io->scsiio);
                ctl_data_submit_done(io);
                return (CTL_RETVAL_COMPLETE);
        }
        io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer =
            lbalen->len * ctl_be_lun->blocksize;
        ctl_backend_ramdisk_continue(io);
        return (CTL_RETVAL_COMPLETE);
}

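/*
 * Set up the next data-move chunk.  The request is pointed at the shared
 * backing store (a single buffer, or an S/G list of pages when
 * CTL_RAMDISK_PAGES is defined) and handed to ctl_datamove(); at most
 * softc->rd_size bytes are moved per pass.
 */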
static void
ctl_backend_ramdisk_continue(union ctl_io *io)
{
        struct ctl_be_ramdisk_softc *softc;
        int len, len_filled, sg_filled;
#ifdef CTL_RAMDISK_PAGES
        struct ctl_sg_entry *sg_entries;
        int i;
#endif

        softc = &rd_softc;
        len = io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer;
#ifdef CTL_RAMDISK_PAGES
        sg_filled = min(btoc(len), softc->num_pages);
        if (sg_filled > 1) {
                io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) *
                                                  sg_filled, M_RAMDISK,
                                                  M_WAITOK);
                sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
                for (i = 0, len_filled = 0; i < sg_filled; i++) {
                        sg_entries[i].addr = softc->ramdisk_pages[i];
                        sg_entries[i].len = MIN(PAGE_SIZE, len - len_filled);
                        len_filled += sg_entries[i].len;
                }
                io->io_hdr.flags |= CTL_FLAG_KDPTR_SGLIST;
        } else {
                sg_filled = 0;
                len_filled = len;
                io->scsiio.kern_data_ptr = softc->ramdisk_pages[0];
        }
#else
        sg_filled = 0;
        len_filled = min(len, softc->rd_size);
        io->scsiio.kern_data_ptr = softc->ramdisk_buffer;
#endif /* CTL_RAMDISK_PAGES */

        io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
        io->scsiio.kern_data_resid = 0;
        io->scsiio.kern_data_len = len_filled;
        io->scsiio.kern_sg_entries = sg_filled;
        io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
        io->io_hdr.ctl_private[CTL_PRIV_BACKEND].integer -= len_filled;
#ifdef CTL_TIME_IO
        getbintime(&io->io_hdr.dma_start_bt);
#endif
        ctl_datamove(io);
}

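/*
 * Taskqueue worker: drain the LUN's continuation queue, issuing the next
 * chunk of each queued I/O from a sleepable thread context.
 */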
static void
ctl_backend_ramdisk_worker(void *context, int pending)
{
        struct ctl_be_ramdisk_softc *softc;
        struct ctl_be_ramdisk_lun *be_lun;
        union ctl_io *io;

        be_lun = (struct ctl_be_ramdisk_lun *)context;
        softc = be_lun->softc;

        mtx_lock(&be_lun->queue_lock);
        for (;;) {
                io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue);
                if (io != NULL) {
                        STAILQ_REMOVE(&be_lun->cont_queue, &io->io_hdr,
                                      ctl_io_hdr, links);

                        mtx_unlock(&be_lun->queue_lock);

                        ctl_backend_ramdisk_continue(io);

                        mtx_lock(&be_lun->queue_lock);
                        continue;
                }

                /*
                 * If we get here, there is no work left in the queues, so
                 * just break out and let the task queue go to sleep.
                 */
                break;
        }
        mtx_unlock(&be_lun->queue_lock);
}

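/*
 * Handle LUN management requests (create/remove/modify) arriving from
 * userland through the CTL character device.
 */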
static int
ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
                          int flag, struct thread *td)
{
        struct ctl_be_ramdisk_softc *softc;
        int retval;

        retval = 0;
        softc = &rd_softc;

        switch (cmd) {
        case CTL_LUN_REQ: {
                struct ctl_lun_req *lun_req;

                lun_req = (struct ctl_lun_req *)addr;

                switch (lun_req->reqtype) {
                case CTL_LUNREQ_CREATE:
                        retval = ctl_backend_ramdisk_create(softc, lun_req,
                                                            /*do_wait*/ 1);
                        break;
                case CTL_LUNREQ_RM:
                        retval = ctl_backend_ramdisk_rm(softc, lun_req);
                        break;
                case CTL_LUNREQ_MODIFY:
                        retval = ctl_backend_ramdisk_modify(softc, lun_req);
                        break;
                default:
                        lun_req->status = CTL_LUN_ERROR;
                        snprintf(lun_req->error_str, sizeof(lun_req->error_str),
                                 "%s: invalid LUN request type %d", __func__,
                                 lun_req->reqtype);
                        break;
                }
                break;
        }
        default:
                retval = ENOTTY;
                break;
        }

        return (retval);
}

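/*
 * Remove a LUN: disable it, invalidate it, then wait (interruptibly) for
 * CTL to call our shutdown callback before freeing the per-LUN resources.
 */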
static int
ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
                       struct ctl_lun_req *req)
{
        struct ctl_be_ramdisk_lun *be_lun;
        struct ctl_lun_rm_params *params;
        int retval;

        retval = 0;
        params = &req->reqdata.rm;

        be_lun = NULL;

        mtx_lock(&softc->lock);

        STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
                if (be_lun->ctl_be_lun.lun_id == params->lun_id)
                        break;
        }
        mtx_unlock(&softc->lock);

        if (be_lun == NULL) {
                snprintf(req->error_str, sizeof(req->error_str),
                         "%s: LUN %u is not managed by the ramdisk backend",
                         __func__, params->lun_id);
                goto bailout_error;
        }

        retval = ctl_disable_lun(&be_lun->ctl_be_lun);

        if (retval != 0) {
                snprintf(req->error_str, sizeof(req->error_str),
                         "%s: error %d returned from ctl_disable_lun() for "
                         "LUN %d", __func__, retval, params->lun_id);
                goto bailout_error;
        }

        /*
         * Set the waiting flag before we invalidate the LUN.  Our shutdown
         * routine can be called any time after we invalidate the LUN,
         * and can be called from our context.
         *
         * This tells the shutdown routine that we're waiting, or we're
         * going to wait for the shutdown to happen.
         */
        mtx_lock(&softc->lock);
        be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
        mtx_unlock(&softc->lock);

        retval = ctl_invalidate_lun(&be_lun->ctl_be_lun);
        if (retval != 0) {
                snprintf(req->error_str, sizeof(req->error_str),
                         "%s: error %d returned from ctl_invalidate_lun() for "
                         "LUN %d", __func__, retval, params->lun_id);
                mtx_lock(&softc->lock);
                be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
                mtx_unlock(&softc->lock);
                goto bailout_error;
        }

        mtx_lock(&softc->lock);

        while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) {
                retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
                if (retval == EINTR)
                        break;
        }
        be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;

        /*
         * We only remove this LUN from the list and free it (below) if
         * retval == 0.  If the user interrupted the wait, we just bail out
         * without actually freeing the LUN.  We let the shutdown routine
         * free the LUN if that happens.
         */
        if (retval == 0) {
                STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
                              links);
                softc->num_luns--;
        }

        mtx_unlock(&softc->lock);

        if (retval == 0) {
                taskqueue_drain(be_lun->io_taskqueue, &be_lun->io_task);
                taskqueue_free(be_lun->io_taskqueue);
                ctl_free_opts(&be_lun->ctl_be_lun.options);
                mtx_destroy(&be_lun->queue_lock);
                free(be_lun, M_RAMDISK);
        }

        req->status = CTL_LUN_OK;

        return (retval);

bailout_error:
        req->status = CTL_LUN_ERROR;

        return (0);
}

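/*
 * Create a new LUN.  The ramdisk backend keeps no per-LUN data, so this
 * mostly consists of filling in ctl_be_lun parameters (size, block size,
 * serial number, device ID) and registering the LUN with CTL.  When
 * do_wait is set, sleep until the config_status callback reports the
 * result.
 */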
static int
ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
                           struct ctl_lun_req *req, int do_wait)
{
        struct ctl_be_ramdisk_lun *be_lun;
        struct ctl_lun_create_params *params;
        uint32_t blocksize;
        char *value;
        char tmpstr[32];
        int retval, unmap;

        retval = 0;
        params = &req->reqdata.create;
        if (params->blocksize_bytes != 0)
                blocksize = params->blocksize_bytes;
        else
                blocksize = 512;

        be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | (do_wait ?
                        M_WAITOK : M_NOWAIT));

        if (be_lun == NULL) {
                snprintf(req->error_str, sizeof(req->error_str),
                         "%s: error allocating %zd bytes", __func__,
                         sizeof(*be_lun));
                goto bailout_error;
        }
        sprintf(be_lun->lunname, "cram%d", softc->num_luns);
        ctl_init_opts(&be_lun->ctl_be_lun.options,
            req->num_be_args, req->kern_be_args);

        if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
                be_lun->ctl_be_lun.lun_type = params->device_type;
        else
                be_lun->ctl_be_lun.lun_type = T_DIRECT;

        if (be_lun->ctl_be_lun.lun_type == T_DIRECT) {

                if (params->lun_size_bytes < blocksize) {
                        snprintf(req->error_str, sizeof(req->error_str),
                                 "%s: LUN size %ju < blocksize %u", __func__,
                                 params->lun_size_bytes, blocksize);
                        goto bailout_error;
                }

                be_lun->size_blocks = params->lun_size_bytes / blocksize;
                be_lun->size_bytes = be_lun->size_blocks * blocksize;

                be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
                be_lun->ctl_be_lun.atomicblock = UINT32_MAX;
                be_lun->ctl_be_lun.opttxferlen = softc->rd_size / blocksize;
        } else {
                be_lun->ctl_be_lun.maxlba = 0;
                blocksize = 0;
                be_lun->size_bytes = 0;
                be_lun->size_blocks = 0;
        }

        be_lun->ctl_be_lun.blocksize = blocksize;

        /* Tell the user the blocksize we ended up using */
        params->blocksize_bytes = blocksize;

        /* Tell the user the exact size we ended up using */
        params->lun_size_bytes = be_lun->size_bytes;

        be_lun->softc = softc;

        /* UNMAP defaults to on; an explicit "unmap" option overrides it. */
        unmap = 1;
        value = ctl_get_opt(&be_lun->ctl_be_lun.options, "unmap");
        if (value != NULL)
                unmap = (strcmp(value, "on") == 0);

        be_lun->flags = CTL_BE_RAMDISK_LUN_UNCONFIGURED;
        be_lun->ctl_be_lun.flags = CTL_LUN_FLAG_PRIMARY;
        if (unmap)
                be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_UNMAP;
        be_lun->ctl_be_lun.be_lun = be_lun;

        if (params->flags & CTL_LUN_FLAG_ID_REQ) {
                be_lun->ctl_be_lun.req_lun_id = params->req_lun_id;
                be_lun->ctl_be_lun.flags |= CTL_LUN_FLAG_ID_REQ;
        } else
                be_lun->ctl_be_lun.req_lun_id = 0;

        be_lun->ctl_be_lun.lun_shutdown = ctl_backend_ramdisk_lun_shutdown;
        be_lun->ctl_be_lun.lun_config_status =
                ctl_backend_ramdisk_lun_config_status;
        be_lun->ctl_be_lun.be = &ctl_be_ramdisk_driver;
        if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
                snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
                         softc->num_luns);
                strncpy((char *)be_lun->ctl_be_lun.serial_num, tmpstr,
                        MIN(sizeof(be_lun->ctl_be_lun.serial_num),
                            sizeof(tmpstr)));

                /* Tell the user what we used for a serial number */
                strncpy((char *)params->serial_num, tmpstr,
                        MIN(sizeof(params->serial_num), sizeof(tmpstr)));
        } else {
                strncpy((char *)be_lun->ctl_be_lun.serial_num,
                        params->serial_num,
                        MIN(sizeof(be_lun->ctl_be_lun.serial_num),
                            sizeof(params->serial_num)));
        }
        if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
                snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
                strncpy((char *)be_lun->ctl_be_lun.device_id, tmpstr,
                        MIN(sizeof(be_lun->ctl_be_lun.device_id),
                            sizeof(tmpstr)));

                /* Tell the user what we used for a device ID */
                strncpy((char *)params->device_id, tmpstr,
                        MIN(sizeof(params->device_id), sizeof(tmpstr)));
        } else {
                strncpy((char *)be_lun->ctl_be_lun.device_id,
                        params->device_id,
                        MIN(sizeof(be_lun->ctl_be_lun.device_id),
                            sizeof(params->device_id)));
        }

        STAILQ_INIT(&be_lun->cont_queue);
        mtx_init(&be_lun->queue_lock, "cram queue lock", NULL, MTX_DEF);
        TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker,
            be_lun);

        be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
            taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
        if (be_lun->io_taskqueue == NULL) {
                snprintf(req->error_str, sizeof(req->error_str),
                         "%s: Unable to create taskqueue", __func__);
                goto bailout_error;
        }

        retval = taskqueue_start_threads(&be_lun->io_taskqueue,
                                         /*num threads*/1,
                                         /*priority*/PWAIT,
                                         /*thread name*/
                                         "%s taskq", be_lun->lunname);
        if (retval != 0)
                goto bailout_error;

        mtx_lock(&softc->lock);
        softc->num_luns++;
        STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);

        mtx_unlock(&softc->lock);

        retval = ctl_add_lun(&be_lun->ctl_be_lun);
        if (retval != 0) {
                mtx_lock(&softc->lock);
                STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
                              links);
                softc->num_luns--;
                mtx_unlock(&softc->lock);
                snprintf(req->error_str, sizeof(req->error_str),
                         "%s: ctl_add_lun() returned error %d, see dmesg for "
                         "details", __func__, retval);
                retval = 0;
                goto bailout_error;
        }

        if (do_wait == 0)
                return (retval);

        mtx_lock(&softc->lock);

        /*
         * Tell the config_status routine that we're waiting so it won't
         * clean up the LUN in the event of an error.
         */
        be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;

        while (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) {
                retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
                if (retval == EINTR)
                        break;
        }
        be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;

        if (be_lun->flags & CTL_BE_RAMDISK_LUN_CONFIG_ERR) {
                snprintf(req->error_str, sizeof(req->error_str),
                         "%s: LUN configuration error, see dmesg for details",
                         __func__);
                STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
                              links);
                softc->num_luns--;
                mtx_unlock(&softc->lock);
                goto bailout_error;
        } else {
                params->req_lun_id = be_lun->ctl_be_lun.lun_id;
        }
        mtx_unlock(&softc->lock);

        req->status = CTL_LUN_OK;

        return (retval);

bailout_error:
        req->status = CTL_LUN_ERROR;
        if (be_lun != NULL) {
                if (be_lun->io_taskqueue != NULL) {
                        taskqueue_free(be_lun->io_taskqueue);
                }
                ctl_free_opts(&be_lun->ctl_be_lun.options);
                mtx_destroy(&be_lun->queue_lock);
                free(be_lun, M_RAMDISK);
        }

        return (retval);
}

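/*
 * Resize an existing LUN.  Only an explicit size is supported; the new
 * capacity is reported to CTL via ctl_lun_capacity_changed().
 */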
static int
ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
                       struct ctl_lun_req *req)
{
        struct ctl_be_ramdisk_lun *be_lun;
        struct ctl_lun_modify_params *params;
        uint32_t blocksize;

        params = &req->reqdata.modify;

        be_lun = NULL;

        mtx_lock(&softc->lock);
        STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
                if (be_lun->ctl_be_lun.lun_id == params->lun_id)
                        break;
        }
        mtx_unlock(&softc->lock);

        if (be_lun == NULL) {
                snprintf(req->error_str, sizeof(req->error_str),
                         "%s: LUN %u is not managed by the ramdisk backend",
                         __func__, params->lun_id);
                goto bailout_error;
        }

        if (params->lun_size_bytes == 0) {
                snprintf(req->error_str, sizeof(req->error_str),
                        "%s: LUN size \"auto\" not supported "
                        "by the ramdisk backend", __func__);
                goto bailout_error;
        }

        blocksize = be_lun->ctl_be_lun.blocksize;

        if (params->lun_size_bytes < blocksize) {
                snprintf(req->error_str, sizeof(req->error_str),
                        "%s: LUN size %ju < blocksize %u", __func__,
                        params->lun_size_bytes, blocksize);
                goto bailout_error;
        }

        be_lun->size_blocks = params->lun_size_bytes / blocksize;
        be_lun->size_bytes = be_lun->size_blocks * blocksize;

        /*
         * The maximum LBA is the size - 1.
         *
         * XXX: Note that this field is being updated without locking,
         *      which might cause problems on 32-bit architectures.
         */
        be_lun->ctl_be_lun.maxlba = be_lun->size_blocks - 1;
        ctl_lun_capacity_changed(&be_lun->ctl_be_lun);

        /* Tell the user the exact size we ended up using */
        params->lun_size_bytes = be_lun->size_bytes;

        req->status = CTL_LUN_OK;

        return (0);

bailout_error:
        req->status = CTL_LUN_ERROR;

        return (0);
}

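/*
 * Called by CTL once the last reference to an invalidated LUN goes away.
 * If a remove request is waiting, wake it up; otherwise free the LUN here.
 */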
static void
ctl_backend_ramdisk_lun_shutdown(void *be_lun)
{
        struct ctl_be_ramdisk_lun *lun;
        struct ctl_be_ramdisk_softc *softc;
        int do_free;

        lun = (struct ctl_be_ramdisk_lun *)be_lun;
        softc = lun->softc;
        do_free = 0;

        mtx_lock(&softc->lock);

        lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED;

        if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
                wakeup(lun);
        } else {
                STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
                              links);
                softc->num_luns--;
                do_free = 1;
        }

        mtx_unlock(&softc->lock);

        if (do_free != 0)
                free(be_lun, M_RAMDISK);
}

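/*
 * Called by CTL with the result of ctl_add_lun().  On success, clear the
 * UNCONFIGURED flag and enable the LUN; on failure, either let a waiting
 * create request handle the cleanup or free the LUN ourselves.
 */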
static void
ctl_backend_ramdisk_lun_config_status(void *be_lun,
                                      ctl_lun_config_status status)
{
        struct ctl_be_ramdisk_lun *lun;
        struct ctl_be_ramdisk_softc *softc;

        lun = (struct ctl_be_ramdisk_lun *)be_lun;
        softc = lun->softc;

        if (status == CTL_LUN_CONFIG_OK) {
                mtx_lock(&softc->lock);
                lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;
                if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING)
                        wakeup(lun);
                mtx_unlock(&softc->lock);

                /*
                 * We successfully added the LUN, attempt to enable it.
                 */
                if (ctl_enable_lun(&lun->ctl_be_lun) != 0) {
                        printf("%s: ctl_enable_lun() failed!\n", __func__);
                        if (ctl_invalidate_lun(&lun->ctl_be_lun) != 0) {
                                printf("%s: ctl_invalidate_lun() failed!\n",
                                       __func__);
                        }
                }

                return;
        }

        mtx_lock(&softc->lock);
        lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;

        /*
         * If we have a user waiting, let him handle the cleanup.  If not,
         * clean things up here.
         */
        if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
                lun->flags |= CTL_BE_RAMDISK_LUN_CONFIG_ERR;
                wakeup(lun);
        } else {
                STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
                              links);
                softc->num_luns--;
                free(lun, M_RAMDISK);
        }
        mtx_unlock(&softc->lock);
}

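/*
 * Handle configuration (non-data) writes.  Cache syncs, WRITE SAME and
 * UNMAP are no-ops for a fake ramdisk; START STOP UNIT just toggles LUN
 * state in CTL.
 */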
static int
ctl_backend_ramdisk_config_write(union ctl_io *io)
{
        struct ctl_be_ramdisk_softc *softc;
        int retval;

        retval = 0;
        softc = &rd_softc;

        switch (io->scsiio.cdb[0]) {
        case SYNCHRONIZE_CACHE:
        case SYNCHRONIZE_CACHE_16:
                /*
                 * The upper level CTL code will filter out any CDBs with
                 * the immediate bit set and return the proper error.  It
                 * will also not allow a sync cache command to go to a LUN
                 * that is powered down.
                 *
                 * We don't really need to worry about what LBA range the
                 * user asked to be synced out.  When they issue a sync
                 * cache command, we'll sync out the whole thing.
                 *
                 * This is obviously just a stubbed out implementation.
                 * The real implementation will be in the RAIDCore/CTL
                 * interface, and can only really happen when RAIDCore
                 * implements a per-array cache sync.
                 */
                ctl_set_success(&io->scsiio);
                ctl_config_write_done(io);
                break;
        case START_STOP_UNIT: {
                struct scsi_start_stop_unit *cdb;
                struct ctl_be_lun *ctl_be_lun;
                struct ctl_be_ramdisk_lun *be_lun;

                cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;

                ctl_be_lun = (struct ctl_be_lun *)io->io_hdr.ctl_private[
                        CTL_PRIV_BACKEND_LUN].ptr;
                be_lun = (struct ctl_be_ramdisk_lun *)ctl_be_lun->be_lun;

                if (cdb->how & SSS_START)
                        retval = ctl_start_lun(ctl_be_lun);
                else {
                        retval = ctl_stop_lun(ctl_be_lun);
#ifdef NEEDTOPORT
                        if ((retval == 0)
                         && (cdb->byte2 & SSS_ONOFFLINE))
                                retval = ctl_lun_offline(ctl_be_lun);
#endif
                }

                /*
                 * In general, the above routines should not fail.  They
                 * just set state for the LUN.  So we've got something
                 * pretty wrong here if we can't start or stop the LUN.
                 */
                if (retval != 0) {
                        ctl_set_internal_failure(&io->scsiio,
                                                 /*sks_valid*/ 1,
                                                 /*retry_count*/ 0xf051);
                        retval = CTL_RETVAL_COMPLETE;
                } else {
                        ctl_set_success(&io->scsiio);
                }
                ctl_config_write_done(io);
                break;
        }
        case WRITE_SAME_10:
        case WRITE_SAME_16:
        case UNMAP:
                ctl_set_success(&io->scsiio);
                ctl_config_write_done(io);
                break;
        default:
                ctl_set_invalid_opcode(&io->scsiio);
                ctl_config_write_done(io);
                retval = CTL_RETVAL_COMPLETE;
                break;
        }

        return (retval);
}

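/*
 * Handle configuration reads.  Only the SERVICE ACTION IN form used for
 * GET LBA STATUS is recognized, and we simply return the default data
 * prepared by CTL.
 */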
static int
ctl_backend_ramdisk_config_read(union ctl_io *io)
{
        int retval = 0;

        switch (io->scsiio.cdb[0]) {
        case SERVICE_ACTION_IN:
                if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) {
                        /* We have nothing to tell, leave default data. */
                        ctl_config_read_done(io);
                        retval = CTL_RETVAL_COMPLETE;
                        break;
                }
                ctl_set_invalid_field(&io->scsiio,
                                      /*sks_valid*/ 1,
                                      /*command*/ 1,
                                      /*field*/ 1,
                                      /*bit_valid*/ 1,
                                      /*bit*/ 4);
                ctl_config_read_done(io);
                retval = CTL_RETVAL_COMPLETE;
                break;
        default:
                ctl_set_invalid_opcode(&io->scsiio);
                ctl_config_read_done(io);
                retval = CTL_RETVAL_COMPLETE;
                break;
        }

        return (retval);
}