1 /*-
2  * Copyright (c) 2003, 2008 Silicon Graphics International Corp.
3  * Copyright (c) 2012 The FreeBSD Foundation
4  * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org>
5  * All rights reserved.
6  *
7  * Portions of this software were developed by Edward Tomasz Napierala
8  * under sponsorship from the FreeBSD Foundation.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions, and the following disclaimer,
15  *    without modification.
16  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
17  *    substantially similar to the "NO WARRANTY" disclaimer below
18  *    ("Disclaimer") and any redistribution must be conditioned upon
19  *    including a substantially similar Disclaimer requirement for further
20  *    binary redistribution.
21  *
22  * NO WARRANTY
23  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
24  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
25  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
26  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
27  * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
31  * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
32  * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33  * POSSIBILITY OF SUCH DAMAGES.
34  *
35  * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $
36  */
37 /*
38  * CAM Target Layer black hole and RAM disk backend.
39  *
40  * Author: Ken Merry <ken@FreeBSD.org>
41  */
42
43 #include <sys/cdefs.h>
44 __FBSDID("$FreeBSD$");
45
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/kernel.h>
49 #include <sys/condvar.h>
50 #include <sys/types.h>
51 #include <sys/limits.h>
52 #include <sys/lock.h>
53 #include <sys/mutex.h>
54 #include <sys/malloc.h>
55 #include <sys/sx.h>
56 #include <sys/taskqueue.h>
57 #include <sys/time.h>
58 #include <sys/queue.h>
59 #include <sys/conf.h>
60 #include <sys/ioccom.h>
61 #include <sys/module.h>
62 #include <sys/sysctl.h>
63
64 #include <cam/scsi/scsi_all.h>
65 #include <cam/scsi/scsi_da.h>
66 #include <cam/ctl/ctl_io.h>
67 #include <cam/ctl/ctl.h>
68 #include <cam/ctl/ctl_util.h>
69 #include <cam/ctl/ctl_backend.h>
70 #include <cam/ctl/ctl_debug.h>
71 #include <cam/ctl/ctl_ioctl.h>
72 #include <cam/ctl/ctl_ha.h>
73 #include <cam/ctl/ctl_private.h>
74 #include <cam/ctl/ctl_error.h>
75
76 #define PRIV(io)        \
77     ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND])
78 #define ARGS(io)        \
79     ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN])
80
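/*
 * Page directory geometry.  When a "capacity" limit is configured the
 * backing store is a radix tree rooted at be_lun->pages: every indirect
 * node is one malloc'ed page holding PPP pointers (PPPS == log2(PPP)),
 * and leaves either point at data pages of pblocksize bytes or hold one
 * of the P_UNMAPPED/P_ANCHORED markers below.  For example, with 4KB
 * pages and 64-bit pointers, PPP is 512 and PPPS is 9, so each extra
 * level of indirection multiplies the addressable size by 512.  SGPP is
 * the number of S/G entries that fit into one page and bounds how many
 * backing pages a single I/O chunk may touch.
 */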
81 #define PPP     (PAGE_SIZE / sizeof(uint8_t **))
82 #ifdef __LP64__
83 #define PPPS    (PAGE_SHIFT - 3)
84 #else
85 #define PPPS    (PAGE_SHIFT - 2)
86 #endif
87 #define SGPP    (PAGE_SIZE / sizeof(struct ctl_sg_entry))
88
89 #define P_UNMAPPED      NULL                    /* Page is unmapped. */
90 #define P_ANCHORED      ((void *)(uintptr_t)1)  /* Page is anchored. */
91
92 typedef enum {
93         GP_READ,        /* Return data page or zero page. */
94         GP_WRITE,       /* Return data page; try to allocate one if none. */
95         GP_ANCHOR,      /* Return data page; try to anchor one if none. */
96         GP_OTHER,       /* Return what is present; do not allocate/anchor. */
97 } getpage_op_t;
98
99 typedef enum {
100         CTL_BE_RAMDISK_LUN_UNCONFIGURED = 0x01,
101         CTL_BE_RAMDISK_LUN_CONFIG_ERR   = 0x02,
102         CTL_BE_RAMDISK_LUN_WAITING      = 0x04
103 } ctl_be_ramdisk_lun_flags;
104
105 struct ctl_be_ramdisk_lun {
106         struct ctl_lun_create_params params;
107         char                    lunname[32];
108         int                     indir;
109         uint8_t                 **pages;
110         uint8_t                 *zero_page;
111         struct sx               page_lock;
112         u_int                   pblocksize;
113         u_int                   pblockmul;
114         uint64_t                size_bytes;
115         uint64_t                size_blocks;
116         uint64_t                cap_bytes;
117         uint64_t                cap_used;
118         struct ctl_be_ramdisk_softc *softc;
119         ctl_be_ramdisk_lun_flags flags;
120         STAILQ_ENTRY(ctl_be_ramdisk_lun) links;
121         struct ctl_be_lun       cbe_lun;
122         struct taskqueue        *io_taskqueue;
123         struct task             io_task;
124         STAILQ_HEAD(, ctl_io_hdr) cont_queue;
125         struct mtx_padalign     queue_lock;
126 };
127
128 struct ctl_be_ramdisk_softc {
129         struct mtx lock;
130         int num_luns;
131         STAILQ_HEAD(, ctl_be_ramdisk_lun) lun_list;
132 };
133
134 static struct ctl_be_ramdisk_softc rd_softc;
135 extern struct ctl_softc *control_softc;
136
137 static int ctl_backend_ramdisk_init(void);
138 static int ctl_backend_ramdisk_shutdown(void);
139 static int ctl_backend_ramdisk_move_done(union ctl_io *io);
140 static void ctl_backend_ramdisk_compare(union ctl_io *io);
141 static void ctl_backend_ramdisk_rw(union ctl_io *io);
142 static int ctl_backend_ramdisk_submit(union ctl_io *io);
143 static void ctl_backend_ramdisk_worker(void *context, int pending);
144 static int ctl_backend_ramdisk_config_read(union ctl_io *io);
145 static int ctl_backend_ramdisk_config_write(union ctl_io *io);
146 static uint64_t ctl_backend_ramdisk_lun_attr(void *be_lun, const char *attrname);
147 static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd,
148                                      caddr_t addr, int flag, struct thread *td);
149 static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
150                                   struct ctl_lun_req *req);
151 static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
152                                       struct ctl_lun_req *req);
153 static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
154                                   struct ctl_lun_req *req);
155 static void ctl_backend_ramdisk_lun_shutdown(void *be_lun);
156 static void ctl_backend_ramdisk_lun_config_status(void *be_lun,
157                                                   ctl_lun_config_status status);
158
159 static struct ctl_backend_driver ctl_be_ramdisk_driver = 
160 {
161         .name = "ramdisk",
162         .flags = CTL_BE_FLAG_HAS_CONFIG,
163         .init = ctl_backend_ramdisk_init,
164         .shutdown = ctl_backend_ramdisk_shutdown,
165         .data_submit = ctl_backend_ramdisk_submit,
166         .data_move_done = ctl_backend_ramdisk_move_done,
167         .config_read = ctl_backend_ramdisk_config_read,
168         .config_write = ctl_backend_ramdisk_config_write,
169         .ioctl = ctl_backend_ramdisk_ioctl,
170         .lun_attr = ctl_backend_ramdisk_lun_attr,
171 };
172
173 MALLOC_DEFINE(M_RAMDISK, "ramdisk", "Memory used for CTL RAMdisk");
174 CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver);
175
176 static int
177 ctl_backend_ramdisk_init(void)
178 {
179         struct ctl_be_ramdisk_softc *softc = &rd_softc;
180
181         memset(softc, 0, sizeof(*softc));
182         mtx_init(&softc->lock, "ctlramdisk", NULL, MTX_DEF);
183         STAILQ_INIT(&softc->lun_list);
184         return (0);
185 }
186
187 static int
188 ctl_backend_ramdisk_shutdown(void)
189 {
190         struct ctl_be_ramdisk_softc *softc = &rd_softc;
191         struct ctl_be_ramdisk_lun *lun, *next_lun;
192
193         mtx_lock(&softc->lock);
194         STAILQ_FOREACH_SAFE(lun, &softc->lun_list, links, next_lun) {
195                 /*
196                  * Drop our lock here.  Since ctl_invalidate_lun() can call
197                  * back into us, this could potentially lead to a recursive
198                  * lock of the same mutex, which would cause a hang.
199                  */
200                 mtx_unlock(&softc->lock);
201                 ctl_disable_lun(&lun->cbe_lun);
202                 ctl_invalidate_lun(&lun->cbe_lun);
203                 mtx_lock(&softc->lock);
204         }
205         mtx_unlock(&softc->lock);
206         mtx_destroy(&softc->lock);
207         return (0);
208 }
209
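/*
 * Look up the backing page holding backing-store page number "pn".
 * GP_WRITE and GP_ANCHOR walk the radix tree under an exclusive lock,
 * allocating missing indirect pages on the way, and will allocate
 * (GP_WRITE) or anchor (GP_ANCHOR) the leaf if capacity remains.
 * GP_READ and GP_OTHER walk under a shared lock and never allocate;
 * GP_READ maps holes and anchored pages to the shared zero page.
 * LUNs created without a "capacity" option store nothing: reads get
 * the zero page and all writes land in a single scratch page.
 */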
210 static uint8_t *
211 ctl_backend_ramdisk_getpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn,
212     getpage_op_t op)
213 {
214         uint8_t **p, ***pp;
215         off_t i;
216         int s;
217
218         if (be_lun->cap_bytes == 0) {
219                 switch (op) {
220                 case GP_READ:
221                         return (be_lun->zero_page);
222                 case GP_WRITE:
223                         return ((uint8_t *)be_lun->pages);
224                 case GP_ANCHOR:
225                         return (P_ANCHORED);
226                 default:
227                         return (P_UNMAPPED);
228                 }
229         }
230         if (op == GP_WRITE || op == GP_ANCHOR) {
231                 sx_xlock(&be_lun->page_lock);
232                 pp = &be_lun->pages;
233                 for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
234                         if (*pp == NULL) {
235                                 *pp = malloc(PAGE_SIZE, M_RAMDISK,
236                                     M_WAITOK|M_ZERO);
237                         }
238                         i = pn >> s;
239                         pp = (uint8_t ***)&(*pp)[i];
240                         pn -= i << s;
241                 }
242                 if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) {
243                         if (op == GP_WRITE) {
244                                 *pp = malloc(be_lun->pblocksize, M_RAMDISK,
245                                     M_WAITOK|M_ZERO);
246                         } else
247                                 *pp = P_ANCHORED;
248                         be_lun->cap_used += be_lun->pblocksize;
249                 } else if (*pp == P_ANCHORED && op == GP_WRITE) {
250                         *pp = malloc(be_lun->pblocksize, M_RAMDISK,
251                             M_WAITOK|M_ZERO);
252                 }
253                 sx_xunlock(&be_lun->page_lock);
254                 return ((uint8_t *)*pp);
255         } else {
256                 sx_slock(&be_lun->page_lock);
257                 p = be_lun->pages;
258                 for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
259                         if (p == NULL)
260                                 break;
261                         i = pn >> s;
262                         p = (uint8_t **)p[i];
263                         pn -= i << s;
264                 }
265                 sx_sunlock(&be_lun->page_lock);
266                 if ((p == P_UNMAPPED || p == P_ANCHORED) && op == GP_READ)
267                         return (be_lun->zero_page);
268                 return ((uint8_t *)p);
269         }
270 }
271
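/*
 * Drop the backing page for page number "pn": an anchored leaf is
 * cleared, an allocated data page is freed, and in both cases the
 * consumed capacity is given back.  Indirect pages stay in place.
 * No-op for LUNs without a capacity limit.
 */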
272 static void
273 ctl_backend_ramdisk_unmappage(struct ctl_be_ramdisk_lun *be_lun, off_t pn)
274 {
275         uint8_t ***pp;
276         off_t i;
277         int s;
278
279         if (be_lun->cap_bytes == 0)
280                 return;
281         sx_xlock(&be_lun->page_lock);
282         pp = &be_lun->pages;
283         for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
284                 if (*pp == NULL)
285                         goto noindir;
286                 i = pn >> s;
287                 pp = (uint8_t ***)&(*pp)[i];
288                 pn -= i << s;
289         }
290         if (*pp == P_ANCHORED) {
291                 be_lun->cap_used -= be_lun->pblocksize;
292                 *pp = P_UNMAPPED;
293         } else if (*pp != P_UNMAPPED) {
294                 free(*pp, M_RAMDISK);
295                 be_lun->cap_used -= be_lun->pblocksize;
296                 *pp = P_UNMAPPED;
297         }
298 noindir:
299         sx_xunlock(&be_lun->page_lock);
300 }
301
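/*
 * Anchor page number "pn" so its backing space stays reserved: an
 * allocated data page is freed but keeps its capacity charge, while an
 * unmapped leaf is marked anchored (charging capacity when available).
 * No-op for LUNs without a capacity limit.
 */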
302 static void
303 ctl_backend_ramdisk_anchorpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn)
304 {
305         uint8_t ***pp;
306         off_t i;
307         int s;
308
309         if (be_lun->cap_bytes == 0)
310                 return;
311         sx_xlock(&be_lun->page_lock);
312         pp = &be_lun->pages;
313         for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
314                 if (*pp == NULL)
315                         goto noindir;
316                 i = pn >> s;
317                 pp = (uint8_t ***)&(*pp)[i];
318                 pn -= i << s;
319         }
320         if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) {
321                 be_lun->cap_used += be_lun->pblocksize;
322                 *pp = P_ANCHORED;
323         } else if (*pp != P_ANCHORED) {
324                 free(*pp, M_RAMDISK);
325                 *pp = P_ANCHORED;
326         }
327 noindir:
328         sx_xunlock(&be_lun->page_lock);
329 }
330
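/*
 * Recursively free a (sub)tree of the page directory; "indir" is the
 * number of indirection levels below "p", with zero meaning "p" is a
 * data page itself.
 */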
331 static void
332 ctl_backend_ramdisk_freeallpages(uint8_t **p, int indir)
333 {
334         int i;
335
336         if (p == NULL)
337                 return;
338         if (indir == 0) {
339                 free(p, M_RAMDISK);
340                 return;
341         }
342         for (i = 0; i < PPP; i++) {
343                 if (p[i] == NULL)
344                         continue;
345                 ctl_backend_ramdisk_freeallpages((uint8_t **)p[i], indir - 1);
346         }
347         free(p, M_RAMDISK);
348 }
349
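/*
 * Compare helper: returns the count of leading bytes that match, which
 * equals "size" when the buffers are identical.
 */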
350 static size_t
351 cmp(uint8_t *a, uint8_t *b, size_t size)
352 {
353         size_t i;
354
355         for (i = 0; i < size; i++) {
356                 if (a[i] != b[i])
357                         break;
358         }
359         return (i);
360 }
361
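/*
 * Compare the data just received for a CTL_LLF_COMPARE request against
 * the backing store.  Returns non-zero on a miscompare, after building
 * MISCOMPARE sense whose INFORMATION field holds the byte offset of the
 * first difference within the transfer.
 */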
362 static int
363 ctl_backend_ramdisk_cmp(union ctl_io *io)
364 {
365         struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
366         struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
367         uint8_t *page;
368         uint8_t info[8];
369         uint64_t lba;
370         u_int lbaoff, lbas, res, off;
371
372         lbas = io->scsiio.kern_data_len / cbe_lun->blocksize;
373         lba = ARGS(io)->lba + PRIV(io)->len - lbas;
374         off = 0;
375         for (; lbas > 0; lbas--, lba++) {
376                 page = ctl_backend_ramdisk_getpage(be_lun,
377                     lba >> cbe_lun->pblockexp, GP_READ);
378                 lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
379                 page += lbaoff * cbe_lun->blocksize;
380                 res = cmp(io->scsiio.kern_data_ptr + off, page,
381                     cbe_lun->blocksize);
382                 off += res;
383                 if (res < cbe_lun->blocksize)
384                         break;
385         }
386         if (lbas > 0) {
387                 off += io->scsiio.kern_rel_offset - io->scsiio.kern_data_len;
388                 scsi_u64to8b(off, info);
389                 ctl_set_sense(&io->scsiio, /*current_error*/ 1,
390                     /*sense_key*/ SSD_KEY_MISCOMPARE,
391                     /*asc*/ 0x1D, /*ascq*/ 0x00,
392                     /*type*/ SSD_ELEM_INFO,
393                     /*size*/ sizeof(info), /*data*/ &info,
394                     /*type*/ SSD_ELEM_NONE);
395                 return (1);
396         }
397         return (0);
398 }
399
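/*
 * Datamove completion callback: accounts DMA time, frees a temporary
 * S/G list if one was allocated, checks for transport errors and
 * residuals, runs the compare step for CTL_LLF_COMPARE requests, and
 * then either queues the I/O to the worker thread for its next chunk
 * or completes it.
 */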
400 static int
401 ctl_backend_ramdisk_move_done(union ctl_io *io)
402 {
403         struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
404         struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
405 #ifdef CTL_TIME_IO
406         struct bintime cur_bt;
407 #endif
408
409         CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n"));
410 #ifdef CTL_TIME_IO
411         getbinuptime(&cur_bt);
412         bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
413         bintime_add(&io->io_hdr.dma_bt, &cur_bt);
414 #endif
415         io->io_hdr.num_dmas++;
416         if (io->scsiio.kern_sg_entries > 0)
417                 free(io->scsiio.kern_data_ptr, M_RAMDISK);
418         io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
419         if (io->io_hdr.flags & CTL_FLAG_ABORT) {
420                 ;
421         } else if (io->io_hdr.port_status != 0 &&
422             ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
423              (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
424                 ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1,
425                     /*retry_count*/ io->io_hdr.port_status);
426         } else if (io->scsiio.kern_data_resid != 0 &&
427             (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT &&
428             ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
429              (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
430                 ctl_set_invalid_field_ciu(&io->scsiio);
431         } else if ((io->io_hdr.port_status == 0) &&
432             ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
433                 if (ARGS(io)->flags & CTL_LLF_COMPARE) {
434                         /* We have a data block ready for comparison. */
435                         if (ctl_backend_ramdisk_cmp(io))
436                                 goto done;
437                 }
438                 if (ARGS(io)->len > PRIV(io)->len) {
439                         mtx_lock(&be_lun->queue_lock);
440                         STAILQ_INSERT_TAIL(&be_lun->cont_queue,
441                             &io->io_hdr, links);
442                         mtx_unlock(&be_lun->queue_lock);
443                         taskqueue_enqueue(be_lun->io_taskqueue,
444                             &be_lun->io_task);
445                         return (0);
446                 }
447                 ctl_set_success(&io->scsiio);
448         }
449 done:
450         ctl_data_submit_done(io);
451         return (0);
452 }
453
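/*
 * Start (or continue) a compare request: allocate a bounce buffer for
 * up to 128KB worth of blocks and ask CTL to fetch the next chunk from
 * the initiator; the comparison itself happens in the move-done
 * callback above.
 */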
454 static void
455 ctl_backend_ramdisk_compare(union ctl_io *io)
456 {
457         struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
458         u_int lbas, len;
459
460         lbas = ARGS(io)->len - PRIV(io)->len;
461         lbas = MIN(lbas, 131072 / cbe_lun->blocksize);
462         len = lbas * cbe_lun->blocksize;
463
464         io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
465         io->scsiio.kern_data_ptr = malloc(len, M_RAMDISK, M_WAITOK);
466         io->scsiio.kern_data_len = len;
467         io->scsiio.kern_sg_entries = 0;
468         io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
469         PRIV(io)->len += lbas;
470 #ifdef CTL_TIME_IO
471         getbinuptime(&io->io_hdr.dma_start_bt);
472 #endif
473         ctl_datamove(io);
474 }
475
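/*
 * Start (or continue) a read or write.  Each chunk maps directly onto
 * backing pages: a single-page chunk points straight into the page,
 * while larger chunks get an S/G list with one entry per backing page
 * (at most SGPP entries).  Running out of capacity on a write is
 * reported as a space allocation failure.
 */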
476 static void
477 ctl_backend_ramdisk_rw(union ctl_io *io)
478 {
479         struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
480         struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
481         struct ctl_sg_entry *sg_entries;
482         uint8_t *page;
483         uint64_t lba;
484         u_int i, len, lbaoff, lbas, sgs, off;
485         getpage_op_t op;
486
487         lba = ARGS(io)->lba + PRIV(io)->len;
488         lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
489         lbas = ARGS(io)->len - PRIV(io)->len;
490         lbas = MIN(lbas, (SGPP << cbe_lun->pblockexp) - lbaoff);
491         sgs = (lbas + lbaoff + be_lun->pblockmul - 1) >> cbe_lun->pblockexp;
492         off = lbaoff * cbe_lun->blocksize;
493         op = (ARGS(io)->flags & CTL_LLF_WRITE) ? GP_WRITE : GP_READ;
494         if (sgs > 1) {
495                 io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) *
496                     sgs, M_RAMDISK, M_WAITOK);
497                 sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
498                 len = lbas * cbe_lun->blocksize;
499                 for (i = 0; i < sgs; i++) {
500                         page = ctl_backend_ramdisk_getpage(be_lun,
501                             (lba >> cbe_lun->pblockexp) + i, op);
502                         if (page == P_UNMAPPED || page == P_ANCHORED) {
503                                 free(io->scsiio.kern_data_ptr, M_RAMDISK);
504 nospc:
505                                 ctl_set_space_alloc_fail(&io->scsiio);
506                                 ctl_data_submit_done(io);
507                                 return;
508                         }
509                         sg_entries[i].addr = page + off;
510                         sg_entries[i].len = MIN(len, be_lun->pblocksize - off);
511                         len -= sg_entries[i].len;
512                         off = 0;
513                 }
514         } else {
515                 page = ctl_backend_ramdisk_getpage(be_lun,
516                     lba >> cbe_lun->pblockexp, op);
517                 if (page == P_UNMAPPED || page == P_ANCHORED)
518                         goto nospc;
519                 sgs = 0;
520                 io->scsiio.kern_data_ptr = page + off;
521         }
522
523         io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
524         io->scsiio.kern_data_len = lbas * cbe_lun->blocksize;
525         io->scsiio.kern_sg_entries = sgs;
526         io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
527         PRIV(io)->len += lbas;
528         if ((ARGS(io)->flags & CTL_LLF_READ) &&
529             ARGS(io)->len <= PRIV(io)->len) {
530                 ctl_set_success(&io->scsiio);
531                 ctl_serseq_done(io);
532         }
533 #ifdef CTL_TIME_IO
534         getbinuptime(&io->io_hdr.dma_start_bt);
535 #endif
536         ctl_datamove(io);
537 }
538
539 static int
540 ctl_backend_ramdisk_submit(union ctl_io *io)
541 {
542         struct ctl_lba_len_flags *lbalen = ARGS(io);
543
544         if (lbalen->flags & CTL_LLF_VERIFY) {
545                 ctl_set_success(&io->scsiio);
546                 ctl_data_submit_done(io);
547                 return (CTL_RETVAL_COMPLETE);
548         }
549         PRIV(io)->len = 0;
550         if (lbalen->flags & CTL_LLF_COMPARE)
551                 ctl_backend_ramdisk_compare(io);
552         else
553                 ctl_backend_ramdisk_rw(io);
554         return (CTL_RETVAL_COMPLETE);
555 }
556
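/*
 * Taskqueue worker: drains the continuation queue, restarting each
 * partially completed request where the move-done callback left off.
 */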
557 static void
558 ctl_backend_ramdisk_worker(void *context, int pending)
559 {
560         struct ctl_be_ramdisk_lun *be_lun;
561         union ctl_io *io;
562
563         be_lun = (struct ctl_be_ramdisk_lun *)context;
564         mtx_lock(&be_lun->queue_lock);
565         for (;;) {
566                 io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue);
567                 if (io != NULL) {
568                         STAILQ_REMOVE(&be_lun->cont_queue, &io->io_hdr,
569                                       ctl_io_hdr, links);
570                         mtx_unlock(&be_lun->queue_lock);
571                         if (ARGS(io)->flags & CTL_LLF_COMPARE)
572                                 ctl_backend_ramdisk_compare(io);
573                         else
574                                 ctl_backend_ramdisk_rw(io);
575                         mtx_lock(&be_lun->queue_lock);
576                         continue;
577                 }
578
579                 /*
580                  * If we get here, there is no work left in the queues, so
581                  * just break out and let the task queue go to sleep.
582                  */
583                 break;
584         }
585         mtx_unlock(&be_lun->queue_lock);
586 }
587
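/*
 * GET LBA STATUS: report whether the backing page containing the first
 * requested LBA is mapped (0), deallocated (1) or anchored (2), and how
 * many blocks of that page remain starting at that LBA.
 */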
588 static int
589 ctl_backend_ramdisk_gls(union ctl_io *io)
590 {
591         struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
592         struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
593         struct scsi_get_lba_status_data *data;
594         uint8_t *page;
595         u_int lbaoff;
596
597         data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr;
598         scsi_u64to8b(ARGS(io)->lba, data->descr[0].addr);
599         lbaoff = ARGS(io)->lba & ~(UINT_MAX << cbe_lun->pblockexp);
600         scsi_ulto4b(be_lun->pblockmul - lbaoff, data->descr[0].length);
601         page = ctl_backend_ramdisk_getpage(be_lun,
602             ARGS(io)->lba >> cbe_lun->pblockexp, GP_OTHER);
603         if (page == P_UNMAPPED)
604                 data->descr[0].status = 1;
605         else if (page == P_ANCHORED)
606                 data->descr[0].status = 2;
607         else
608                 data->descr[0].status = 0;
609         ctl_config_read_done(io);
610         return (CTL_RETVAL_COMPLETE);
611 }
612
613 static int
614 ctl_backend_ramdisk_config_read(union ctl_io *io)
615 {
616         int retval = 0;
617
618         switch (io->scsiio.cdb[0]) {
619         case SERVICE_ACTION_IN:
620                 if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) {
621                         retval = ctl_backend_ramdisk_gls(io);
622                         break;
623                 }
624                 ctl_set_invalid_field(&io->scsiio,
625                                       /*sks_valid*/ 1,
626                                       /*command*/ 1,
627                                       /*field*/ 1,
628                                       /*bit_valid*/ 1,
629                                       /*bit*/ 4);
630                 ctl_config_read_done(io);
631                 retval = CTL_RETVAL_COMPLETE;
632                 break;
633         default:
634                 ctl_set_invalid_opcode(&io->scsiio);
635                 ctl_config_read_done(io);
636                 retval = CTL_RETVAL_COMPLETE;
637                 break;
638         }
639         return (retval);
640 }
641
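/*
 * Common helper for UNMAP and WRITE SAME with UNMAP: zero the covered
 * parts of any partial first and last pages, then unmap (or anchor, if
 * requested) every whole backing page in between.
 */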
642 static void
643 ctl_backend_ramdisk_delete(struct ctl_be_lun *cbe_lun, off_t lba, off_t len,
644     int anchor)
645 {
646         struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
647         uint8_t *page;
648         uint64_t p, lp;
649         u_int lbaoff;
650         getpage_op_t op = anchor ? GP_ANCHOR : GP_OTHER;
651
652         /* Partially zero first partial page. */
653         p = lba >> cbe_lun->pblockexp;
654         lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
655         if (lbaoff != 0) {
656                 page = ctl_backend_ramdisk_getpage(be_lun, p, op);
657                 if (page != P_UNMAPPED && page != P_ANCHORED) {
658                         memset(page + lbaoff * cbe_lun->blocksize, 0,
659                             min(len, be_lun->pblockmul - lbaoff) *
660                             cbe_lun->blocksize);
661                 }
662                 p++;
663         }
664
665         /* Partially zero last partial page. */
666         lp = (lba + len) >> cbe_lun->pblockexp;
667         lbaoff = (lba + len) & ~(UINT_MAX << cbe_lun->pblockexp);
668         if (p <= lp && lbaoff != 0) {
669                 page = ctl_backend_ramdisk_getpage(be_lun, lp, op);
670                 if (page != P_UNMAPPED && page != P_ANCHORED)
671                         memset(page, 0, lbaoff * cbe_lun->blocksize);
672         }
673
674         /* Delete remaining full pages. */
675         if (anchor) {
676                 for (; p < lp; p++)
677                         ctl_backend_ramdisk_anchorpage(be_lun, p);
678         } else {
679                 for (; p < lp; p++)
680                         ctl_backend_ramdisk_unmappage(be_lun, p);
681         }
682 }
683
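/*
 * WRITE SAME: with UNMAP the range is handed to the delete helper
 * (optionally anchoring it); otherwise every block in the range is
 * filled with the supplied block, or with zeroes for NDOB, allocating
 * backing pages as needed, and LBDATA stamps each block with its LBA.
 */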
684 static void
685 ctl_backend_ramdisk_ws(union ctl_io *io)
686 {
687         struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
688         struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
689         struct ctl_lba_len_flags *lbalen = ARGS(io);
690         uint8_t *page;
691         uint64_t lba;
692         u_int lbaoff, lbas;
693
694         if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP | SWS_ANCHOR | SWS_NDOB)) {
695                 ctl_set_invalid_field(&io->scsiio,
696                                       /*sks_valid*/ 1,
697                                       /*command*/ 1,
698                                       /*field*/ 1,
699                                       /*bit_valid*/ 0,
700                                       /*bit*/ 0);
701                 ctl_config_write_done(io);
702                 return;
703         }
704         if (lbalen->flags & SWS_UNMAP) {
705                 ctl_backend_ramdisk_delete(cbe_lun, lbalen->lba, lbalen->len,
706                     (lbalen->flags & SWS_ANCHOR) != 0);
707                 ctl_set_success(&io->scsiio);
708                 ctl_config_write_done(io);
709                 return;
710         }
711
712         for (lba = lbalen->lba, lbas = lbalen->len; lbas > 0; lba++, lbas--) {
713                 page = ctl_backend_ramdisk_getpage(be_lun,
714                     lba >> cbe_lun->pblockexp, GP_WRITE);
715                 if (page == P_UNMAPPED || page == P_ANCHORED) {
716                         ctl_set_space_alloc_fail(&io->scsiio);
717                         ctl_data_submit_done(io);
718                         return;
719                 }
720                 lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
721                 page += lbaoff * cbe_lun->blocksize;
722                 if (lbalen->flags & SWS_NDOB) {
723                         memset(page, 0, cbe_lun->blocksize);
724                 } else {
725                         memcpy(page, io->scsiio.kern_data_ptr,
726                             cbe_lun->blocksize);
727                 }
728                 if (lbalen->flags & SWS_LBDATA)
729                         scsi_ulto4b(lba, page);
730         }
731         ctl_set_success(&io->scsiio);
732         ctl_config_write_done(io);
733 }
734
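/*
 * UNMAP: walk the descriptor list prepared by CTL and delete (or
 * anchor) each described LBA range.
 */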
735 static void
736 ctl_backend_ramdisk_unmap(union ctl_io *io)
737 {
738         struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
739         struct ctl_ptr_len_flags *ptrlen = (struct ctl_ptr_len_flags *)ARGS(io);
740         struct scsi_unmap_desc *buf, *end;
741
742         if ((ptrlen->flags & ~SU_ANCHOR) != 0) {
743                 ctl_set_invalid_field(&io->scsiio,
744                                       /*sks_valid*/ 0,
745                                       /*command*/ 0,
746                                       /*field*/ 0,
747                                       /*bit_valid*/ 0,
748                                       /*bit*/ 0);
749                 ctl_config_write_done(io);
750                 return;
751         }
752
753         buf = (struct scsi_unmap_desc *)ptrlen->ptr;
754         end = buf + ptrlen->len / sizeof(*buf);
755         for (; buf < end; buf++) {
756                 ctl_backend_ramdisk_delete(cbe_lun,
757                     scsi_8btou64(buf->lba), scsi_4btoul(buf->length),
758                     (ptrlen->flags & SU_ANCHOR) != 0);
759         }
760
761         ctl_set_success(&io->scsiio);
762         ctl_config_write_done(io);
763 }
764
765 static int
766 ctl_backend_ramdisk_config_write(union ctl_io *io)
767 {
768         struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
769         int retval = 0;
770
771         switch (io->scsiio.cdb[0]) {
772         case SYNCHRONIZE_CACHE:
773         case SYNCHRONIZE_CACHE_16:
774                 /* We have no cache to flush. */
775                 ctl_set_success(&io->scsiio);
776                 ctl_config_write_done(io);
777                 break;
778         case START_STOP_UNIT: {
779                 struct scsi_start_stop_unit *cdb;
780
781                 cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
782                 if ((cdb->how & SSS_PC_MASK) != 0) {
783                         ctl_set_success(&io->scsiio);
784                         ctl_config_write_done(io);
785                         break;
786                 }
787                 if (cdb->how & SSS_START) {
788                         if (cdb->how & SSS_LOEJ)
789                                 ctl_lun_has_media(cbe_lun);
790                         ctl_start_lun(cbe_lun);
791                 } else {
792                         ctl_stop_lun(cbe_lun);
793                         if (cdb->how & SSS_LOEJ)
794                                 ctl_lun_ejected(cbe_lun);
795                 }
796                 ctl_set_success(&io->scsiio);
797                 ctl_config_write_done(io);
798                 break;
799         }
800         case PREVENT_ALLOW:
801                 ctl_set_success(&io->scsiio);
802                 ctl_config_write_done(io);
803                 break;
804         case WRITE_SAME_10:
805         case WRITE_SAME_16:
806                 ctl_backend_ramdisk_ws(io);
807                 break;
808         case UNMAP:
809                 ctl_backend_ramdisk_unmap(io);
810                 break;
811         default:
812                 ctl_set_invalid_opcode(&io->scsiio);
813                 ctl_config_write_done(io);
814                 retval = CTL_RETVAL_COMPLETE;
815                 break;
816         }
817
818         return (retval);
819 }
820
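/*
 * Report thin-provisioning statistics ("blocksused"/"blocksavail") for
 * LUNs with a capacity limit; any other attribute, or a LUN without a
 * capacity, yields UINT64_MAX.
 */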
821 static uint64_t
822 ctl_backend_ramdisk_lun_attr(void *arg, const char *attrname)
823 {
824         struct ctl_be_ramdisk_lun *be_lun = arg;
825         uint64_t                val;
826
827         val = UINT64_MAX;
828         if (be_lun->cap_bytes == 0)
829                 return (val);
830         sx_slock(&be_lun->page_lock);
831         if (strcmp(attrname, "blocksused") == 0) {
832                 val = be_lun->cap_used / be_lun->cbe_lun.blocksize;
833         } else if (strcmp(attrname, "blocksavail") == 0) {
834                 val = (be_lun->cap_bytes - be_lun->cap_used) /
835                     be_lun->cbe_lun.blocksize;
836         }
837         sx_sunlock(&be_lun->page_lock);
838         return (val);
839 }
840
841 static int
842 ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
843                           int flag, struct thread *td)
844 {
845         struct ctl_be_ramdisk_softc *softc = &rd_softc;
846         struct ctl_lun_req *lun_req;
847         int retval;
848
849         retval = 0;
850         switch (cmd) {
851         case CTL_LUN_REQ:
852                 lun_req = (struct ctl_lun_req *)addr;
853                 switch (lun_req->reqtype) {
854                 case CTL_LUNREQ_CREATE:
855                         retval = ctl_backend_ramdisk_create(softc, lun_req);
856                         break;
857                 case CTL_LUNREQ_RM:
858                         retval = ctl_backend_ramdisk_rm(softc, lun_req);
859                         break;
860                 case CTL_LUNREQ_MODIFY:
861                         retval = ctl_backend_ramdisk_modify(softc, lun_req);
862                         break;
863                 default:
864                         lun_req->status = CTL_LUN_ERROR;
865                         snprintf(lun_req->error_str, sizeof(lun_req->error_str),
866                                  "%s: invalid LUN request type %d", __func__,
867                                  lun_req->reqtype);
868                         break;
869                 }
870                 break;
871         default:
872                 retval = ENOTTY;
873                 break;
874         }
875
876         return (retval);
877 }
878
879 static int
880 ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
881                        struct ctl_lun_req *req)
882 {
883         struct ctl_be_ramdisk_lun *be_lun;
884         struct ctl_lun_rm_params *params;
885         int retval;
886
887         params = &req->reqdata.rm;
888         mtx_lock(&softc->lock);
889         STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
890                 if (be_lun->cbe_lun.lun_id == params->lun_id)
891                         break;
892         }
893         mtx_unlock(&softc->lock);
894         if (be_lun == NULL) {
895                 snprintf(req->error_str, sizeof(req->error_str),
896                          "%s: LUN %u is not managed by the ramdisk backend",
897                          __func__, params->lun_id);
898                 goto bailout_error;
899         }
900
901         retval = ctl_disable_lun(&be_lun->cbe_lun);
902         if (retval != 0) {
903                 snprintf(req->error_str, sizeof(req->error_str),
904                          "%s: error %d returned from ctl_disable_lun() for "
905                          "LUN %d", __func__, retval, params->lun_id);
906                 goto bailout_error;
907         }
908
909         /*
910          * Set the waiting flag before we invalidate the LUN.  Our shutdown
911          * routine can be called any time after we invalidate the LUN,
912          * and can be called from our context.
913          *
914          * This tells the shutdown routine that we are waiting (or about to
915          * wait) for the shutdown, so it must wake us up instead of freeing the LUN.
916          */
917         mtx_lock(&softc->lock);
918         be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
919         mtx_unlock(&softc->lock);
920
921         retval = ctl_invalidate_lun(&be_lun->cbe_lun);
922         if (retval != 0) {
923                 snprintf(req->error_str, sizeof(req->error_str),
924                          "%s: error %d returned from ctl_invalidate_lun() for "
925                          "LUN %d", __func__, retval, params->lun_id);
926                 mtx_lock(&softc->lock);
927                 be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
928                 mtx_unlock(&softc->lock);
929                 goto bailout_error;
930         }
931
932         mtx_lock(&softc->lock);
933         while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) {
934                 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
935                 if (retval == EINTR)
936                         break;
937         }
938         be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
939
940         /*
941          * We only remove this LUN from the list and free it (below) if
942          * retval == 0.  If the user interrupted the wait, we just bail out
943          * without actually freeing the LUN.  We let the shutdown routine
944          * free the LUN if that happens.
945          */
946         if (retval == 0) {
947                 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
948                               links);
949                 softc->num_luns--;
950         }
951
952         mtx_unlock(&softc->lock);
953
954         if (retval == 0) {
955                 taskqueue_drain_all(be_lun->io_taskqueue);
956                 taskqueue_free(be_lun->io_taskqueue);
957                 ctl_free_opts(&be_lun->cbe_lun.options);
958                 free(be_lun->zero_page, M_RAMDISK);
959                 ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir);
960                 sx_destroy(&be_lun->page_lock);
961                 mtx_destroy(&be_lun->queue_lock);
962                 free(be_lun, M_RAMDISK);
963         }
964
965         req->status = CTL_LUN_OK;
966         return (retval);
967
968 bailout_error:
969         req->status = CTL_LUN_ERROR;
970         return (0);
971 }
972
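/*
 * Create a new ramdisk LUN.  Without a "capacity" option the LUN stores
 * nothing (writes are discarded, reads return zeroes); with one it acts
 * as a thin-provisioned RAM disk whose page-directory depth (indir)
 * grows with the LUN size.  For example, with 4KB backing pages and
 * 64-bit pointers one level of indirection covers 2MB and two levels
 * cover 1GB.
 */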
973 static int
974 ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
975                            struct ctl_lun_req *req)
976 {
977         struct ctl_be_ramdisk_lun *be_lun;
978         struct ctl_be_lun *cbe_lun;
979         struct ctl_lun_create_params *params;
980         char *value;
981         char tmpstr[32];
982         uint64_t t;
983         int retval;
984
985         retval = 0;
986         params = &req->reqdata.create;
987
988         be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | M_WAITOK);
989         cbe_lun = &be_lun->cbe_lun;
990         cbe_lun->be_lun = be_lun;
991         be_lun->params = req->reqdata.create;
992         be_lun->softc = softc;
993         sprintf(be_lun->lunname, "cram%d", softc->num_luns);
994         ctl_init_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args);
995
996         if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
997                 cbe_lun->lun_type = params->device_type;
998         else
999                 cbe_lun->lun_type = T_DIRECT;
1000         be_lun->flags = CTL_BE_RAMDISK_LUN_UNCONFIGURED;
1001         cbe_lun->flags = 0;
1002         value = ctl_get_opt(&cbe_lun->options, "ha_role");
1003         if (value != NULL) {
1004                 if (strcmp(value, "primary") == 0)
1005                         cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
1006         } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
1007                 cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
1008
1009         be_lun->pblocksize = PAGE_SIZE;
1010         value = ctl_get_opt(&cbe_lun->options, "pblocksize");
1011         if (value != NULL) {
1012                 ctl_expand_number(value, &t);
1013                 be_lun->pblocksize = t;
1014         }
1015         if (be_lun->pblocksize < 512 || be_lun->pblocksize > 131072) {
1016                 snprintf(req->error_str, sizeof(req->error_str),
1017                          "%s: unsupported pblocksize %u", __func__,
1018                          be_lun->pblocksize);
1019                 goto bailout_error;
1020         }
1021
1022         if (cbe_lun->lun_type == T_DIRECT ||
1023             cbe_lun->lun_type == T_CDROM) {
1024                 if (params->blocksize_bytes != 0)
1025                         cbe_lun->blocksize = params->blocksize_bytes;
1026                 else if (cbe_lun->lun_type == T_CDROM)
1027                         cbe_lun->blocksize = 2048;
1028                 else
1029                         cbe_lun->blocksize = 512;
1030                 be_lun->pblockmul = be_lun->pblocksize / cbe_lun->blocksize;
1031                 if (be_lun->pblockmul < 1 || !powerof2(be_lun->pblockmul)) {
1032                         snprintf(req->error_str, sizeof(req->error_str),
1033                          "%s: pblocksize %u is not a power-of-2 multiple of blocksize %u",
1034                                  __func__,
1035                                  be_lun->pblocksize, cbe_lun->blocksize);
1036                         goto bailout_error;
1037                 }
1038                 if (params->lun_size_bytes < cbe_lun->blocksize) {
1039                         snprintf(req->error_str, sizeof(req->error_str),
1040                                  "%s: LUN size %ju < blocksize %u", __func__,
1041                                  params->lun_size_bytes, cbe_lun->blocksize);
1042                         goto bailout_error;
1043                 }
1044                 be_lun->size_blocks = params->lun_size_bytes / cbe_lun->blocksize;
1045                 be_lun->size_bytes = be_lun->size_blocks * cbe_lun->blocksize;
1046                 be_lun->indir = 0;
1047                 t = be_lun->size_bytes / be_lun->pblocksize;
1048                 while (t > 1) {
1049                         t /= PPP;
1050                         be_lun->indir++;
1051                 }
1052                 cbe_lun->maxlba = be_lun->size_blocks - 1;
1053                 cbe_lun->pblockexp = fls(be_lun->pblockmul) - 1;
1054                 cbe_lun->pblockoff = 0;
1055                 cbe_lun->ublockexp = cbe_lun->pblockexp;
1056                 cbe_lun->ublockoff = 0;
1057                 cbe_lun->atomicblock = be_lun->pblocksize;
1058                 cbe_lun->opttxferlen = SGPP * be_lun->pblocksize;
1059                 value = ctl_get_opt(&cbe_lun->options, "capacity");
1060                 if (value != NULL)
1061                         ctl_expand_number(value, &be_lun->cap_bytes);
1062         } else {
1063                 be_lun->pblockmul = 1;
1064                 cbe_lun->pblockexp = 0;
1065         }
1066
1067         /* Tell the user the blocksize we ended up using */
1068         params->blocksize_bytes = cbe_lun->blocksize;
1069         params->lun_size_bytes = be_lun->size_bytes;
1070
1071         value = ctl_get_opt(&cbe_lun->options, "unmap");
1072         if (value == NULL || strcmp(value, "off") != 0)
1073                 cbe_lun->flags |= CTL_LUN_FLAG_UNMAP;
1074         value = ctl_get_opt(&cbe_lun->options, "readonly");
1075         if (value != NULL) {
1076                 if (strcmp(value, "on") == 0)
1077                         cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
1078         } else if (cbe_lun->lun_type != T_DIRECT)
1079                 cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
1080         cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;
1081         value = ctl_get_opt(&cbe_lun->options, "serseq");
1082         if (value != NULL && strcmp(value, "on") == 0)
1083                 cbe_lun->serseq = CTL_LUN_SERSEQ_ON;
1084         else if (value != NULL && strcmp(value, "read") == 0)
1085                 cbe_lun->serseq = CTL_LUN_SERSEQ_READ;
1086         else if (value != NULL && strcmp(value, "off") == 0)
1087                 cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;
1088
1089         if (params->flags & CTL_LUN_FLAG_ID_REQ) {
1090                 cbe_lun->req_lun_id = params->req_lun_id;
1091                 cbe_lun->flags |= CTL_LUN_FLAG_ID_REQ;
1092         } else
1093                 cbe_lun->req_lun_id = 0;
1094
1095         cbe_lun->lun_shutdown = ctl_backend_ramdisk_lun_shutdown;
1096         cbe_lun->lun_config_status = ctl_backend_ramdisk_lun_config_status;
1097         cbe_lun->be = &ctl_be_ramdisk_driver;
1098         if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
1099                 snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
1100                          softc->num_luns);
1101                 strncpy((char *)cbe_lun->serial_num, tmpstr,
1102                         MIN(sizeof(cbe_lun->serial_num), sizeof(tmpstr)));
1103
1104                 /* Tell the user what we used for a serial number */
1105                 strncpy((char *)params->serial_num, tmpstr,
1106                         MIN(sizeof(params->serial_num), sizeof(tmpstr)));
1107         } else { 
1108                 strncpy((char *)cbe_lun->serial_num, params->serial_num,
1109                         MIN(sizeof(cbe_lun->serial_num),
1110                             sizeof(params->serial_num)));
1111         }
1112         if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
1113                 snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
1114                 strncpy((char *)cbe_lun->device_id, tmpstr,
1115                         MIN(sizeof(cbe_lun->device_id), sizeof(tmpstr)));
1116
1117                 /* Tell the user what we used for a device ID */
1118                 strncpy((char *)params->device_id, tmpstr,
1119                         MIN(sizeof(params->device_id), sizeof(tmpstr)));
1120         } else {
1121                 strncpy((char *)cbe_lun->device_id, params->device_id,
1122                         MIN(sizeof(cbe_lun->device_id),
1123                             sizeof(params->device_id)));
1124         }
1125
1126         STAILQ_INIT(&be_lun->cont_queue);
1127         sx_init(&be_lun->page_lock, "cram page lock");
1128         if (be_lun->cap_bytes == 0) {
1129                 be_lun->indir = 0;
1130                 be_lun->pages = malloc(be_lun->pblocksize, M_RAMDISK, M_WAITOK);
1131         }
1132         be_lun->zero_page = malloc(be_lun->pblocksize, M_RAMDISK,
1133             M_WAITOK|M_ZERO);
1134         mtx_init(&be_lun->queue_lock, "cram queue lock", NULL, MTX_DEF);
1135         TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker,
1136             be_lun);
1137
1138         be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
1139             taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
1140         if (be_lun->io_taskqueue == NULL) {
1141                 snprintf(req->error_str, sizeof(req->error_str),
1142                          "%s: Unable to create taskqueue", __func__);
1143                 goto bailout_error;
1144         }
1145
1146         retval = taskqueue_start_threads(&be_lun->io_taskqueue,
1147                                          /*num threads*/1,
1148                                          /*priority*/PWAIT,
1149                                          /*thread name*/
1150                                          "%s taskq", be_lun->lunname);
1151         if (retval != 0)
1152                 goto bailout_error;
1153
1154         mtx_lock(&softc->lock);
1155         softc->num_luns++;
1156         STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);
1157         mtx_unlock(&softc->lock);
1158
1159         retval = ctl_add_lun(&be_lun->cbe_lun);
1160         if (retval != 0) {
1161                 mtx_lock(&softc->lock);
1162                 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
1163                               links);
1164                 softc->num_luns--;
1165                 mtx_unlock(&softc->lock);
1166                 snprintf(req->error_str, sizeof(req->error_str),
1167                          "%s: ctl_add_lun() returned error %d, see dmesg for "
1168                         "details", __func__, retval);
1169                 retval = 0;
1170                 goto bailout_error;
1171         }
1172
1173         mtx_lock(&softc->lock);
1174
1175         /*
1176          * Tell the config_status routine that we're waiting so it won't
1177          * clean up the LUN in the event of an error.
1178          */
1179         be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
1180
1181         while (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) {
1182                 retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
1183                 if (retval == EINTR)
1184                         break;
1185         }
1186         be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
1187
1188         if (be_lun->flags & CTL_BE_RAMDISK_LUN_CONFIG_ERR) {
1189                 snprintf(req->error_str, sizeof(req->error_str),
1190                          "%s: LUN configuration error, see dmesg for details",
1191                          __func__);
1192                 STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
1193                               links);
1194                 softc->num_luns--;
1195                 mtx_unlock(&softc->lock);
1196                 goto bailout_error;
1197         } else {
1198                 params->req_lun_id = cbe_lun->lun_id;
1199         }
1200         mtx_unlock(&softc->lock);
1201
1202         req->status = CTL_LUN_OK;
1203         return (retval);
1204
1205 bailout_error:
1206         req->status = CTL_LUN_ERROR;
1207         if (be_lun != NULL) {
1208                 if (be_lun->io_taskqueue != NULL)
1209                         taskqueue_free(be_lun->io_taskqueue);
1210                 ctl_free_opts(&cbe_lun->options);
1211                 free(be_lun->zero_page, M_RAMDISK);
1212                 ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir);
1213                 sx_destroy(&be_lun->page_lock);
1214                 mtx_destroy(&be_lun->queue_lock);
1215                 free(be_lun, M_RAMDISK);
1216         }
1217         return (retval);
1218 }
1219
1220 static int
1221 ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
1222                        struct ctl_lun_req *req)
1223 {
1224         struct ctl_be_ramdisk_lun *be_lun;
1225         struct ctl_be_lun *cbe_lun;
1226         struct ctl_lun_modify_params *params;
1227         char *value;
1228         uint32_t blocksize;
1229         int wasprim;
1230
1231         params = &req->reqdata.modify;
1232
1233         mtx_lock(&softc->lock);
1234         STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
1235                 if (be_lun->cbe_lun.lun_id == params->lun_id)
1236                         break;
1237         }
1238         mtx_unlock(&softc->lock);
1239         if (be_lun == NULL) {
1240                 snprintf(req->error_str, sizeof(req->error_str),
1241                          "%s: LUN %u is not managed by the ramdisk backend",
1242                          __func__, params->lun_id);
1243                 goto bailout_error;
1244         }
1245         cbe_lun = &be_lun->cbe_lun;
1246
1247         if (params->lun_size_bytes != 0)
1248                 be_lun->params.lun_size_bytes = params->lun_size_bytes;
1249         ctl_update_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args);
1250
1251         wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY);
1252         value = ctl_get_opt(&cbe_lun->options, "ha_role");
1253         if (value != NULL) {
1254                 if (strcmp(value, "primary") == 0)
1255                         cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
1256                 else
1257                         cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
1258         } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
1259                 cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
1260         else
1261                 cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
1262         if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) {
1263                 if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)
1264                         ctl_lun_primary(cbe_lun);
1265                 else
1266                         ctl_lun_secondary(cbe_lun);
1267         }
1268
1269         blocksize = be_lun->cbe_lun.blocksize;
1270         if (be_lun->params.lun_size_bytes < blocksize) {
1271                 snprintf(req->error_str, sizeof(req->error_str),
1272                         "%s: LUN size %ju < blocksize %u", __func__,
1273                         be_lun->params.lun_size_bytes, blocksize);
1274                 goto bailout_error;
1275         }
1276         be_lun->size_blocks = be_lun->params.lun_size_bytes / blocksize;
1277         be_lun->size_bytes = be_lun->size_blocks * blocksize;
1278         be_lun->cbe_lun.maxlba = be_lun->size_blocks - 1;
1279         ctl_lun_capacity_changed(&be_lun->cbe_lun);
1280
1281         /* Tell the user the exact size we ended up using */
1282         params->lun_size_bytes = be_lun->size_bytes;
1283
1284         req->status = CTL_LUN_OK;
1285         return (0);
1286
1287 bailout_error:
1288         req->status = CTL_LUN_ERROR;
1289         return (0);
1290 }
1291
1292 static void
1293 ctl_backend_ramdisk_lun_shutdown(void *be_lun)
1294 {
1295         struct ctl_be_ramdisk_lun *lun = be_lun;
1296         struct ctl_be_ramdisk_softc *softc = lun->softc;
1297
1298         mtx_lock(&softc->lock);
1299         lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED;
1300         if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
1301                 wakeup(lun);
1302         } else {
1303                 STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
1304                               links);
1305                 softc->num_luns--;
1306                 free(be_lun, M_RAMDISK);
1307         }
1308         mtx_unlock(&softc->lock);
1309 }
1310
1311 static void
1312 ctl_backend_ramdisk_lun_config_status(void *be_lun,
1313                                       ctl_lun_config_status status)
1314 {
1315         struct ctl_be_ramdisk_lun *lun;
1316         struct ctl_be_ramdisk_softc *softc;
1317
1318         lun = (struct ctl_be_ramdisk_lun *)be_lun;
1319         softc = lun->softc;
1320
1321         if (status == CTL_LUN_CONFIG_OK) {
1322                 mtx_lock(&softc->lock);
1323                 lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;
1324                 if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING)
1325                         wakeup(lun);
1326                 mtx_unlock(&softc->lock);
1327
1328                 /*
1329                  * We successfully added the LUN, attempt to enable it.
1330                  */
1331                 if (ctl_enable_lun(&lun->cbe_lun) != 0) {
1332                         printf("%s: ctl_enable_lun() failed!\n", __func__);
1333                         if (ctl_invalidate_lun(&lun->cbe_lun) != 0) {
1334                                 printf("%s: ctl_invalidate_lun() failed!\n",
1335                                        __func__);
1336                         }
1337                 }
1338
1339                 return;
1340         }
1341
1342
1343         mtx_lock(&softc->lock);
1344         lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;
1345
1346         /*
1347          * If we have a user waiting, let him handle the cleanup.  If not,
1348          * clean things up here.
1349          */
1350         if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
1351                 lun->flags |= CTL_BE_RAMDISK_LUN_CONFIG_ERR;
1352                 wakeup(lun);
1353         } else {
1354                 STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
1355                               links);
1356                 softc->num_luns--;
1357                 free(lun, M_RAMDISK);
1358         }
1359         mtx_unlock(&softc->lock);
1360 }