/*-
 * Copyright (c) 2003, 2008 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2014-2017 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Portions of this software were developed by Edward Tomasz Napierala
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 *
 * $Id: //depot/users/kenm/FreeBSD-test2/sys/cam/ctl/ctl_backend_ramdisk.c#3 $
 */
/*
 * CAM Target Layer black hole and RAM disk backend.
 *
 * Author: Ken Merry <ken@FreeBSD.org>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/condvar.h>
#include <sys/types.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <sys/conf.h>
#include <sys/ioccom.h>
#include <sys/module.h>
#include <sys/sysctl.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_util.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_ioctl.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_private.h>
#include <cam/ctl/ctl_error.h>

#define PRIV(io)        \
    ((struct ctl_ptr_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_BACKEND])
#define ARGS(io)        \
    ((struct ctl_lba_len_flags *)&(io)->io_hdr.ctl_private[CTL_PRIV_LBA_LEN])

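/*
 * Parameters of the page indirection scheme:
 * PPP  - number of page pointers that fit into one PAGE_SIZE block,
 * PPPS - log2 of PPP (page pointers are 8 bytes on LP64, 4 bytes otherwise),
 * SGPP - number of scatter/gather entries that fit into one PAGE_SIZE block.
 */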
#define PPP     (PAGE_SIZE / sizeof(uint8_t **))
#ifdef __LP64__
#define PPPS    (PAGE_SHIFT - 3)
#else
#define PPPS    (PAGE_SHIFT - 2)
#endif
#define SGPP    (PAGE_SIZE / sizeof(struct ctl_sg_entry))

#define P_UNMAPPED      NULL                    /* Page is unmapped. */
#define P_ANCHORED      ((void *)(uintptr_t)1)  /* Page is anchored. */

typedef enum {
        GP_READ,        /* Return data page or zero page. */
        GP_WRITE,       /* Return data page; try to allocate one if none. */
        GP_ANCHOR,      /* Return data page; try to anchor one if none. */
        GP_OTHER,       /* Return whatever is present; do not allocate or anchor. */
} getpage_op_t;

typedef enum {
        CTL_BE_RAMDISK_LUN_UNCONFIGURED = 0x01,
        CTL_BE_RAMDISK_LUN_CONFIG_ERR   = 0x02,
        CTL_BE_RAMDISK_LUN_WAITING      = 0x04
} ctl_be_ramdisk_lun_flags;

struct ctl_be_ramdisk_lun {
        struct ctl_lun_create_params params;
        char                    lunname[32];
        int                     indir;
        uint8_t                 **pages;
        uint8_t                 *zero_page;
        struct sx               page_lock;
        u_int                   pblocksize;
        u_int                   pblockmul;
        uint64_t                size_bytes;
        uint64_t                size_blocks;
        uint64_t                cap_bytes;
        uint64_t                cap_used;
        struct ctl_be_ramdisk_softc *softc;
        ctl_be_ramdisk_lun_flags flags;
        STAILQ_ENTRY(ctl_be_ramdisk_lun) links;
        struct ctl_be_lun       cbe_lun;
        struct taskqueue        *io_taskqueue;
        struct task             io_task;
        STAILQ_HEAD(, ctl_io_hdr) cont_queue;
        struct mtx_padalign     queue_lock;
};

struct ctl_be_ramdisk_softc {
        struct mtx lock;
        int num_luns;
        STAILQ_HEAD(, ctl_be_ramdisk_lun) lun_list;
};

static struct ctl_be_ramdisk_softc rd_softc;
extern struct ctl_softc *control_softc;

static int ctl_backend_ramdisk_init(void);
static int ctl_backend_ramdisk_shutdown(void);
static int ctl_backend_ramdisk_move_done(union ctl_io *io);
static void ctl_backend_ramdisk_compare(union ctl_io *io);
static void ctl_backend_ramdisk_rw(union ctl_io *io);
static int ctl_backend_ramdisk_submit(union ctl_io *io);
static void ctl_backend_ramdisk_worker(void *context, int pending);
static int ctl_backend_ramdisk_config_read(union ctl_io *io);
static int ctl_backend_ramdisk_config_write(union ctl_io *io);
static uint64_t ctl_backend_ramdisk_lun_attr(void *be_lun, const char *attrname);
static int ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd,
                                     caddr_t addr, int flag, struct thread *td);
static int ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
                                  struct ctl_lun_req *req);
static int ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
                                      struct ctl_lun_req *req);
static int ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
                                  struct ctl_lun_req *req);
static void ctl_backend_ramdisk_lun_shutdown(void *be_lun);
static void ctl_backend_ramdisk_lun_config_status(void *be_lun,
                                                  ctl_lun_config_status status);

static struct ctl_backend_driver ctl_be_ramdisk_driver =
{
        .name = "ramdisk",
        .flags = CTL_BE_FLAG_HAS_CONFIG,
        .init = ctl_backend_ramdisk_init,
        .shutdown = ctl_backend_ramdisk_shutdown,
        .data_submit = ctl_backend_ramdisk_submit,
        .data_move_done = ctl_backend_ramdisk_move_done,
        .config_read = ctl_backend_ramdisk_config_read,
        .config_write = ctl_backend_ramdisk_config_write,
        .ioctl = ctl_backend_ramdisk_ioctl,
        .lun_attr = ctl_backend_ramdisk_lun_attr,
};

MALLOC_DEFINE(M_RAMDISK, "ramdisk", "Memory used for CTL RAMdisk");
CTL_BACKEND_DECLARE(cbr, ctl_be_ramdisk_driver);

static int
ctl_backend_ramdisk_init(void)
{
        struct ctl_be_ramdisk_softc *softc = &rd_softc;

        memset(softc, 0, sizeof(*softc));
        mtx_init(&softc->lock, "ctlramdisk", NULL, MTX_DEF);
        STAILQ_INIT(&softc->lun_list);
        return (0);
}

static int
ctl_backend_ramdisk_shutdown(void)
{
        struct ctl_be_ramdisk_softc *softc = &rd_softc;
        struct ctl_be_ramdisk_lun *lun, *next_lun;

        mtx_lock(&softc->lock);
        STAILQ_FOREACH_SAFE(lun, &softc->lun_list, links, next_lun) {
                /*
                 * Drop our lock here.  Since ctl_invalidate_lun() can call
                 * back into us, this could potentially lead to a recursive
                 * lock of the same mutex, which would cause a hang.
                 */
                mtx_unlock(&softc->lock);
                ctl_disable_lun(&lun->cbe_lun);
                ctl_invalidate_lun(&lun->cbe_lun);
                mtx_lock(&softc->lock);
        }
        mtx_unlock(&softc->lock);
        mtx_destroy(&softc->lock);
        return (0);
}

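/*
 * Look up (and optionally allocate or anchor) the backing page that holds
 * page number "pn".  Data pages are reached through "indir" levels of
 * indirect pointer pages, consuming PPPS bits of the page number per level.
 * A LUN with cap_bytes == 0 has no real backing store ("black hole" mode):
 * reads return the shared zero page and writes land in a single scratch
 * page.
 */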
static uint8_t *
ctl_backend_ramdisk_getpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn,
    getpage_op_t op)
{
        uint8_t **p, ***pp;
        off_t i;
        int s;

        if (be_lun->cap_bytes == 0) {
                switch (op) {
                case GP_READ:
                        return (be_lun->zero_page);
                case GP_WRITE:
                        return ((uint8_t *)be_lun->pages);
                case GP_ANCHOR:
                        return (P_ANCHORED);
                default:
                        return (P_UNMAPPED);
                }
        }
        if (op == GP_WRITE || op == GP_ANCHOR) {
                sx_xlock(&be_lun->page_lock);
                pp = &be_lun->pages;
                for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
                        if (*pp == NULL) {
                                *pp = malloc(PAGE_SIZE, M_RAMDISK,
                                    M_WAITOK|M_ZERO);
                        }
                        i = pn >> s;
                        pp = (uint8_t ***)&(*pp)[i];
                        pn -= i << s;
                }
                if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) {
                        if (op == GP_WRITE) {
                                *pp = malloc(be_lun->pblocksize, M_RAMDISK,
                                    M_WAITOK|M_ZERO);
                        } else
                                *pp = P_ANCHORED;
                        be_lun->cap_used += be_lun->pblocksize;
                } else if (*pp == P_ANCHORED && op == GP_WRITE) {
                        *pp = malloc(be_lun->pblocksize, M_RAMDISK,
                            M_WAITOK|M_ZERO);
                }
                sx_xunlock(&be_lun->page_lock);
                return ((uint8_t *)*pp);
        } else {
                sx_slock(&be_lun->page_lock);
                p = be_lun->pages;
                for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
                        if (p == NULL)
                                break;
                        i = pn >> s;
                        p = (uint8_t **)p[i];
                        pn -= i << s;
                }
                sx_sunlock(&be_lun->page_lock);
                if ((p == P_UNMAPPED || p == P_ANCHORED) && op == GP_READ)
                        return (be_lun->zero_page);
                return ((uint8_t *)p);
        }
}

static void
ctl_backend_ramdisk_unmappage(struct ctl_be_ramdisk_lun *be_lun, off_t pn)
{
        uint8_t ***pp;
        off_t i;
        int s;

        if (be_lun->cap_bytes == 0)
                return;
        sx_xlock(&be_lun->page_lock);
        pp = &be_lun->pages;
        for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
                if (*pp == NULL)
                        goto noindir;
                i = pn >> s;
                pp = (uint8_t ***)&(*pp)[i];
                pn -= i << s;
        }
        if (*pp == P_ANCHORED) {
                be_lun->cap_used -= be_lun->pblocksize;
                *pp = P_UNMAPPED;
        } else if (*pp != P_UNMAPPED) {
                free(*pp, M_RAMDISK);
                be_lun->cap_used -= be_lun->pblocksize;
                *pp = P_UNMAPPED;
        }
noindir:
        sx_xunlock(&be_lun->page_lock);
}

static void
ctl_backend_ramdisk_anchorpage(struct ctl_be_ramdisk_lun *be_lun, off_t pn)
{
        uint8_t ***pp;
        off_t i;
        int s;

        if (be_lun->cap_bytes == 0)
                return;
        sx_xlock(&be_lun->page_lock);
        pp = &be_lun->pages;
        for (s = (be_lun->indir - 1) * PPPS; s >= 0; s -= PPPS) {
                if (*pp == NULL)
                        goto noindir;
                i = pn >> s;
                pp = (uint8_t ***)&(*pp)[i];
                pn -= i << s;
        }
        if (*pp == P_UNMAPPED && be_lun->cap_used < be_lun->cap_bytes) {
                be_lun->cap_used += be_lun->pblocksize;
                *pp = P_ANCHORED;
        } else if (*pp != P_ANCHORED) {
                free(*pp, M_RAMDISK);
                *pp = P_ANCHORED;
        }
noindir:
        sx_xunlock(&be_lun->page_lock);
}

static void
ctl_backend_ramdisk_freeallpages(uint8_t **p, int indir)
{
        int i;

        if (p == NULL)
                return;
        if (indir == 0) {
                free(p, M_RAMDISK);
                return;
        }
        for (i = 0; i < PPP; i++) {
                if (p[i] == NULL)
                        continue;
                ctl_backend_ramdisk_freeallpages((uint8_t **)p[i], indir - 1);
        }
        free(p, M_RAMDISK);
}

static size_t
cmp(uint8_t *a, uint8_t *b, size_t size)
{
        size_t i;

        for (i = 0; i < size; i++) {
                if (a[i] != b[i])
                        break;
        }
        return (i);
}

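/*
 * Compare the data received from the initiator against the backing pages.
 * On the first mismatching byte, build MISCOMPARE sense with the offset of
 * the mismatch in the INFORMATION field and return non-zero.
 */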
static int
ctl_backend_ramdisk_cmp(union ctl_io *io)
{
        struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
        struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
        uint8_t *page;
        uint8_t info[8];
        uint64_t lba;
        u_int lbaoff, lbas, res, off;

        lbas = io->scsiio.kern_data_len / cbe_lun->blocksize;
        lba = ARGS(io)->lba + PRIV(io)->len - lbas;
        off = 0;
        for (; lbas > 0; lbas--, lba++) {
                page = ctl_backend_ramdisk_getpage(be_lun,
                    lba >> cbe_lun->pblockexp, GP_READ);
                lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
                page += lbaoff * cbe_lun->blocksize;
                res = cmp(io->scsiio.kern_data_ptr + off, page,
                    cbe_lun->blocksize);
                off += res;
                if (res < cbe_lun->blocksize)
                        break;
        }
        if (lbas > 0) {
                off += io->scsiio.kern_rel_offset - io->scsiio.kern_data_len;
                scsi_u64to8b(off, info);
                ctl_set_sense(&io->scsiio, /*current_error*/ 1,
                    /*sense_key*/ SSD_KEY_MISCOMPARE,
                    /*asc*/ 0x1D, /*ascq*/ 0x00,
                    /*type*/ SSD_ELEM_INFO,
                    /*size*/ sizeof(info), /*data*/ &info,
                    /*type*/ SSD_ELEM_NONE);
                return (1);
        }
        return (0);
}

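/*
 * Data movement is complete.  Free any temporary S/G list, check for
 * transport errors, and either finish the command or, if the request is
 * not yet fully processed, queue it to the worker thread for the next
 * chunk.  COMPARE data is checked here as soon as it arrives.
 */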
static int
ctl_backend_ramdisk_move_done(union ctl_io *io)
{
        struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
        struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
#ifdef CTL_TIME_IO
        struct bintime cur_bt;
#endif

        CTL_DEBUG_PRINT(("ctl_backend_ramdisk_move_done\n"));
#ifdef CTL_TIME_IO
        getbinuptime(&cur_bt);
        bintime_sub(&cur_bt, &io->io_hdr.dma_start_bt);
        bintime_add(&io->io_hdr.dma_bt, &cur_bt);
#endif
        io->io_hdr.num_dmas++;
        if (io->scsiio.kern_sg_entries > 0)
                free(io->scsiio.kern_data_ptr, M_RAMDISK);
        io->scsiio.kern_rel_offset += io->scsiio.kern_data_len;
        if (io->io_hdr.flags & CTL_FLAG_ABORT) {
                ;
        } else if (io->io_hdr.port_status != 0 &&
            ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
             (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
                ctl_set_internal_failure(&io->scsiio, /*sks_valid*/ 1,
                    /*retry_count*/ io->io_hdr.port_status);
        } else if (io->scsiio.kern_data_resid != 0 &&
            (io->io_hdr.flags & CTL_FLAG_DATA_MASK) == CTL_FLAG_DATA_OUT &&
            ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE ||
             (io->io_hdr.status & CTL_STATUS_MASK) == CTL_SUCCESS)) {
                ctl_set_invalid_field_ciu(&io->scsiio);
        } else if ((io->io_hdr.port_status == 0) &&
            ((io->io_hdr.status & CTL_STATUS_MASK) == CTL_STATUS_NONE)) {
                if (ARGS(io)->flags & CTL_LLF_COMPARE) {
                        /* We have a data block ready for comparison. */
                        if (ctl_backend_ramdisk_cmp(io))
                                goto done;
                }
                if (ARGS(io)->len > PRIV(io)->len) {
                        mtx_lock(&be_lun->queue_lock);
                        STAILQ_INSERT_TAIL(&be_lun->cont_queue,
                            &io->io_hdr, links);
                        mtx_unlock(&be_lun->queue_lock);
                        taskqueue_enqueue(be_lun->io_taskqueue,
                            &be_lun->io_task);
                        return (0);
                }
                ctl_set_success(&io->scsiio);
        }
done:
        ctl_data_submit_done(io);
        return (0);
}

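/*
 * Request the next chunk of data for a COMPARE command from the initiator,
 * at most 128KB (131072 bytes) at a time, into a temporary buffer.
 */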
static void
ctl_backend_ramdisk_compare(union ctl_io *io)
{
        struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
        u_int lbas, len;

        lbas = ARGS(io)->len - PRIV(io)->len;
        lbas = MIN(lbas, 131072 / cbe_lun->blocksize);
        len = lbas * cbe_lun->blocksize;

        io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
        io->scsiio.kern_data_ptr = malloc(len, M_RAMDISK, M_WAITOK);
        io->scsiio.kern_data_len = len;
        io->scsiio.kern_sg_entries = 0;
        io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
        PRIV(io)->len += lbas;
#ifdef CTL_TIME_IO
        getbinuptime(&io->io_hdr.dma_start_bt);
#endif
        ctl_datamove(io);
}

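/*
 * Handle the next chunk of a READ or WRITE by pointing the S/G list
 * directly at the backing pages, so the data move to or from the initiator
 * needs no extra copy.  If a write needs a new backing page but the
 * configured capacity is exhausted, the command fails with a space
 * allocation failure.
 */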
static void
ctl_backend_ramdisk_rw(union ctl_io *io)
{
        struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
        struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
        struct ctl_sg_entry *sg_entries;
        uint8_t *page;
        uint64_t lba;
        u_int i, len, lbaoff, lbas, sgs, off;
        getpage_op_t op;

        lba = ARGS(io)->lba + PRIV(io)->len;
        lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
        lbas = ARGS(io)->len - PRIV(io)->len;
        lbas = MIN(lbas, (SGPP << cbe_lun->pblockexp) - lbaoff);
        sgs = (lbas + lbaoff + be_lun->pblockmul - 1) >> cbe_lun->pblockexp;
        off = lbaoff * cbe_lun->blocksize;
        op = (ARGS(io)->flags & CTL_LLF_WRITE) ? GP_WRITE : GP_READ;
        if (sgs > 1) {
                io->scsiio.kern_data_ptr = malloc(sizeof(struct ctl_sg_entry) *
                    sgs, M_RAMDISK, M_WAITOK);
                sg_entries = (struct ctl_sg_entry *)io->scsiio.kern_data_ptr;
                len = lbas * cbe_lun->blocksize;
                for (i = 0; i < sgs; i++) {
                        page = ctl_backend_ramdisk_getpage(be_lun,
                            (lba >> cbe_lun->pblockexp) + i, op);
                        if (page == P_UNMAPPED || page == P_ANCHORED) {
                                free(io->scsiio.kern_data_ptr, M_RAMDISK);
nospc:
                                ctl_set_space_alloc_fail(&io->scsiio);
                                ctl_data_submit_done(io);
                                return;
                        }
                        sg_entries[i].addr = page + off;
                        sg_entries[i].len = MIN(len, be_lun->pblocksize - off);
                        len -= sg_entries[i].len;
                        off = 0;
                }
        } else {
                page = ctl_backend_ramdisk_getpage(be_lun,
                    lba >> cbe_lun->pblockexp, op);
                if (page == P_UNMAPPED || page == P_ANCHORED)
                        goto nospc;
                sgs = 0;
                io->scsiio.kern_data_ptr = page + off;
        }

        io->scsiio.be_move_done = ctl_backend_ramdisk_move_done;
        io->scsiio.kern_data_len = lbas * cbe_lun->blocksize;
        io->scsiio.kern_sg_entries = sgs;
        io->io_hdr.flags |= CTL_FLAG_ALLOCATED;
        PRIV(io)->len += lbas;
#ifdef CTL_TIME_IO
        getbinuptime(&io->io_hdr.dma_start_bt);
#endif
        ctl_datamove(io);
}

static int
ctl_backend_ramdisk_submit(union ctl_io *io)
{
        struct ctl_lba_len_flags *lbalen = ARGS(io);

        if (lbalen->flags & CTL_LLF_VERIFY) {
                ctl_set_success(&io->scsiio);
                ctl_data_submit_done(io);
                return (CTL_RETVAL_COMPLETE);
        }
        PRIV(io)->len = 0;
        if (lbalen->flags & CTL_LLF_COMPARE)
                ctl_backend_ramdisk_compare(io);
        else
                ctl_backend_ramdisk_rw(io);
        return (CTL_RETVAL_COMPLETE);
}

static void
ctl_backend_ramdisk_worker(void *context, int pending)
{
        struct ctl_be_ramdisk_lun *be_lun;
        union ctl_io *io;

        be_lun = (struct ctl_be_ramdisk_lun *)context;
        mtx_lock(&be_lun->queue_lock);
        for (;;) {
                io = (union ctl_io *)STAILQ_FIRST(&be_lun->cont_queue);
                if (io != NULL) {
                        STAILQ_REMOVE(&be_lun->cont_queue, &io->io_hdr,
                                      ctl_io_hdr, links);
                        mtx_unlock(&be_lun->queue_lock);
                        if (ARGS(io)->flags & CTL_LLF_COMPARE)
                                ctl_backend_ramdisk_compare(io);
                        else
                                ctl_backend_ramdisk_rw(io);
                        mtx_lock(&be_lun->queue_lock);
                        continue;
                }

                /*
                 * If we get here, there is no work left in the queues, so
                 * just break out and let the task queue go to sleep.
                 */
                break;
        }
        mtx_unlock(&be_lun->queue_lock);
}

static int
ctl_backend_ramdisk_gls(union ctl_io *io)
{
        struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
        struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
        struct scsi_get_lba_status_data *data;
        uint8_t *page;
        u_int lbaoff;

        data = (struct scsi_get_lba_status_data *)io->scsiio.kern_data_ptr;
        scsi_u64to8b(ARGS(io)->lba, data->descr[0].addr);
        lbaoff = ARGS(io)->lba & ~(UINT_MAX << cbe_lun->pblockexp);
        scsi_ulto4b(be_lun->pblockmul - lbaoff, data->descr[0].length);
        page = ctl_backend_ramdisk_getpage(be_lun,
            ARGS(io)->lba >> cbe_lun->pblockexp, GP_OTHER);
        if (page == P_UNMAPPED)
                data->descr[0].status = 1;
        else if (page == P_ANCHORED)
                data->descr[0].status = 2;
        else
                data->descr[0].status = 0;
        ctl_config_read_done(io);
        return (CTL_RETVAL_COMPLETE);
}

static int
ctl_backend_ramdisk_config_read(union ctl_io *io)
{
        int retval = 0;

        switch (io->scsiio.cdb[0]) {
        case SERVICE_ACTION_IN:
                if (io->scsiio.cdb[1] == SGLS_SERVICE_ACTION) {
                        retval = ctl_backend_ramdisk_gls(io);
                        break;
                }
                ctl_set_invalid_field(&io->scsiio,
                                      /*sks_valid*/ 1,
                                      /*command*/ 1,
                                      /*field*/ 1,
                                      /*bit_valid*/ 1,
                                      /*bit*/ 4);
                ctl_config_read_done(io);
                retval = CTL_RETVAL_COMPLETE;
                break;
        default:
                ctl_set_invalid_opcode(&io->scsiio);
                ctl_config_read_done(io);
                retval = CTL_RETVAL_COMPLETE;
                break;
        }
        return (retval);
}

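/*
 * Unmap (or anchor) the given LBA range.  Blocks that only partially cover
 * a backing page are zeroed in place; fully covered pages are unmapped or
 * anchored as a whole.
 */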
static void
ctl_backend_ramdisk_delete(struct ctl_be_lun *cbe_lun, off_t lba, off_t len,
    int anchor)
{
        struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
        uint8_t *page;
        uint64_t p, lp;
        u_int lbaoff;
        getpage_op_t op = anchor ? GP_ANCHOR : GP_OTHER;

        /* Partially zero first partial page. */
        p = lba >> cbe_lun->pblockexp;
        lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
        if (lbaoff != 0) {
                page = ctl_backend_ramdisk_getpage(be_lun, p, op);
                if (page != P_UNMAPPED && page != P_ANCHORED) {
                        memset(page + lbaoff * cbe_lun->blocksize, 0,
                            min(len, be_lun->pblockmul - lbaoff) *
                            cbe_lun->blocksize);
                }
                p++;
        }

        /* Partially zero last partial page. */
        lp = (lba + len) >> cbe_lun->pblockexp;
        lbaoff = (lba + len) & ~(UINT_MAX << cbe_lun->pblockexp);
        if (p <= lp && lbaoff != 0) {
                page = ctl_backend_ramdisk_getpage(be_lun, lp, op);
                if (page != P_UNMAPPED && page != P_ANCHORED)
                        memset(page, 0, lbaoff * cbe_lun->blocksize);
        }

        /* Delete remaining full pages. */
        if (anchor) {
                for (; p < lp; p++)
                        ctl_backend_ramdisk_anchorpage(be_lun, p);
        } else {
                for (; p < lp; p++)
                        ctl_backend_ramdisk_unmappage(be_lun, p);
        }
}

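/*
 * WRITE SAME: with the UNMAP bit this is just a range delete; otherwise
 * replicate the single received block (or zeroes, for NDOB) across the
 * whole range, optionally stamping each block with its LBA (LBDATA).
 */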
static void
ctl_backend_ramdisk_ws(union ctl_io *io)
{
        struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
        struct ctl_be_ramdisk_lun *be_lun = cbe_lun->be_lun;
        struct ctl_lba_len_flags *lbalen = ARGS(io);
        uint8_t *page;
        uint64_t lba;
        u_int lbaoff, lbas;

        if (lbalen->flags & ~(SWS_LBDATA | SWS_UNMAP | SWS_ANCHOR | SWS_NDOB)) {
                ctl_set_invalid_field(&io->scsiio,
                                      /*sks_valid*/ 1,
                                      /*command*/ 1,
                                      /*field*/ 1,
                                      /*bit_valid*/ 0,
                                      /*bit*/ 0);
                ctl_config_write_done(io);
                return;
        }
        if (lbalen->flags & SWS_UNMAP) {
                ctl_backend_ramdisk_delete(cbe_lun, lbalen->lba, lbalen->len,
                    (lbalen->flags & SWS_ANCHOR) != 0);
                ctl_set_success(&io->scsiio);
                ctl_config_write_done(io);
                return;
        }

        for (lba = lbalen->lba, lbas = lbalen->len; lbas > 0; lba++, lbas--) {
                page = ctl_backend_ramdisk_getpage(be_lun,
                    lba >> cbe_lun->pblockexp, GP_WRITE);
                if (page == P_UNMAPPED || page == P_ANCHORED) {
                        ctl_set_space_alloc_fail(&io->scsiio);
                        ctl_data_submit_done(io);
                        return;
                }
                lbaoff = lba & ~(UINT_MAX << cbe_lun->pblockexp);
                page += lbaoff * cbe_lun->blocksize;
                if (lbalen->flags & SWS_NDOB) {
                        memset(page, 0, cbe_lun->blocksize);
                } else {
                        memcpy(page, io->scsiio.kern_data_ptr,
                            cbe_lun->blocksize);
                }
                if (lbalen->flags & SWS_LBDATA)
                        scsi_ulto4b(lba, page);
        }
        ctl_set_success(&io->scsiio);
        ctl_config_write_done(io);
}

static void
ctl_backend_ramdisk_unmap(union ctl_io *io)
{
        struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
        struct ctl_ptr_len_flags *ptrlen = (struct ctl_ptr_len_flags *)ARGS(io);
        struct scsi_unmap_desc *buf, *end;

        if ((ptrlen->flags & ~SU_ANCHOR) != 0) {
                ctl_set_invalid_field(&io->scsiio,
                                      /*sks_valid*/ 0,
                                      /*command*/ 0,
                                      /*field*/ 0,
                                      /*bit_valid*/ 0,
                                      /*bit*/ 0);
                ctl_config_write_done(io);
                return;
        }

        buf = (struct scsi_unmap_desc *)ptrlen->ptr;
        end = buf + ptrlen->len / sizeof(*buf);
        for (; buf < end; buf++) {
                ctl_backend_ramdisk_delete(cbe_lun,
                    scsi_8btou64(buf->lba), scsi_4btoul(buf->length),
                    (ptrlen->flags & SU_ANCHOR) != 0);
        }

        ctl_set_success(&io->scsiio);
        ctl_config_write_done(io);
}

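/*
 * Dispatch configuration (non-data) writes.  Cache syncs and PREVENT ALLOW
 * are no-ops for a RAM disk; START STOP UNIT starts or stops the LUN and
 * handles media load/eject; WRITE SAME and UNMAP are handled above.
 */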
static int
ctl_backend_ramdisk_config_write(union ctl_io *io)
{
        struct ctl_be_lun *cbe_lun = CTL_BACKEND_LUN(io);
        int retval = 0;

        switch (io->scsiio.cdb[0]) {
        case SYNCHRONIZE_CACHE:
        case SYNCHRONIZE_CACHE_16:
                /* We have no cache to flush. */
                ctl_set_success(&io->scsiio);
                ctl_config_write_done(io);
                break;
        case START_STOP_UNIT: {
                struct scsi_start_stop_unit *cdb;

                cdb = (struct scsi_start_stop_unit *)io->scsiio.cdb;
                if ((cdb->how & SSS_PC_MASK) != 0) {
                        ctl_set_success(&io->scsiio);
                        ctl_config_write_done(io);
                        break;
                }
                if (cdb->how & SSS_START) {
                        if (cdb->how & SSS_LOEJ)
                                ctl_lun_has_media(cbe_lun);
                        ctl_start_lun(cbe_lun);
                } else {
                        ctl_stop_lun(cbe_lun);
                        if (cdb->how & SSS_LOEJ)
                                ctl_lun_ejected(cbe_lun);
                }
                ctl_set_success(&io->scsiio);
                ctl_config_write_done(io);
                break;
        }
        case PREVENT_ALLOW:
                ctl_set_success(&io->scsiio);
                ctl_config_write_done(io);
                break;
        case WRITE_SAME_10:
        case WRITE_SAME_16:
                ctl_backend_ramdisk_ws(io);
                break;
        case UNMAP:
                ctl_backend_ramdisk_unmap(io);
                break;
        default:
                ctl_set_invalid_opcode(&io->scsiio);
                ctl_config_write_done(io);
                retval = CTL_RETVAL_COMPLETE;
                break;
        }

        return (retval);
}

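/*
 * Report the "blocksused" and "blocksavail" LUN attributes from the
 * capacity accounting; both are unknown (UINT64_MAX) for LUNs without a
 * capacity limit.
 */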
static uint64_t
ctl_backend_ramdisk_lun_attr(void *arg, const char *attrname)
{
        struct ctl_be_ramdisk_lun *be_lun = arg;
        uint64_t                val;

        val = UINT64_MAX;
        if (be_lun->cap_bytes == 0)
                return (val);
        sx_slock(&be_lun->page_lock);
        if (strcmp(attrname, "blocksused") == 0) {
                val = be_lun->cap_used / be_lun->cbe_lun.blocksize;
        } else if (strcmp(attrname, "blocksavail") == 0) {
                val = (be_lun->cap_bytes - be_lun->cap_used) /
                    be_lun->cbe_lun.blocksize;
        }
        sx_sunlock(&be_lun->page_lock);
        return (val);
}

static int
ctl_backend_ramdisk_ioctl(struct cdev *dev, u_long cmd, caddr_t addr,
                          int flag, struct thread *td)
{
        struct ctl_be_ramdisk_softc *softc = &rd_softc;
        struct ctl_lun_req *lun_req;
        int retval;

        retval = 0;
        switch (cmd) {
        case CTL_LUN_REQ:
                lun_req = (struct ctl_lun_req *)addr;
                switch (lun_req->reqtype) {
                case CTL_LUNREQ_CREATE:
                        retval = ctl_backend_ramdisk_create(softc, lun_req);
                        break;
                case CTL_LUNREQ_RM:
                        retval = ctl_backend_ramdisk_rm(softc, lun_req);
                        break;
                case CTL_LUNREQ_MODIFY:
                        retval = ctl_backend_ramdisk_modify(softc, lun_req);
                        break;
                default:
                        lun_req->status = CTL_LUN_ERROR;
                        snprintf(lun_req->error_str, sizeof(lun_req->error_str),
                                 "%s: invalid LUN request type %d", __func__,
                                 lun_req->reqtype);
                        break;
                }
                break;
        default:
                retval = ENOTTY;
                break;
        }

        return (retval);
}

static int
ctl_backend_ramdisk_rm(struct ctl_be_ramdisk_softc *softc,
                       struct ctl_lun_req *req)
{
        struct ctl_be_ramdisk_lun *be_lun;
        struct ctl_lun_rm_params *params;
        int retval;

        params = &req->reqdata.rm;
        mtx_lock(&softc->lock);
        STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
                if (be_lun->cbe_lun.lun_id == params->lun_id)
                        break;
        }
        mtx_unlock(&softc->lock);
        if (be_lun == NULL) {
                snprintf(req->error_str, sizeof(req->error_str),
                         "%s: LUN %u is not managed by the ramdisk backend",
                         __func__, params->lun_id);
                goto bailout_error;
        }

        retval = ctl_disable_lun(&be_lun->cbe_lun);
        if (retval != 0) {
                snprintf(req->error_str, sizeof(req->error_str),
                         "%s: error %d returned from ctl_disable_lun() for "
                         "LUN %d", __func__, retval, params->lun_id);
                goto bailout_error;
        }

        /*
         * Set the waiting flag before we invalidate the LUN.  Our shutdown
         * routine can be called any time after we invalidate the LUN,
         * and can be called from our context.
         *
         * This tells the shutdown routine that we're waiting, or we're
         * going to wait for the shutdown to happen.
         */
        mtx_lock(&softc->lock);
        be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;
        mtx_unlock(&softc->lock);

        retval = ctl_invalidate_lun(&be_lun->cbe_lun);
        if (retval != 0) {
                snprintf(req->error_str, sizeof(req->error_str),
                         "%s: error %d returned from ctl_invalidate_lun() for "
                         "LUN %d", __func__, retval, params->lun_id);
                mtx_lock(&softc->lock);
                be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;
                mtx_unlock(&softc->lock);
                goto bailout_error;
        }

        mtx_lock(&softc->lock);
        while ((be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) == 0) {
                retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
                if (retval == EINTR)
                        break;
        }
        be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;

        /*
         * We only remove this LUN from the list and free it (below) if
         * retval == 0.  If the user interrupted the wait, we just bail out
         * without actually freeing the LUN.  We let the shutdown routine
         * free the LUN if that happens.
         */
        if (retval == 0) {
                STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
                              links);
                softc->num_luns--;
        }

        mtx_unlock(&softc->lock);

        if (retval == 0) {
                taskqueue_drain_all(be_lun->io_taskqueue);
                taskqueue_free(be_lun->io_taskqueue);
                ctl_free_opts(&be_lun->cbe_lun.options);
                free(be_lun->zero_page, M_RAMDISK);
                ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir);
                sx_destroy(&be_lun->page_lock);
                mtx_destroy(&be_lun->queue_lock);
                free(be_lun, M_RAMDISK);
        }

        req->status = CTL_LUN_OK;
        return (retval);

bailout_error:
        req->status = CTL_LUN_ERROR;
        return (0);
}

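/*
 * Create a new LUN from a CTL_LUNREQ_CREATE request: parse the backend
 * options (pblocksize, capacity, unmap, readonly, serseq, ha_role), size
 * the page indirection tree, set up the continuation task queue, and
 * register the LUN with CTL.  A missing "capacity" option leaves
 * cap_bytes at zero, which selects the black hole mode described above.
 */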
static int
ctl_backend_ramdisk_create(struct ctl_be_ramdisk_softc *softc,
                           struct ctl_lun_req *req)
{
        struct ctl_be_ramdisk_lun *be_lun;
        struct ctl_be_lun *cbe_lun;
        struct ctl_lun_create_params *params;
        char *value;
        char tmpstr[32];
        uint64_t t;
        int retval;

        retval = 0;
        params = &req->reqdata.create;

        be_lun = malloc(sizeof(*be_lun), M_RAMDISK, M_ZERO | M_WAITOK);
        cbe_lun = &be_lun->cbe_lun;
        cbe_lun->be_lun = be_lun;
        be_lun->params = req->reqdata.create;
        be_lun->softc = softc;
        sprintf(be_lun->lunname, "cram%d", softc->num_luns);
        ctl_init_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args);

        if (params->flags & CTL_LUN_FLAG_DEV_TYPE)
                cbe_lun->lun_type = params->device_type;
        else
                cbe_lun->lun_type = T_DIRECT;
        be_lun->flags = CTL_BE_RAMDISK_LUN_UNCONFIGURED;
        cbe_lun->flags = 0;
        value = ctl_get_opt(&cbe_lun->options, "ha_role");
        if (value != NULL) {
                if (strcmp(value, "primary") == 0)
                        cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
        } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
                cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;

        be_lun->pblocksize = PAGE_SIZE;
        value = ctl_get_opt(&cbe_lun->options, "pblocksize");
        if (value != NULL) {
                ctl_expand_number(value, &t);
                be_lun->pblocksize = t;
        }
        if (be_lun->pblocksize < 512 || be_lun->pblocksize > 131072) {
                snprintf(req->error_str, sizeof(req->error_str),
                         "%s: unsupported pblocksize %u", __func__,
                         be_lun->pblocksize);
                goto bailout_error;
        }

        if (cbe_lun->lun_type == T_DIRECT ||
            cbe_lun->lun_type == T_CDROM) {
                if (params->blocksize_bytes != 0)
                        cbe_lun->blocksize = params->blocksize_bytes;
                else if (cbe_lun->lun_type == T_CDROM)
                        cbe_lun->blocksize = 2048;
                else
                        cbe_lun->blocksize = 512;
                be_lun->pblockmul = be_lun->pblocksize / cbe_lun->blocksize;
                if (be_lun->pblockmul < 1 || !powerof2(be_lun->pblockmul)) {
                        snprintf(req->error_str, sizeof(req->error_str),
                                 "%s: pblocksize %u not exp2 of blocksize %u",
                                 __func__,
                                 be_lun->pblocksize, cbe_lun->blocksize);
                        goto bailout_error;
                }
                if (params->lun_size_bytes < cbe_lun->blocksize) {
                        snprintf(req->error_str, sizeof(req->error_str),
                                 "%s: LUN size %ju < blocksize %u", __func__,
                                 params->lun_size_bytes, cbe_lun->blocksize);
                        goto bailout_error;
                }
                be_lun->size_blocks = params->lun_size_bytes / cbe_lun->blocksize;
                be_lun->size_bytes = be_lun->size_blocks * cbe_lun->blocksize;
                be_lun->indir = 0;
                t = be_lun->size_bytes / be_lun->pblocksize;
                while (t > 1) {
                        t /= PPP;
                        be_lun->indir++;
                }
                cbe_lun->maxlba = be_lun->size_blocks - 1;
                cbe_lun->pblockexp = fls(be_lun->pblockmul) - 1;
                cbe_lun->pblockoff = 0;
                cbe_lun->ublockexp = cbe_lun->pblockexp;
                cbe_lun->ublockoff = 0;
                cbe_lun->atomicblock = be_lun->pblocksize;
                cbe_lun->opttxferlen = SGPP * be_lun->pblocksize;
                value = ctl_get_opt(&cbe_lun->options, "capacity");
                if (value != NULL)
                        ctl_expand_number(value, &be_lun->cap_bytes);
        } else {
                be_lun->pblockmul = 1;
                cbe_lun->pblockexp = 0;
        }

        /* Tell the user the blocksize we ended up using */
        params->blocksize_bytes = cbe_lun->blocksize;
        params->lun_size_bytes = be_lun->size_bytes;

        value = ctl_get_opt(&cbe_lun->options, "unmap");
        if (value == NULL || strcmp(value, "off") != 0)
                cbe_lun->flags |= CTL_LUN_FLAG_UNMAP;
        value = ctl_get_opt(&cbe_lun->options, "readonly");
        if (value != NULL) {
                if (strcmp(value, "on") == 0)
                        cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
        } else if (cbe_lun->lun_type != T_DIRECT)
                cbe_lun->flags |= CTL_LUN_FLAG_READONLY;
        cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;
        value = ctl_get_opt(&cbe_lun->options, "serseq");
        if (value != NULL && strcmp(value, "on") == 0)
                cbe_lun->serseq = CTL_LUN_SERSEQ_ON;
        else if (value != NULL && strcmp(value, "read") == 0)
                cbe_lun->serseq = CTL_LUN_SERSEQ_READ;
        else if (value != NULL && strcmp(value, "off") == 0)
                cbe_lun->serseq = CTL_LUN_SERSEQ_OFF;

        if (params->flags & CTL_LUN_FLAG_ID_REQ) {
                cbe_lun->req_lun_id = params->req_lun_id;
                cbe_lun->flags |= CTL_LUN_FLAG_ID_REQ;
        } else
                cbe_lun->req_lun_id = 0;

        cbe_lun->lun_shutdown = ctl_backend_ramdisk_lun_shutdown;
        cbe_lun->lun_config_status = ctl_backend_ramdisk_lun_config_status;
        cbe_lun->be = &ctl_be_ramdisk_driver;
        if ((params->flags & CTL_LUN_FLAG_SERIAL_NUM) == 0) {
                snprintf(tmpstr, sizeof(tmpstr), "MYSERIAL%4d",
                         softc->num_luns);
                strncpy((char *)cbe_lun->serial_num, tmpstr,
                        MIN(sizeof(cbe_lun->serial_num), sizeof(tmpstr)));

                /* Tell the user what we used for a serial number */
                strncpy((char *)params->serial_num, tmpstr,
                        MIN(sizeof(params->serial_num), sizeof(tmpstr)));
        } else {
                strncpy((char *)cbe_lun->serial_num, params->serial_num,
                        MIN(sizeof(cbe_lun->serial_num),
                            sizeof(params->serial_num)));
        }
        if ((params->flags & CTL_LUN_FLAG_DEVID) == 0) {
                snprintf(tmpstr, sizeof(tmpstr), "MYDEVID%4d", softc->num_luns);
                strncpy((char *)cbe_lun->device_id, tmpstr,
                        MIN(sizeof(cbe_lun->device_id), sizeof(tmpstr)));

                /* Tell the user what we used for a device ID */
                strncpy((char *)params->device_id, tmpstr,
                        MIN(sizeof(params->device_id), sizeof(tmpstr)));
        } else {
                strncpy((char *)cbe_lun->device_id, params->device_id,
                        MIN(sizeof(cbe_lun->device_id),
                            sizeof(params->device_id)));
        }

        STAILQ_INIT(&be_lun->cont_queue);
        sx_init(&be_lun->page_lock, "cram page lock");
        if (be_lun->cap_bytes == 0)
                be_lun->pages = malloc(be_lun->pblocksize, M_RAMDISK, M_WAITOK);
        be_lun->zero_page = malloc(be_lun->pblocksize, M_RAMDISK,
            M_WAITOK|M_ZERO);
        mtx_init(&be_lun->queue_lock, "cram queue lock", NULL, MTX_DEF);
        TASK_INIT(&be_lun->io_task, /*priority*/0, ctl_backend_ramdisk_worker,
            be_lun);

        be_lun->io_taskqueue = taskqueue_create(be_lun->lunname, M_WAITOK,
            taskqueue_thread_enqueue, /*context*/&be_lun->io_taskqueue);
        if (be_lun->io_taskqueue == NULL) {
                snprintf(req->error_str, sizeof(req->error_str),
                         "%s: Unable to create taskqueue", __func__);
                goto bailout_error;
        }

        retval = taskqueue_start_threads(&be_lun->io_taskqueue,
                                         /*num threads*/1,
                                         /*priority*/PWAIT,
                                         /*thread name*/
                                         "%s taskq", be_lun->lunname);
        if (retval != 0)
                goto bailout_error;

        mtx_lock(&softc->lock);
        softc->num_luns++;
        STAILQ_INSERT_TAIL(&softc->lun_list, be_lun, links);
        mtx_unlock(&softc->lock);

        retval = ctl_add_lun(&be_lun->cbe_lun);
        if (retval != 0) {
                mtx_lock(&softc->lock);
                STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
                              links);
                softc->num_luns--;
                mtx_unlock(&softc->lock);
                snprintf(req->error_str, sizeof(req->error_str),
                         "%s: ctl_add_lun() returned error %d, see dmesg for "
                         "details", __func__, retval);
                retval = 0;
                goto bailout_error;
        }

        mtx_lock(&softc->lock);

        /*
         * Tell the config_status routine that we're waiting so it won't
         * clean up the LUN in the event of an error.
         */
        be_lun->flags |= CTL_BE_RAMDISK_LUN_WAITING;

        while (be_lun->flags & CTL_BE_RAMDISK_LUN_UNCONFIGURED) {
                retval = msleep(be_lun, &softc->lock, PCATCH, "ctlram", 0);
                if (retval == EINTR)
                        break;
        }
        be_lun->flags &= ~CTL_BE_RAMDISK_LUN_WAITING;

        if (be_lun->flags & CTL_BE_RAMDISK_LUN_CONFIG_ERR) {
                snprintf(req->error_str, sizeof(req->error_str),
                         "%s: LUN configuration error, see dmesg for details",
                         __func__);
                STAILQ_REMOVE(&softc->lun_list, be_lun, ctl_be_ramdisk_lun,
                              links);
                softc->num_luns--;
                mtx_unlock(&softc->lock);
                goto bailout_error;
        } else {
                params->req_lun_id = cbe_lun->lun_id;
        }
        mtx_unlock(&softc->lock);

        req->status = CTL_LUN_OK;
        return (retval);

bailout_error:
        req->status = CTL_LUN_ERROR;
        if (be_lun != NULL) {
                if (be_lun->io_taskqueue != NULL)
                        taskqueue_free(be_lun->io_taskqueue);
                ctl_free_opts(&cbe_lun->options);
                free(be_lun->zero_page, M_RAMDISK);
                ctl_backend_ramdisk_freeallpages(be_lun->pages, be_lun->indir);
                sx_destroy(&be_lun->page_lock);
                mtx_destroy(&be_lun->queue_lock);
                free(be_lun, M_RAMDISK);
        }
        return (retval);
}

static int
ctl_backend_ramdisk_modify(struct ctl_be_ramdisk_softc *softc,
                       struct ctl_lun_req *req)
{
        struct ctl_be_ramdisk_lun *be_lun;
        struct ctl_be_lun *cbe_lun;
        struct ctl_lun_modify_params *params;
        char *value;
        uint32_t blocksize;
        int wasprim;

        params = &req->reqdata.modify;

        mtx_lock(&softc->lock);
        STAILQ_FOREACH(be_lun, &softc->lun_list, links) {
                if (be_lun->cbe_lun.lun_id == params->lun_id)
                        break;
        }
        mtx_unlock(&softc->lock);
        if (be_lun == NULL) {
                snprintf(req->error_str, sizeof(req->error_str),
                         "%s: LUN %u is not managed by the ramdisk backend",
                         __func__, params->lun_id);
                goto bailout_error;
        }
        cbe_lun = &be_lun->cbe_lun;

        if (params->lun_size_bytes != 0)
                be_lun->params.lun_size_bytes = params->lun_size_bytes;
        ctl_update_opts(&cbe_lun->options, req->num_be_args, req->kern_be_args);

        wasprim = (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY);
        value = ctl_get_opt(&cbe_lun->options, "ha_role");
        if (value != NULL) {
                if (strcmp(value, "primary") == 0)
                        cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
                else
                        cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
        } else if (control_softc->flags & CTL_FLAG_ACTIVE_SHELF)
                cbe_lun->flags |= CTL_LUN_FLAG_PRIMARY;
        else
                cbe_lun->flags &= ~CTL_LUN_FLAG_PRIMARY;
        if (wasprim != (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)) {
                if (cbe_lun->flags & CTL_LUN_FLAG_PRIMARY)
                        ctl_lun_primary(cbe_lun);
                else
                        ctl_lun_secondary(cbe_lun);
        }

        blocksize = be_lun->cbe_lun.blocksize;
        if (be_lun->params.lun_size_bytes < blocksize) {
                snprintf(req->error_str, sizeof(req->error_str),
                        "%s: LUN size %ju < blocksize %u", __func__,
                        be_lun->params.lun_size_bytes, blocksize);
                goto bailout_error;
        }
        be_lun->size_blocks = be_lun->params.lun_size_bytes / blocksize;
        be_lun->size_bytes = be_lun->size_blocks * blocksize;
        be_lun->cbe_lun.maxlba = be_lun->size_blocks - 1;
        ctl_lun_capacity_changed(&be_lun->cbe_lun);

        /* Tell the user the exact size we ended up using */
        params->lun_size_bytes = be_lun->size_bytes;

        req->status = CTL_LUN_OK;
        return (0);

bailout_error:
        req->status = CTL_LUN_ERROR;
        return (0);
}

static void
ctl_backend_ramdisk_lun_shutdown(void *be_lun)
{
        struct ctl_be_ramdisk_lun *lun;
        struct ctl_be_ramdisk_softc *softc;
        int do_free;

        lun = (struct ctl_be_ramdisk_lun *)be_lun;
        softc = lun->softc;
        do_free = 0;

        mtx_lock(&softc->lock);
        lun->flags |= CTL_BE_RAMDISK_LUN_UNCONFIGURED;
        if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
                wakeup(lun);
        } else {
                STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
                              links);
                softc->num_luns--;
                do_free = 1;
        }
        mtx_unlock(&softc->lock);

        if (do_free != 0)
                free(be_lun, M_RAMDISK);
}

static void
ctl_backend_ramdisk_lun_config_status(void *be_lun,
                                      ctl_lun_config_status status)
{
        struct ctl_be_ramdisk_lun *lun;
        struct ctl_be_ramdisk_softc *softc;

        lun = (struct ctl_be_ramdisk_lun *)be_lun;
        softc = lun->softc;

        if (status == CTL_LUN_CONFIG_OK) {
                mtx_lock(&softc->lock);
                lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;
                if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING)
                        wakeup(lun);
                mtx_unlock(&softc->lock);

                /*
                 * We successfully added the LUN; now attempt to enable it.
                 */
                if (ctl_enable_lun(&lun->cbe_lun) != 0) {
                        printf("%s: ctl_enable_lun() failed!\n", __func__);
                        if (ctl_invalidate_lun(&lun->cbe_lun) != 0) {
                                printf("%s: ctl_invalidate_lun() failed!\n",
                                       __func__);
                        }
                }

                return;
        }

        mtx_lock(&softc->lock);
        lun->flags &= ~CTL_BE_RAMDISK_LUN_UNCONFIGURED;

        /*
         * If we have a user waiting, let him handle the cleanup.  If not,
         * clean things up here.
         */
        if (lun->flags & CTL_BE_RAMDISK_LUN_WAITING) {
                lun->flags |= CTL_BE_RAMDISK_LUN_CONFIG_ERR;
                wakeup(lun);
        } else {
                STAILQ_REMOVE(&softc->lun_list, lun, ctl_be_ramdisk_lun,
                              links);
                softc->num_luns--;
                free(lun, M_RAMDISK);
        }
        mtx_unlock(&softc->lock);
}