/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2003-2009 Silicon Graphics International Corp.
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2015 Alexander Motin <mav@FreeBSD.org>
 * Copyright (c) 2017 Jakub Wojciech Klama <jceel@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
32 #include <sys/param.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/types.h>
37 #include <sys/module.h>
38 #include <sys/mutex.h>
39 #include <sys/condvar.h>
40 #include <sys/malloc.h>
42 #include <sys/queue.h>
43 #include <sys/sysctl.h>
48 #include <cam/scsi/scsi_all.h>
49 #include <cam/scsi/scsi_da.h>
50 #include <cam/ctl/ctl_io.h>
51 #include <cam/ctl/ctl.h>
52 #include <cam/ctl/ctl_frontend.h>
53 #include <cam/ctl/ctl_util.h>
54 #include <cam/ctl/ctl_backend.h>
55 #include <cam/ctl/ctl_ioctl.h>
56 #include <cam/ctl/ctl_ha.h>
57 #include <cam/ctl/ctl_private.h>
58 #include <cam/ctl/ctl_debug.h>
59 #include <cam/ctl/ctl_error.h>
67 struct ctl_fe_ioctl_params {
70 ctl_fe_ioctl_state state;
74 TAILQ_ENTRY(cfi_port) link;
81 TAILQ_HEAD(, cfi_port) ports;
84 static struct cfi_softc cfi_softc;
86 static int cfi_init(void);
87 static int cfi_shutdown(void);
88 static void cfi_datamove(union ctl_io *io);
89 static void cfi_done(union ctl_io *io);
90 static int cfi_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
92 static void cfi_ioctl_port_create(struct ctl_req *req);
93 static void cfi_ioctl_port_remove(struct ctl_req *req);
95 static struct cdevsw cfi_cdevsw = {
96 .d_version = D_VERSION,
98 .d_ioctl = ctl_ioctl_io
101 static struct ctl_frontend cfi_frontend =
106 .shutdown = cfi_shutdown,
108 CTL_FRONTEND_DECLARE(ctlioctl, cfi_frontend);
113 struct cfi_softc *isoftc = &cfi_softc;
114 struct cfi_port *cfi;
115 struct ctl_port *port;
118 memset(isoftc, 0, sizeof(*isoftc));
119 TAILQ_INIT(&isoftc->ports);
121 cfi = malloc(sizeof(*cfi), M_CTL, M_WAITOK | M_ZERO);
123 port->frontend = &cfi_frontend;
124 port->port_type = CTL_PORT_IOCTL;
125 port->num_requested_ctl_io = 100;
126 port->port_name = "ioctl";
127 port->fe_datamove = cfi_datamove;
128 port->fe_done = cfi_done;
129 port->physical_port = 0;
130 port->targ_port = -1;
132 if ((error = ctl_port_register(port)) != 0) {
133 printf("%s: ioctl port registration failed\n", __func__);
137 ctl_port_online(port);
138 TAILQ_INSERT_TAIL(&isoftc->ports, cfi, link);
145 struct cfi_softc *isoftc = &cfi_softc;
146 struct cfi_port *cfi, *temp;
147 struct ctl_port *port;
150 TAILQ_FOREACH_SAFE(cfi, &isoftc->ports, link, temp) {
152 ctl_port_offline(port);
153 error = ctl_port_deregister(port);
155 printf("%s: ctl_frontend_deregister() failed\n",
160 TAILQ_REMOVE(&isoftc->ports, cfi, link);
168 cfi_ioctl_port_create(struct ctl_req *req)
170 struct cfi_softc *isoftc = &cfi_softc;
171 struct cfi_port *cfi;
172 struct ctl_port *port;
173 struct make_dev_args args;
178 val = dnvlist_get_string(req->args_nvl, "pp", NULL);
180 pp = strtol(val, NULL, 10);
182 val = dnvlist_get_string(req->args_nvl, "vp", NULL);
184 vp = strtol(val, NULL, 10);
187 /* Check for duplicates */
188 TAILQ_FOREACH(cfi, &isoftc->ports, link) {
189 if (pp == cfi->port.physical_port &&
190 vp == cfi->port.virtual_port) {
191 req->status = CTL_LUN_ERROR;
192 snprintf(req->error_str, sizeof(req->error_str),
193 "port %d already exists", pp);
199 /* Find free port number */
200 TAILQ_FOREACH(cfi, &isoftc->ports, link) {
201 pp = MAX(pp, cfi->port.physical_port);
207 cfi = malloc(sizeof(*cfi), M_CTL, M_WAITOK | M_ZERO);
209 port->frontend = &cfi_frontend;
210 port->port_type = CTL_PORT_IOCTL;
211 port->num_requested_ctl_io = 100;
212 port->port_name = "ioctl";
213 port->fe_datamove = cfi_datamove;
214 port->fe_done = cfi_done;
215 port->physical_port = pp;
216 port->virtual_port = vp;
217 port->targ_port = -1;
219 retval = ctl_port_register(port);
221 req->status = CTL_LUN_ERROR;
222 snprintf(req->error_str, sizeof(req->error_str),
223 "ctl_port_register() failed with error %d", retval);
228 req->result_nvl = nvlist_create(0);
229 nvlist_add_number(req->result_nvl, "port_id", port->targ_port);
230 ctl_port_online(port);
232 make_dev_args_init(&args);
233 args.mda_devsw = &cfi_cdevsw;
234 args.mda_uid = UID_ROOT;
235 args.mda_gid = GID_OPERATOR;
236 args.mda_mode = 0600;
237 args.mda_si_drv1 = NULL;
238 args.mda_si_drv2 = cfi;
240 retval = make_dev_s(&args, &cfi->dev, "cam/ctl%d.%d", pp, vp);
242 req->status = CTL_LUN_ERROR;
243 snprintf(req->error_str, sizeof(req->error_str),
244 "make_dev_s() failed with error %d", retval);
245 ctl_port_offline(port);
246 ctl_port_deregister(port);
251 req->status = CTL_LUN_OK;
252 TAILQ_INSERT_TAIL(&isoftc->ports, cfi, link);
256 cfi_ioctl_port_remove(struct ctl_req *req)
258 struct cfi_softc *isoftc = &cfi_softc;
259 struct cfi_port *cfi = NULL;
263 val = dnvlist_get_string(req->args_nvl, "port_id", NULL);
265 port_id = strtol(val, NULL, 10);
268 req->status = CTL_LUN_ERROR;
269 snprintf(req->error_str, sizeof(req->error_str),
270 "port_id not provided");
274 TAILQ_FOREACH(cfi, &isoftc->ports, link) {
275 if (cfi->port.targ_port == port_id)
280 req->status = CTL_LUN_ERROR;
281 snprintf(req->error_str, sizeof(req->error_str),
282 "cannot find port %d", port_id);
287 if (cfi->port.physical_port == 0 && cfi->port.virtual_port == 0) {
288 req->status = CTL_LUN_ERROR;
289 snprintf(req->error_str, sizeof(req->error_str),
290 "cannot destroy default ioctl port");
295 ctl_port_offline(&cfi->port);
296 ctl_port_deregister(&cfi->port);
297 TAILQ_REMOVE(&isoftc->ports, cfi, link);
298 destroy_dev(cfi->dev);
300 req->status = CTL_LUN_OK;
304 cfi_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
309 if (cmd == CTL_PORT_REQ) {
310 req = (struct ctl_req *)addr;
311 switch (req->reqtype) {
313 cfi_ioctl_port_create(req);
316 cfi_ioctl_port_remove(req);
319 req->status = CTL_LUN_ERROR;
320 snprintf(req->error_str, sizeof(req->error_str),
321 "Unsupported request type %d", req->reqtype);
330 * Data movement routine for the CTL ioctl frontend port.
333 ctl_ioctl_do_datamove(struct ctl_scsiio *ctsio)
335 struct ctl_sg_entry *ext_sglist, *kern_sglist;
336 struct ctl_sg_entry ext_entry, kern_entry;
337 int ext_sglen, ext_sg_entries, kern_sg_entries;
338 int ext_sg_start, ext_offset;
340 int kern_watermark, ext_watermark;
341 int ext_sglist_malloced;
344 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove\n"));
347 * If this flag is set, fake the data transfer.
349 if (ctsio->io_hdr.flags & CTL_FLAG_NO_DATAMOVE) {
350 ext_sglist_malloced = 0;
351 ctsio->ext_data_filled += ctsio->kern_data_len;
352 ctsio->kern_data_resid = 0;
357 * To simplify things here, if we have a single buffer, stick it in
358 * a S/G entry and just make it a single entry S/G list.
360 if (ctsio->ext_sg_entries > 0) {
363 ext_sglen = ctsio->ext_sg_entries * sizeof(*ext_sglist);
364 ext_sglist = (struct ctl_sg_entry *)malloc(ext_sglen, M_CTL,
366 ext_sglist_malloced = 1;
367 if (copyin(ctsio->ext_data_ptr, ext_sglist, ext_sglen) != 0) {
368 ctsio->io_hdr.port_status = 31343;
371 ext_sg_entries = ctsio->ext_sg_entries;
372 ext_sg_start = ext_sg_entries;
375 for (i = 0; i < ext_sg_entries; i++) {
376 if ((len_seen + ext_sglist[i].len) >=
377 ctsio->ext_data_filled) {
379 ext_offset = ctsio->ext_data_filled - len_seen;
382 len_seen += ext_sglist[i].len;
385 ext_sglist = &ext_entry;
386 ext_sglist_malloced = 0;
387 ext_sglist->addr = ctsio->ext_data_ptr;
388 ext_sglist->len = ctsio->ext_data_len;
391 ext_offset = ctsio->ext_data_filled;
394 if (ctsio->kern_sg_entries > 0) {
395 kern_sglist = (struct ctl_sg_entry *)ctsio->kern_data_ptr;
396 kern_sg_entries = ctsio->kern_sg_entries;
398 kern_sglist = &kern_entry;
399 kern_sglist->addr = ctsio->kern_data_ptr;
400 kern_sglist->len = ctsio->kern_data_len;
405 ext_watermark = ext_offset;
406 for (i = ext_sg_start, j = 0;
407 i < ext_sg_entries && j < kern_sg_entries;) {
408 uint8_t *ext_ptr, *kern_ptr;
410 len_to_copy = MIN(ext_sglist[i].len - ext_watermark,
411 kern_sglist[j].len - kern_watermark);
413 ext_ptr = (uint8_t *)ext_sglist[i].addr;
414 ext_ptr = ext_ptr + ext_watermark;
415 if (ctsio->io_hdr.flags & CTL_FLAG_BUS_ADDR) {
419 panic("need to implement bus address support");
421 kern_ptr = bus_to_virt(kern_sglist[j].addr);
424 kern_ptr = (uint8_t *)kern_sglist[j].addr;
425 kern_ptr = kern_ptr + kern_watermark;
427 if ((ctsio->io_hdr.flags & CTL_FLAG_DATA_MASK) ==
429 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
430 "bytes to user\n", len_to_copy));
431 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
432 "to %p\n", kern_ptr, ext_ptr));
433 if (copyout(kern_ptr, ext_ptr, len_to_copy) != 0) {
434 ctsio->io_hdr.port_status = 31344;
438 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: copying %d "
439 "bytes from user\n", len_to_copy));
440 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: from %p "
441 "to %p\n", ext_ptr, kern_ptr));
442 if (copyin(ext_ptr, kern_ptr, len_to_copy)!= 0){
443 ctsio->io_hdr.port_status = 31345;
448 ctsio->ext_data_filled += len_to_copy;
449 ctsio->kern_data_resid -= len_to_copy;
451 ext_watermark += len_to_copy;
452 if (ext_sglist[i].len == ext_watermark) {
457 kern_watermark += len_to_copy;
458 if (kern_sglist[j].len == kern_watermark) {
464 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_sg_entries: %d, "
465 "kern_sg_entries: %d\n", ext_sg_entries,
467 CTL_DEBUG_PRINT(("ctl_ioctl_do_datamove: ext_data_len = %d, "
468 "kern_data_len = %d\n", ctsio->ext_data_len,
469 ctsio->kern_data_len));
472 if (ext_sglist_malloced != 0)
473 free(ext_sglist, M_CTL);
475 return (CTL_RETVAL_COMPLETE);
479 cfi_datamove(union ctl_io *io)
481 struct ctl_fe_ioctl_params *params;
483 params = (struct ctl_fe_ioctl_params *)
484 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
486 mtx_lock(¶ms->ioctl_mtx);
487 params->state = CTL_IOCTL_DATAMOVE;
488 cv_broadcast(¶ms->sem);
489 mtx_unlock(¶ms->ioctl_mtx);
493 cfi_done(union ctl_io *io)
495 struct ctl_fe_ioctl_params *params;
497 params = (struct ctl_fe_ioctl_params *)
498 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr;
500 mtx_lock(¶ms->ioctl_mtx);
501 params->state = CTL_IOCTL_DONE;
502 cv_broadcast(¶ms->sem);
503 mtx_unlock(¶ms->ioctl_mtx);
507 cfi_submit_wait(union ctl_io *io)
509 struct ctl_fe_ioctl_params params;
510 ctl_fe_ioctl_state last_state;
513 bzero(¶ms, sizeof(params));
514 mtx_init(¶ms.ioctl_mtx, "ctliocmtx", NULL, MTX_DEF);
515 cv_init(¶ms.sem, "ctlioccv");
516 params.state = CTL_IOCTL_INPROG;
517 last_state = params.state;
519 io->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr = ¶ms;
521 CTL_DEBUG_PRINT(("cfi_submit_wait\n"));
523 /* This shouldn't happen */
524 if ((retval = ctl_run(io)) != CTL_RETVAL_COMPLETE)
530 mtx_lock(¶ms.ioctl_mtx);
532 * Check the state here, and don't sleep if the state has
533 * already changed (i.e. wakeup has already occurred, but we
534 * weren't waiting yet).
536 if (params.state == last_state) {
537 /* XXX KDM cv_wait_sig instead? */
538 cv_wait(¶ms.sem, ¶ms.ioctl_mtx);
540 last_state = params.state;
542 switch (params.state) {
543 case CTL_IOCTL_INPROG:
544 /* Why did we wake up? */
545 /* XXX KDM error here? */
546 mtx_unlock(¶ms.ioctl_mtx);
548 case CTL_IOCTL_DATAMOVE:
549 CTL_DEBUG_PRINT(("got CTL_IOCTL_DATAMOVE\n"));
552 * change last_state back to INPROG to avoid
553 * deadlock on subsequent data moves.
555 params.state = last_state = CTL_IOCTL_INPROG;
557 mtx_unlock(¶ms.ioctl_mtx);
558 ctl_ioctl_do_datamove(&io->scsiio);
560 * Note that in some cases, most notably writes,
561 * this will queue the I/O and call us back later.
562 * In other cases, generally reads, this routine
563 * will immediately call back and wake us up,
564 * probably using our own context.
566 ctl_datamove_done(io, false);
569 mtx_unlock(¶ms.ioctl_mtx);
570 CTL_DEBUG_PRINT(("got CTL_IOCTL_DONE\n"));
574 mtx_unlock(¶ms.ioctl_mtx);
575 /* XXX KDM error here? */
580 mtx_destroy(¶ms.ioctl_mtx);
581 cv_destroy(¶ms.sem);
583 return (CTL_RETVAL_COMPLETE);
/*
 * ioctl entry point for the /dev/cam/ctl* nodes: copies the user's
 * ctl_io into a freshly allocated kernel ctl_io, runs it to completion
 * via cfi_submit_wait(), then copies the result back out.
 * NOTE(review): this function continues past the visible chunk; the
 * final free/return path is not shown here.
 */
ctl_ioctl_io(struct cdev *dev, u_long cmd, caddr_t addr, int flag,
	struct cfi_port *cfi;
	/* Saved across the memcpy() that overwrites '*io' below. */
	void *pool_tmp, *sc_tmp;
	/*
	 * si_drv2 is set to the owning cfi_port for dynamically created
	 * nodes (see cfi_ioctl_port_create); fall back to the first
	 * (default) port when it is NULL.
	 */
	cfi = dev->si_drv2 == NULL
	    ? TAILQ_FIRST(&cfi_softc.ports)
	/*
	 * If we haven't been "enabled", don't allow any SCSI I/O
	 */
	if ((cfi->port.status & CTL_PORT_STATUS_ONLINE) == 0)
	io = ctl_alloc_io(cfi->port.ctl_pool_ref);
	/*
	 * Need to save the pool reference so it doesn't get
	 * spammed by the user's ctl_io.
	 */
	pool_tmp = io->io_hdr.pool;
	sc_tmp = CTL_SOFTC(io);
	memcpy(io, (void *)addr, sizeof(*io));
	io->io_hdr.pool = pool_tmp;
	CTL_SOFTC(io) = sc_tmp;
	/* The user copy clobbered the queue head; reinitialize it. */
	TAILQ_INIT(&io->io_hdr.blocked_queue);
	/*
	 * No status yet, so make sure the status is set properly.
	 */
	io->io_hdr.status = CTL_STATUS_NONE;
	/*
	 * The user sets the initiator ID, target and LUN IDs.
	 */
	io->io_hdr.nexus.targ_port = cfi->port.targ_port;
	io->io_hdr.flags |= CTL_FLAG_USER_REQ;
	/* Assign a kernel tag unless the user supplied one (USER_TAG). */
	if ((io->io_hdr.flags & CTL_FLAG_USER_TAG) == 0 &&
	    io->io_hdr.io_type == CTL_IO_SCSI &&
	    io->scsiio.tag_type != CTL_TAG_UNTAGGED)
		io->scsiio.tag_num = atomic_fetchadd_int(&cfi->cur_tag_num, 1);
	/* Run the I/O synchronously, then return results to the user. */
	retval = cfi_submit_wait(io);
	memcpy((void *)addr, io, sizeof(*io));