2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2016 Jakub Klama <jceel@FreeBSD.org>.
5 * Copyright (c) 2018 Marcelo Araujo <araujo@FreeBSD.org>.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer
13 * in this position and unchanged.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/linker_set.h>
36 #include <sys/types.h>
39 #include <sys/queue.h>
51 #include <pthread_np.h>
53 #include <cam/scsi/scsi_all.h>
54 #include <cam/scsi/scsi_message.h>
55 #include <cam/ctl/ctl.h>
56 #include <cam/ctl/ctl_io.h>
57 #include <cam/ctl/ctl_backend.h>
58 #include <cam/ctl/ctl_ioctl.h>
59 #include <cam/ctl/ctl_util.h>
60 #include <cam/ctl/ctl_scsi_all.h>
/* Virtqueue ring size for every queue of the emulated virtio-scsi HBA. */
69 #define VTSCSI_RINGSZ 64
/* Number of request queues exposed to the guest. */
70 #define VTSCSI_REQUESTQ 1
/* Worker threads servicing each request queue. */
71 #define VTSCSI_THR_PER_Q 16
/* Total virtqueues: control + event + the request queue(s). */
72 #define VTSCSI_MAXQ (VTSCSI_REQUESTQ + 2)
/* Maximum scatter-gather segments accepted in one descriptor chain. */
73 #define VTSCSI_MAXSEG 64
/*
 * Length of the guest-readable request header: the fixed command header
 * plus the CDB size currently advertised in the device config space.
 */
75 #define VTSCSI_IN_HEADER_LEN(_sc) \
76 (sizeof(struct pci_vtscsi_req_cmd_rd) + _sc->vss_config.cdb_size)
/*
 * Length of the guest-writable response header: fixed response header
 * plus the advertised sense-data size.
 */
78 #define VTSCSI_OUT_HEADER_LEN(_sc) \
79 (sizeof(struct pci_vtscsi_req_cmd_wr) + _sc->vss_config.sense_size)
/* Topology limits reported to the guest (single channel/target, 16K LUNs). */
81 #define VIRTIO_SCSI_MAX_CHANNEL 0
82 #define VIRTIO_SCSI_MAX_TARGET 0
83 #define VIRTIO_SCSI_MAX_LUN 16383
/* Virtio-scsi feature bits (virtio specification). */
85 #define VIRTIO_SCSI_F_INOUT (1 << 0)
86 #define VIRTIO_SCSI_F_HOTPLUG (1 << 1)
87 #define VIRTIO_SCSI_F_CHANGE (1 << 2)
/* Debug logging is compiled in but gated on the runtime flag below. */
89 static int pci_vtscsi_debug = 0;
90 #define DPRINTF(params) if (pci_vtscsi_debug) PRINTLN params
91 #define WPRINTF(params) PRINTLN params
/*
 * Virtio-scsi device configuration space, as exposed to the guest.
 * NOTE(review): several fields appear to be missing from this view of the
 * file (e.g. num_queues, seg_max, cdb_size, sense_size are referenced
 * elsewhere) — the struct body looks truncated; confirm against the
 * complete source.
 */
93 struct pci_vtscsi_config {
98 uint32_t event_info_size;
101 uint16_t max_channel;
104 } __attribute__((packed));
/*
 * Per-request-queue state: back-pointers to the softc and virtqueue, the
 * request list protected by vsq_mtx/vsq_cv, and the worker-thread pool.
 * vsq_qmtx serializes virtqueue completion (vq_relchain/vq_endchains).
 */
106 struct pci_vtscsi_queue {
107 struct pci_vtscsi_softc * vsq_sc;
108 struct vqueue_info * vsq_vq;
109 pthread_mutex_t vsq_mtx;
110 pthread_mutex_t vsq_qmtx;
111 pthread_cond_t vsq_cv;
112 STAILQ_HEAD(, pci_vtscsi_request) vsq_requests;
113 LIST_HEAD(, pci_vtscsi_worker) vsq_workers;
/*
 * One worker thread bound to a request queue; linked into the queue's
 * vsq_workers list. (An exit flag, vsw_exiting, is referenced by
 * pci_vtscsi_proc() but not visible in this view.)
 */
116 struct pci_vtscsi_worker {
117 struct pci_vtscsi_queue * vsw_queue;
118 pthread_t vsw_thread;
120 LIST_ENTRY(pci_vtscsi_worker) vsw_link;
/*
 * A queued guest request: the readable (driver-to-device) and writable
 * (device-to-driver) iovec halves of one descriptor chain, linked into
 * the owning queue's vsq_requests list until a worker picks it up.
 */
123 struct pci_vtscsi_request {
124 struct pci_vtscsi_queue * vsr_queue;
125 struct iovec vsr_iov_in[VTSCSI_MAXSEG];
127 struct iovec vsr_iov_out[VTSCSI_MAXSEG];
130 STAILQ_ENTRY(pci_vtscsi_request) vsr_link;
/*
 * Per-device software state: the generic virtio layer state, the
 * virtqueues (control, event, request), per-request-queue bookkeeping,
 * negotiated features and the guest-visible config space copy.
 */
136 struct pci_vtscsi_softc {
137 struct virtio_softc vss_vs;
138 struct vqueue_info vss_vq[VTSCSI_MAXQ];
139 struct pci_vtscsi_queue vss_queues[VTSCSI_REQUESTQ];
140 pthread_mutex_t vss_mtx;
143 uint32_t vss_features;
144 struct pci_vtscsi_config vss_config;
/* Control-queue request types and TMF (task management function) subtypes. */
147 #define VIRTIO_SCSI_T_TMF 0
148 #define VIRTIO_SCSI_T_TMF_ABORT_TASK 0
149 #define VIRTIO_SCSI_T_TMF_ABORT_TASK_SET 1
150 #define VIRTIO_SCSI_T_TMF_CLEAR_ACA 2
151 #define VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET 3
152 #define VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET 4
153 #define VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET 5
154 #define VIRTIO_SCSI_T_TMF_QUERY_TASK 6
155 #define VIRTIO_SCSI_T_TMF_QUERY_TASK_SET 7
157 /* command-specific response values */
158 #define VIRTIO_SCSI_S_FUNCTION_COMPLETE 0
159 #define VIRTIO_SCSI_S_FUNCTION_SUCCEEDED 10
160 #define VIRTIO_SCSI_S_FUNCTION_REJECTED 11
/* TMF control request, guest wire format (fields truncated in this view). */
162 struct pci_vtscsi_ctrl_tmf {
168 } __attribute__((packed));
/* Asynchronous-notification query type and event bits. */
170 #define VIRTIO_SCSI_T_AN_QUERY 1
171 #define VIRTIO_SCSI_EVT_ASYNC_OPERATIONAL_CHANGE 2
172 #define VIRTIO_SCSI_EVT_ASYNC_POWER_MGMT 4
173 #define VIRTIO_SCSI_EVT_ASYNC_EXTERNAL_REQUEST 8
174 #define VIRTIO_SCSI_EVT_ASYNC_MEDIA_CHANGE 16
175 #define VIRTIO_SCSI_EVT_ASYNC_MULTI_HOST 32
176 #define VIRTIO_SCSI_EVT_ASYNC_DEVICE_BUSY 64
/* Asynchronous-notification control request, guest wire format. */
178 struct pci_vtscsi_ctrl_an {
181 uint32_t event_requested;
182 uint32_t event_actual;
184 } __attribute__((packed));
186 /* command-specific response values */
187 #define VIRTIO_SCSI_S_OK 0
188 #define VIRTIO_SCSI_S_OVERRUN 1
189 #define VIRTIO_SCSI_S_ABORTED 2
190 #define VIRTIO_SCSI_S_BAD_TARGET 3
191 #define VIRTIO_SCSI_S_RESET 4
192 #define VIRTIO_SCSI_S_BUSY 5
193 #define VIRTIO_SCSI_S_TRANSPORT_FAILURE 6
194 #define VIRTIO_SCSI_S_TARGET_FAILURE 7
195 #define VIRTIO_SCSI_S_NEXUS_FAILURE 8
196 #define VIRTIO_SCSI_S_FAILURE 9
197 #define VIRTIO_SCSI_S_INCORRECT_LUN 12
/* Task attributes carried in the request header (queueing discipline). */
200 #define VIRTIO_SCSI_S_SIMPLE 0
201 #define VIRTIO_SCSI_S_ORDERED 1
202 #define VIRTIO_SCSI_S_HEAD 2
203 #define VIRTIO_SCSI_S_ACA 3
/* Event-queue record, guest wire format (fields truncated in this view). */
205 struct pci_vtscsi_event {
209 } __attribute__((packed));
/* Request-queue command header read from the guest (includes CDB). */
211 struct pci_vtscsi_req_cmd_rd {
218 } __attribute__((packed));
/* Request-queue response header written back to the guest. */
220 struct pci_vtscsi_req_cmd_wr {
223 uint16_t status_qualifier;
227 } __attribute__((packed));
/* Forward declarations for the virtio-scsi emulation internals. */
229 static void *pci_vtscsi_proc(void *);
230 static void pci_vtscsi_reset(void *);
231 static void pci_vtscsi_neg_features(void *, uint64_t);
232 static int pci_vtscsi_cfgread(void *, int, int, uint32_t *);
233 static int pci_vtscsi_cfgwrite(void *, int, int, uint32_t);
234 static inline int pci_vtscsi_get_lun(uint8_t *);
235 static int pci_vtscsi_control_handle(struct pci_vtscsi_softc *, void *, size_t);
236 static int pci_vtscsi_tmf_handle(struct pci_vtscsi_softc *,
237 struct pci_vtscsi_ctrl_tmf *);
238 static int pci_vtscsi_an_handle(struct pci_vtscsi_softc *,
239 struct pci_vtscsi_ctrl_an *);
240 static int pci_vtscsi_request_handle(struct pci_vtscsi_queue *, struct iovec *,
241 int, struct iovec *, int);
242 static void pci_vtscsi_controlq_notify(void *, struct vqueue_info *);
243 static void pci_vtscsi_eventq_notify(void *, struct vqueue_info *);
244 static void pci_vtscsi_requestq_notify(void *, struct vqueue_info *);
245 static int pci_vtscsi_init_queue(struct pci_vtscsi_softc *,
246 struct pci_vtscsi_queue *, int);
247 static int pci_vtscsi_init(struct vmctx *, struct pci_devinst *, char *);
/*
 * Operations table handed to the generic virtio layer. Positional
 * initializers — the order must match struct virtio_consts.
 */
249 static struct virtio_consts vtscsi_vi_consts = {
250 "vtscsi", /* our name */
251 VTSCSI_MAXQ, /* we support 2+n virtqueues */
252 sizeof(struct pci_vtscsi_config), /* config reg size */
253 pci_vtscsi_reset, /* reset */
254 NULL, /* device-wide qnotify */
255 pci_vtscsi_cfgread, /* read virtio config */
256 pci_vtscsi_cfgwrite, /* write virtio config */
257 pci_vtscsi_neg_features, /* apply negotiated features */
258 0, /* our capabilities */
/*
 * Worker-thread body: wait on the queue condvar for work, dequeue one
 * request at a time, run it through pci_vtscsi_request_handle(), then
 * complete the descriptor chain under the queue's completion mutex.
 * (Loop/brace structure and the exit path are partially missing from
 * this view of the file.)
 */
262 pci_vtscsi_proc(void *arg)
264 struct pci_vtscsi_worker *worker = (struct pci_vtscsi_worker *)arg;
265 struct pci_vtscsi_queue *q = worker->vsw_queue;
266 struct pci_vtscsi_request *req;
270 pthread_mutex_lock(&q->vsq_mtx);
/* Sleep until a request is queued or the worker is told to exit. */
272 while (STAILQ_EMPTY(&q->vsq_requests)
273 && !worker->vsw_exiting)
274 pthread_cond_wait(&q->vsq_cv, &q->vsq_mtx);
276 if (worker->vsw_exiting)
/* Pop the request and drop the list lock before doing the (slow) I/O. */
279 req = STAILQ_FIRST(&q->vsq_requests);
280 STAILQ_REMOVE_HEAD(&q->vsq_requests, vsr_link);
282 pthread_mutex_unlock(&q->vsq_mtx);
283 iolen = pci_vtscsi_request_handle(q, req->vsr_iov_in,
284 req->vsr_niov_in, req->vsr_iov_out, req->vsr_niov_out);
/* Completion is serialized by vsq_qmtx since workers run concurrently. */
286 pthread_mutex_lock(&q->vsq_qmtx);
287 vq_relchain(q->vsq_vq, req->vsr_idx, iolen);
288 vq_endchains(q->vsq_vq, 0);
289 pthread_mutex_unlock(&q->vsq_qmtx);
291 DPRINTF(("virtio-scsi: request <idx=%d> completed",
296 pthread_mutex_unlock(&q->vsq_mtx);
/*
 * Device reset: reset the generic virtio state and rebuild the
 * guest-visible config space with this emulation's fixed limits.
 */
301 pci_vtscsi_reset(void *vsc)
303 struct pci_vtscsi_softc *sc;
307 DPRINTF(("vtscsi: device reset requested"));
308 vi_reset_dev(&sc->vss_vs);
310 /* initialize config structure */
311 sc->vss_config = (struct pci_vtscsi_config){
312 .num_queues = VTSCSI_REQUESTQ,
313 /* Leave room for the request and the response. */
314 .seg_max = VTSCSI_MAXSEG - 2,
317 .event_info_size = sizeof(struct pci_vtscsi_event),
320 .max_channel = VIRTIO_SCSI_MAX_CHANNEL,
321 .max_target = VIRTIO_SCSI_MAX_TARGET,
322 .max_lun = VIRTIO_SCSI_MAX_LUN
/* Record the feature bits the guest driver negotiated. */
327 pci_vtscsi_neg_features(void *vsc, uint64_t negotiated_features)
329 struct pci_vtscsi_softc *sc = vsc;
331 sc->vss_features = negotiated_features;
/*
 * Guest read of the virtio config space: copy `size` bytes at `offset`
 * out of the cached pci_vtscsi_config structure.
 */
335 pci_vtscsi_cfgread(void *vsc, int offset, int size, uint32_t *retval)
337 struct pci_vtscsi_softc *sc = vsc;
340 ptr = (uint8_t *)&sc->vss_config + offset;
341 memcpy(retval, ptr, size);
/*
 * Guest write to the virtio config space. NOTE(review): the body is not
 * visible here — presumably writes are ignored/accepted as a no-op;
 * confirm against the complete source.
 */
346 pci_vtscsi_cfgwrite(void *vsc, int offset, int size, uint32_t val)
/*
 * Extract the 14-bit LUN number from bytes 2-3 of the 8-byte virtio-scsi
 * LUN field (single-level flat addressing).
 */
353 pci_vtscsi_get_lun(uint8_t *lun)
356 return (((lun[2] << 8) | lun[3]) & 0x3fff);
/*
 * Dispatch a control-queue request by the 32-bit type word at the start
 * of the buffer: task-management (TMF) or asynchronous-notification
 * query. Returns the handler's byte count for the response.
 */
360 pci_vtscsi_control_handle(struct pci_vtscsi_softc *sc, void *buf,
363 struct pci_vtscsi_ctrl_tmf *tmf;
364 struct pci_vtscsi_ctrl_an *an;
367 type = *(uint32_t *)buf;
369 if (type == VIRTIO_SCSI_T_TMF) {
370 tmf = (struct pci_vtscsi_ctrl_tmf *)buf;
371 return (pci_vtscsi_tmf_handle(sc, tmf));
374 if (type == VIRTIO_SCSI_T_AN_QUERY) {
375 an = (struct pci_vtscsi_ctrl_an *)buf;
376 return (pci_vtscsi_an_handle(sc, an));
/*
 * Execute a task-management request: build a CTL_IO_TASK for the CAM
 * Target Layer, map the virtio TMF subtype onto the corresponding CTL
 * task action, submit it via the CTL_IO ioctl, and report CTL's task
 * status back in the guest's response field.
 */
383 pci_vtscsi_tmf_handle(struct pci_vtscsi_softc *sc,
384 struct pci_vtscsi_ctrl_tmf *tmf)
389 io = ctl_scsi_alloc_io(sc->vss_iid);
390 ctl_scsi_zero_io(io);
/* Target the nexus/LUN named in the guest's TMF request. */
392 io->io_hdr.io_type = CTL_IO_TASK;
393 io->io_hdr.nexus.initid = sc->vss_iid;
394 io->io_hdr.nexus.targ_lun = pci_vtscsi_get_lun(tmf->lun);
395 io->taskio.tag_type = CTL_TAG_SIMPLE;
396 io->taskio.tag_num = (uint32_t)tmf->id;
/* Translate the virtio TMF subtype to the equivalent CTL task action. */
398 switch (tmf->subtype) {
399 case VIRTIO_SCSI_T_TMF_ABORT_TASK:
400 io->taskio.task_action = CTL_TASK_ABORT_TASK;
403 case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
404 io->taskio.task_action = CTL_TASK_ABORT_TASK_SET;
407 case VIRTIO_SCSI_T_TMF_CLEAR_ACA:
408 io->taskio.task_action = CTL_TASK_CLEAR_ACA;
411 case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
412 io->taskio.task_action = CTL_TASK_CLEAR_TASK_SET;
415 case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
416 io->taskio.task_action = CTL_TASK_I_T_NEXUS_RESET;
419 case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
420 io->taskio.task_action = CTL_TASK_LUN_RESET;
423 case VIRTIO_SCSI_T_TMF_QUERY_TASK:
424 io->taskio.task_action = CTL_TASK_QUERY_TASK;
427 case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET:
428 io->taskio.task_action = CTL_TASK_QUERY_TASK_SET;
/* Optionally dump the CTL request for debugging. */
432 if (pci_vtscsi_debug) {
433 struct sbuf *sb = sbuf_new_auto();
436 DPRINTF(("pci_virtio_scsi: %s", sbuf_data(sb)));
/* Submit to the CAM Target Layer character device. */
440 err = ioctl(sc->vss_ctl_fd, CTL_IO, io);
442 WPRINTF(("CTL_IO: err=%d (%s)", errno, strerror(errno)));
444 tmf->response = io->taskio.task_status;
445 ctl_scsi_free_io(io);
/*
 * Handle an asynchronous-notification query from the control queue.
 * NOTE(review): the body is not visible in this view of the file.
 */
450 pci_vtscsi_an_handle(struct pci_vtscsi_softc *sc,
451 struct pci_vtscsi_ctrl_an *an)
458 pci_vtscsi_request_handle(struct pci_vtscsi_queue *q, struct iovec *iov_in,
459 int niov_in, struct iovec *iov_out, int niov_out)
461 struct pci_vtscsi_softc *sc = q->vsq_sc;
462 struct pci_vtscsi_req_cmd_rd *cmd_rd = NULL;
463 struct pci_vtscsi_req_cmd_wr *cmd_wr;
464 struct iovec data_iov_in[VTSCSI_MAXSEG], data_iov_out[VTSCSI_MAXSEG];
466 int data_niov_in, data_niov_out;
467 void *ext_data_ptr = NULL;
468 uint32_t ext_data_len = 0, ext_sg_entries = 0;
471 seek_iov(iov_in, niov_in, data_iov_in, &data_niov_in,
472 VTSCSI_IN_HEADER_LEN(sc));
473 seek_iov(iov_out, niov_out, data_iov_out, &data_niov_out,
474 VTSCSI_OUT_HEADER_LEN(sc));
476 truncate_iov(iov_in, &niov_in, VTSCSI_IN_HEADER_LEN(sc));
477 truncate_iov(iov_out, &niov_out, VTSCSI_OUT_HEADER_LEN(sc));
478 iov_to_buf(iov_in, niov_in, (void **)&cmd_rd);
480 cmd_wr = malloc(VTSCSI_OUT_HEADER_LEN(sc));
481 io = ctl_scsi_alloc_io(sc->vss_iid);
482 ctl_scsi_zero_io(io);
484 io->io_hdr.nexus.initid = sc->vss_iid;
485 io->io_hdr.nexus.targ_lun = pci_vtscsi_get_lun(cmd_rd->lun);
487 io->io_hdr.io_type = CTL_IO_SCSI;
489 if (data_niov_in > 0) {
490 ext_data_ptr = (void *)data_iov_in;
491 ext_sg_entries = data_niov_in;
492 ext_data_len = count_iov(data_iov_in, data_niov_in);
493 io->io_hdr.flags |= CTL_FLAG_DATA_OUT;
494 } else if (data_niov_out > 0) {
495 ext_data_ptr = (void *)data_iov_out;
496 ext_sg_entries = data_niov_out;
497 ext_data_len = count_iov(data_iov_out, data_niov_out);
498 io->io_hdr.flags |= CTL_FLAG_DATA_IN;
501 io->scsiio.sense_len = sc->vss_config.sense_size;
502 io->scsiio.tag_num = (uint32_t)cmd_rd->id;
503 switch (cmd_rd->task_attr) {
504 case VIRTIO_SCSI_S_ORDERED:
505 io->scsiio.tag_type = CTL_TAG_ORDERED;
507 case VIRTIO_SCSI_S_HEAD:
508 io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE;
510 case VIRTIO_SCSI_S_ACA:
511 io->scsiio.tag_type = CTL_TAG_ACA;
513 case VIRTIO_SCSI_S_SIMPLE:
515 io->scsiio.tag_type = CTL_TAG_SIMPLE;
518 io->scsiio.ext_sg_entries = ext_sg_entries;
519 io->scsiio.ext_data_ptr = ext_data_ptr;
520 io->scsiio.ext_data_len = ext_data_len;
521 io->scsiio.ext_data_filled = 0;
522 io->scsiio.cdb_len = sc->vss_config.cdb_size;
523 memcpy(io->scsiio.cdb, cmd_rd->cdb, sc->vss_config.cdb_size);
525 if (pci_vtscsi_debug) {
526 struct sbuf *sb = sbuf_new_auto();
529 DPRINTF(("pci_virtio_scsi: %s", sbuf_data(sb)));
533 err = ioctl(sc->vss_ctl_fd, CTL_IO, io);
535 WPRINTF(("CTL_IO: err=%d (%s)", errno, strerror(errno)));
536 cmd_wr->response = VIRTIO_SCSI_S_FAILURE;
538 cmd_wr->sense_len = MIN(io->scsiio.sense_len,
539 sc->vss_config.sense_size);
540 cmd_wr->residual = io->scsiio.residual;
541 cmd_wr->status = io->scsiio.scsi_status;
542 cmd_wr->response = VIRTIO_SCSI_S_OK;
543 memcpy(&cmd_wr->sense, &io->scsiio.sense_data,
547 buf_to_iov(cmd_wr, VTSCSI_OUT_HEADER_LEN(sc), iov_out, niov_out, 0);
550 ctl_scsi_free_io(io);
551 return (VTSCSI_OUT_HEADER_LEN(sc) + io->scsiio.ext_data_filled);
/*
 * Control-queue kick handler: drain every available chain, flatten it
 * into a buffer, run the TMF/AN dispatcher synchronously, copy the
 * response back into the tail of the chain and complete it.
 */
555 pci_vtscsi_controlq_notify(void *vsc, struct vqueue_info *vq)
557 struct pci_vtscsi_softc *sc;
558 struct iovec iov[VTSCSI_MAXSEG];
566 while (vq_has_descs(vq)) {
567 n = vq_getchain(vq, &idx, iov, VTSCSI_MAXSEG, NULL);
568 assert(n >= 1 && n <= VTSCSI_MAXSEG);
/* Handle the request in place; the response overwrites the chain tail. */
569 bufsize = iov_to_buf(iov, n, &buf);
570 iolen = pci_vtscsi_control_handle(sc, buf, bufsize);
571 buf_to_iov(buf + bufsize - iolen, iolen, iov, n,
575 * Release this chain and handle more
577 vq_relchain(vq, idx, iolen);
579 vq_endchains(vq, 1); /* Generate interrupt if appropriate. */
/*
 * Event-queue kick handler. NOTE(review): the body is not visible in
 * this view — presumably event buffers are held until an event occurs;
 * confirm against the complete source.
 */
584 pci_vtscsi_eventq_notify(void *vsc, struct vqueue_info *vq)
/*
 * Request-queue kick handler: for each available chain, split it into
 * readable (device-readable) and writable iovec halves using the
 * per-descriptor write flags, package it as a pci_vtscsi_request and
 * enqueue it for the worker-thread pool; completion happens in
 * pci_vtscsi_proc().
 */
591 pci_vtscsi_requestq_notify(void *vsc, struct vqueue_info *vq)
593 struct pci_vtscsi_softc *sc;
594 struct pci_vtscsi_queue *q;
595 struct pci_vtscsi_request *req;
596 struct iovec iov[VTSCSI_MAXSEG];
597 uint16_t flags[VTSCSI_MAXSEG];
/* Request queues start at virtqueue index 2 (after control and event). */
602 q = &sc->vss_queues[vq->vq_num - 2];
604 while (vq_has_descs(vq)) {
606 n = vq_getchain(vq, &idx, iov, VTSCSI_MAXSEG, flags);
607 assert(n >= 1 && n <= VTSCSI_MAXSEG);
609 /* Count readable descriptors */
610 for (i = 0; i < n; i++) {
611 if (flags[i] & VRING_DESC_F_WRITE)
/* Readable descriptors precede writable ones in a valid chain. */
617 req = calloc(1, sizeof(struct pci_vtscsi_request));
620 req->vsr_niov_in = readable;
621 req->vsr_niov_out = n - readable;
622 memcpy(req->vsr_iov_in, iov,
623 req->vsr_niov_in * sizeof(struct iovec));
624 memcpy(req->vsr_iov_out, iov + readable,
625 req->vsr_niov_out * sizeof(struct iovec));
/* Hand off to a worker under the queue lock and wake one up. */
627 pthread_mutex_lock(&q->vsq_mtx);
628 STAILQ_INSERT_TAIL(&q->vsq_requests, req, vsr_link);
629 pthread_cond_signal(&q->vsq_cv);
630 pthread_mutex_unlock(&q->vsq_mtx);
632 DPRINTF(("virtio-scsi: request <idx=%d> enqueued", idx));
/*
 * Initialize one request queue: bind it to its virtqueue (offset by the
 * two fixed control/event queues), set up its locks, lists and condvar,
 * and spawn VTSCSI_THR_PER_Q named worker threads.
 */
637 pci_vtscsi_init_queue(struct pci_vtscsi_softc *sc,
638 struct pci_vtscsi_queue *queue, int num)
640 struct pci_vtscsi_worker *worker;
641 char tname[MAXCOMLEN + 1];
645 queue->vsq_vq = &sc->vss_vq[num + 2];
647 pthread_mutex_init(&queue->vsq_mtx, NULL);
648 pthread_mutex_init(&queue->vsq_qmtx, NULL);
649 pthread_cond_init(&queue->vsq_cv, NULL);
650 STAILQ_INIT(&queue->vsq_requests);
651 LIST_INIT(&queue->vsq_workers);
/* Spawn the worker pool; thread names encode queue and worker index. */
653 for (i = 0; i < VTSCSI_THR_PER_Q; i++) {
654 worker = calloc(1, sizeof(struct pci_vtscsi_worker));
655 worker->vsw_queue = queue;
657 pthread_create(&worker->vsw_thread, NULL, &pci_vtscsi_proc,
660 snprintf(tname, sizeof(tname), "vtscsi:%d-%d", num, i);
661 pthread_set_name_np(worker->vsw_thread, tname);
662 LIST_INSERT_HEAD(&queue->vsq_workers, worker, vsw_link);
/*
 * Device instantiation: parse the comma-separated option string
 * ("dev=<ctl device>", "iid=<initiator id>"), open the CAM Target
 * Layer device (default /dev/cam/ctl), link up with the generic virtio
 * layer, configure the control/event/request virtqueues, and program
 * the PCI config space identity for a virtio-scsi controller.
 */
669 pci_vtscsi_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
671 struct pci_vtscsi_softc *sc;
676 sc = calloc(1, sizeof(struct pci_vtscsi_softc));
677 devname = "/dev/cam/ctl";
/* Option parsing: bare first token is the device path shorthand. */
678 while ((opt = strsep(&opts, ",")) != NULL) {
679 optname = strsep(&opt, "=");
680 if (opt == NULL && optidx == 0) {
683 } else if (strcmp(optname, "dev") == 0 && opt != NULL) {
685 } else if (strcmp(optname, "iid") == 0 && opt != NULL) {
686 sc->vss_iid = strtoul(opt, NULL, 10);
688 EPRINTLN("Invalid option %s", optname);
695 sc->vss_ctl_fd = open(devname, O_RDWR);
696 if (sc->vss_ctl_fd < 0) {
697 WPRINTF(("cannot open %s: %s", devname, strerror(errno)));
702 vi_softc_linkup(&sc->vss_vs, &vtscsi_vi_consts, sc, pi, sc->vss_vq);
703 sc->vss_vs.vs_mtx = &sc->vss_mtx;
/* Virtqueue 0: control, 1: event, 2+: request queues with workers. */
706 sc->vss_vq[0].vq_qsize = VTSCSI_RINGSZ;
707 sc->vss_vq[0].vq_notify = pci_vtscsi_controlq_notify;
710 sc->vss_vq[1].vq_qsize = VTSCSI_RINGSZ;
711 sc->vss_vq[1].vq_notify = pci_vtscsi_eventq_notify;
714 for (i = 2; i < VTSCSI_MAXQ; i++) {
715 sc->vss_vq[i].vq_qsize = VTSCSI_RINGSZ;
716 sc->vss_vq[i].vq_notify = pci_vtscsi_requestq_notify;
717 pci_vtscsi_init_queue(sc, &sc->vss_queues[i - 2], i - 2);
720 /* initialize config space */
721 pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_SCSI);
722 pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
723 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
724 pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_TYPE_SCSI);
725 pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR);
/* Set up interrupts (MSI-X if available) and the I/O BAR. */
727 if (vi_intr_init(&sc->vss_vs, 1, fbsdrun_virtio_msix()))
729 vi_set_io_bar(&sc->vss_vs, 0);
/*
 * Device-emulation registration: exposes this driver to bhyve as
 * "virtio-scsi"; BAR accesses go through the generic virtio handlers.
 */
735 struct pci_devemu pci_de_vscsi = {
736 .pe_emu = "virtio-scsi",
737 .pe_init = pci_vtscsi_init,
738 .pe_barwrite = vi_pci_write,
739 .pe_barread = vi_pci_read
741 PCI_EMUL_SET(pci_de_vscsi);