2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2016 Jakub Klama <jceel@FreeBSD.org>.
5 * Copyright (c) 2018 Marcelo Araujo <araujo@FreeBSD.org>.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer
13 * in this position and unchanged.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 #include <sys/param.h>
32 #include <sys/linker_set.h>
33 #include <sys/types.h>
36 #include <sys/queue.h>
48 #include <pthread_np.h>
50 #include <cam/scsi/scsi_all.h>
51 #include <cam/scsi/scsi_message.h>
52 #include <cam/ctl/ctl.h>
53 #include <cam/ctl/ctl_io.h>
54 #include <cam/ctl/ctl_backend.h>
55 #include <cam/ctl/ctl_ioctl.h>
56 #include <cam/ctl/ctl_util.h>
57 #include <cam/ctl/ctl_scsi_all.h>
/* Virtqueue geometry and worker-pool tuning. */
67 #define VTSCSI_RINGSZ 64
68 #define VTSCSI_REQUESTQ 1
/* Worker threads servicing each request queue. */
69 #define VTSCSI_THR_PER_Q 16
/* Total vqs: control queue + event queue + the request queues. */
70 #define VTSCSI_MAXQ (VTSCSI_REQUESTQ + 2)
71 #define VTSCSI_MAXSEG 64
/* Guest-readable request header length: fixed part plus the configured CDB size. */
73 #define VTSCSI_IN_HEADER_LEN(_sc) \
74 (sizeof(struct pci_vtscsi_req_cmd_rd) + _sc->vss_config.cdb_size)
/* Guest-writable response header length: fixed part plus the configured sense size. */
76 #define VTSCSI_OUT_HEADER_LEN(_sc) \
77 (sizeof(struct pci_vtscsi_req_cmd_wr) + _sc->vss_config.sense_size)
/* Topology limits advertised to the guest via the config space. */
79 #define VIRTIO_SCSI_MAX_CHANNEL 0
80 #define VIRTIO_SCSI_MAX_TARGET 0
81 #define VIRTIO_SCSI_MAX_LUN 16383
/* virtio-scsi feature bits (virtio spec). */
83 #define VIRTIO_SCSI_F_INOUT (1 << 0)
84 #define VIRTIO_SCSI_F_HOTPLUG (1 << 1)
85 #define VIRTIO_SCSI_F_CHANGE (1 << 2)
/* Runtime debug toggle; DPRINTF output is suppressed unless set non-zero. */
87 static int pci_vtscsi_debug = 0;
88 #define WPRINTF(msg, params...) PRINTLN("virtio-scsi: " msg, ##params)
/*
 * NOTE(review): DPRINTF expands to a bare `if` without do { } while (0)
 * wrapping, so it is vulnerable to the dangling-else problem when used
 * unbraced inside an if/else — consider hardening.
 */
89 #define DPRINTF(msg, params...) if (pci_vtscsi_debug) WPRINTF(msg, ##params)
/*
 * Guest-visible virtio-scsi configuration space layout (packed to match the
 * wire format defined by the virtio specification).
 */
91 struct pci_vtscsi_config {
96 uint32_t event_info_size;
102 } __attribute__((packed));

/*
 * Per-request-queue state: back-pointer to the softc, the underlying
 * virtqueue, a mutex/condvar protecting the pending-request list, a separate
 * mutex serializing chain release on the vq, and the worker thread list.
 */
104 struct pci_vtscsi_queue {
105 struct pci_vtscsi_softc * vsq_sc;
106 struct vqueue_info * vsq_vq;
107 pthread_mutex_t vsq_mtx;
108 pthread_mutex_t vsq_qmtx;
109 pthread_cond_t vsq_cv;
110 STAILQ_HEAD(, pci_vtscsi_request) vsq_requests;
111 LIST_HEAD(, pci_vtscsi_worker) vsq_workers;

/* One worker thread bound to a request queue. */
114 struct pci_vtscsi_worker {
115 struct pci_vtscsi_queue * vsw_queue;
116 pthread_t vsw_thread;
118 LIST_ENTRY(pci_vtscsi_worker) vsw_link;

/*
 * A queued guest request: the owning queue plus copies of the readable (in)
 * and writable (out) iovec arrays captured from the descriptor chain.
 */
121 struct pci_vtscsi_request {
122 struct pci_vtscsi_queue * vsr_queue;
123 struct iovec vsr_iov_in[VTSCSI_MAXSEG];
125 struct iovec vsr_iov_out[VTSCSI_MAXSEG];
128 STAILQ_ENTRY(pci_vtscsi_request) vsr_link;

/*
 * Per-device softc: common virtio state, the vq array, per-request-queue
 * state, device mutex, negotiated features and the config-space image.
 */
134 struct pci_vtscsi_softc {
135 struct virtio_softc vss_vs;
136 struct vqueue_info vss_vq[VTSCSI_MAXQ];
137 struct pci_vtscsi_queue vss_queues[VTSCSI_REQUESTQ];
138 pthread_mutex_t vss_mtx;
141 uint32_t vss_features;
142 struct pci_vtscsi_config vss_config;
/* Control-queue request types and TMF (task management function) subtypes. */
145 #define VIRTIO_SCSI_T_TMF 0
146 #define VIRTIO_SCSI_T_TMF_ABORT_TASK 0
147 #define VIRTIO_SCSI_T_TMF_ABORT_TASK_SET 1
148 #define VIRTIO_SCSI_T_TMF_CLEAR_ACA 2
149 #define VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET 3
150 #define VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET 4
151 #define VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET 5
152 #define VIRTIO_SCSI_T_TMF_QUERY_TASK 6
153 #define VIRTIO_SCSI_T_TMF_QUERY_TASK_SET 7

155 /* command-specific response values */
156 #define VIRTIO_SCSI_S_FUNCTION_COMPLETE 0
157 #define VIRTIO_SCSI_S_FUNCTION_SUCCEEDED 10
158 #define VIRTIO_SCSI_S_FUNCTION_REJECTED 11

/* TMF control request, as laid out on the virtio ring (packed). */
160 struct pci_vtscsi_ctrl_tmf {
166 } __attribute__((packed));

/* Asynchronous-notification query type and event reason bits. */
168 #define VIRTIO_SCSI_T_AN_QUERY 1
169 #define VIRTIO_SCSI_EVT_ASYNC_OPERATIONAL_CHANGE 2
170 #define VIRTIO_SCSI_EVT_ASYNC_POWER_MGMT 4
171 #define VIRTIO_SCSI_EVT_ASYNC_EXTERNAL_REQUEST 8
172 #define VIRTIO_SCSI_EVT_ASYNC_MEDIA_CHANGE 16
173 #define VIRTIO_SCSI_EVT_ASYNC_MULTI_HOST 32
174 #define VIRTIO_SCSI_EVT_ASYNC_DEVICE_BUSY 64

/* AN control request (packed wire format). */
176 struct pci_vtscsi_ctrl_an {
179 uint32_t event_requested;
180 uint32_t event_actual;
182 } __attribute__((packed));

184 /* command-specific response values */
185 #define VIRTIO_SCSI_S_OK 0
186 #define VIRTIO_SCSI_S_OVERRUN 1
187 #define VIRTIO_SCSI_S_ABORTED 2
188 #define VIRTIO_SCSI_S_BAD_TARGET 3
189 #define VIRTIO_SCSI_S_RESET 4
190 #define VIRTIO_SCSI_S_BUSY 5
191 #define VIRTIO_SCSI_S_TRANSPORT_FAILURE 6
192 #define VIRTIO_SCSI_S_TARGET_FAILURE 7
193 #define VIRTIO_SCSI_S_NEXUS_FAILURE 8
194 #define VIRTIO_SCSI_S_FAILURE 9
195 #define VIRTIO_SCSI_S_INCORRECT_LUN 12

/* Task attribute values carried in the request header. */
198 #define VIRTIO_SCSI_S_SIMPLE 0
199 #define VIRTIO_SCSI_S_ORDERED 1
200 #define VIRTIO_SCSI_S_HEAD 2
201 #define VIRTIO_SCSI_S_ACA 3

/* Event-queue payload (packed wire format). */
203 struct pci_vtscsi_event {
207 } __attribute__((packed));

/* Guest-readable (request) command header; followed by the CDB. */
209 struct pci_vtscsi_req_cmd_rd {
216 } __attribute__((packed));

/* Guest-writable (response) command header; followed by sense data. */
218 struct pci_vtscsi_req_cmd_wr {
221 uint16_t status_qualifier;
225 } __attribute__((packed));
/* Forward declarations for the emulation's internal functions. */
227 static void *pci_vtscsi_proc(void *);
228 static void pci_vtscsi_reset(void *);
229 static void pci_vtscsi_neg_features(void *, uint64_t);
230 static int pci_vtscsi_cfgread(void *, int, int, uint32_t *);
231 static int pci_vtscsi_cfgwrite(void *, int, int, uint32_t);
232 static inline int pci_vtscsi_get_lun(uint8_t *);
233 static int pci_vtscsi_control_handle(struct pci_vtscsi_softc *, void *, size_t);
234 static int pci_vtscsi_tmf_handle(struct pci_vtscsi_softc *,
235 struct pci_vtscsi_ctrl_tmf *);
236 static int pci_vtscsi_an_handle(struct pci_vtscsi_softc *,
237 struct pci_vtscsi_ctrl_an *);
238 static int pci_vtscsi_request_handle(struct pci_vtscsi_queue *, struct iovec *,
239 int, struct iovec *, int);
240 static void pci_vtscsi_controlq_notify(void *, struct vqueue_info *);
241 static void pci_vtscsi_eventq_notify(void *, struct vqueue_info *);
242 static void pci_vtscsi_requestq_notify(void *, struct vqueue_info *);
243 static int pci_vtscsi_init_queue(struct pci_vtscsi_softc *,
244 struct pci_vtscsi_queue *, int);
245 static int pci_vtscsi_init(struct pci_devinst *, nvlist_t *);

/* Virtio operations table handed to the common virtio framework. */
247 static struct virtio_consts vtscsi_vi_consts = {
249 .vc_nvq = VTSCSI_MAXQ,
250 .vc_cfgsize = sizeof(struct pci_vtscsi_config),
251 .vc_reset = pci_vtscsi_reset,
252 .vc_cfgread = pci_vtscsi_cfgread,
253 .vc_cfgwrite = pci_vtscsi_cfgwrite,
254 .vc_apply_features = pci_vtscsi_neg_features,
/*
 * Worker thread body.  Blocks on the queue's condvar until a request is
 * queued (or the worker is asked to exit), pops the head of the pending
 * list, processes it outside the queue lock, then releases the used
 * descriptor chain back to the guest under the separate chain mutex.
 */
259 pci_vtscsi_proc(void *arg)
261 struct pci_vtscsi_worker *worker = (struct pci_vtscsi_worker *)arg;
262 struct pci_vtscsi_queue *q = worker->vsw_queue;
263 struct pci_vtscsi_request *req;

267 pthread_mutex_lock(&q->vsq_mtx);
/* Sleep until there is work or we are told to exit. */
269 while (STAILQ_EMPTY(&q->vsq_requests)
270 && !worker->vsw_exiting)
271 pthread_cond_wait(&q->vsq_cv, &q->vsq_mtx);

273 if (worker->vsw_exiting)

/* Dequeue the oldest pending request. */
276 req = STAILQ_FIRST(&q->vsq_requests);
277 STAILQ_REMOVE_HEAD(&q->vsq_requests, vsr_link);

/* Drop the list lock while the (potentially slow) CTL I/O runs. */
279 pthread_mutex_unlock(&q->vsq_mtx);
280 iolen = pci_vtscsi_request_handle(q, req->vsr_iov_in,
281 req->vsr_niov_in, req->vsr_iov_out, req->vsr_niov_out);

/* vsq_qmtx serializes vq_relchain/vq_endchains across workers. */
283 pthread_mutex_lock(&q->vsq_qmtx);
284 vq_relchain(q->vsq_vq, req->vsr_idx, iolen);
285 vq_endchains(q->vsq_vq, 0);
286 pthread_mutex_unlock(&q->vsq_qmtx);

288 DPRINTF("request <idx=%d> completed", req->vsr_idx);

292 pthread_mutex_unlock(&q->vsq_mtx);
/*
 * Device reset: reset the common virtio state, then rebuild the
 * guest-visible config space with this emulation's fixed limits.
 */
297 pci_vtscsi_reset(void *vsc)
299 struct pci_vtscsi_softc *sc;

303 DPRINTF("device reset requested");
304 vi_reset_dev(&sc->vss_vs);

306 /* initialize config structure */
307 sc->vss_config = (struct pci_vtscsi_config){
308 .num_queues = VTSCSI_REQUESTQ,
309 /* Leave room for the request and the response. */
310 .seg_max = VTSCSI_MAXSEG - 2,
313 .event_info_size = sizeof(struct pci_vtscsi_event),
316 .max_channel = VIRTIO_SCSI_MAX_CHANNEL,
317 .max_target = VIRTIO_SCSI_MAX_TARGET,
318 .max_lun = VIRTIO_SCSI_MAX_LUN
/* Record the feature bits the guest negotiated. */
323 pci_vtscsi_neg_features(void *vsc, uint64_t negotiated_features)
325 struct pci_vtscsi_softc *sc = vsc;

327 sc->vss_features = negotiated_features;
/*
 * Config-space read: copy `size` bytes at `offset` out of the cached
 * config image.  NOTE(review): no visible bounds check on offset/size
 * here — presumably the virtio framework validates them; confirm.
 */
331 pci_vtscsi_cfgread(void *vsc, int offset, int size, uint32_t *retval)
333 struct pci_vtscsi_softc *sc = vsc;

336 ptr = (uint8_t *)&sc->vss_config + offset;
337 memcpy(retval, ptr, size);
/* Config space is read-only for the guest; writes are ignored. */
342 pci_vtscsi_cfgwrite(void *vsc __unused, int offset __unused, int size __unused,
343 uint32_t val __unused)
/*
 * Extract the 14-bit LUN number from bytes 2-3 of the 8-byte virtio
 * LUN address field.
 */
349 pci_vtscsi_get_lun(uint8_t *lun)
352 return (((lun[2] << 8) | lun[3]) & 0x3fff);
/*
 * Dispatch a control-queue request by its leading 32-bit type field:
 * TMF requests go to pci_vtscsi_tmf_handle(), AN queries to
 * pci_vtscsi_an_handle().  Requests whose size does not exactly match
 * the expected structure are logged and ignored.
 */
356 pci_vtscsi_control_handle(struct pci_vtscsi_softc *sc, void *buf,
359 struct pci_vtscsi_ctrl_tmf *tmf;
360 struct pci_vtscsi_ctrl_an *an;

/* Must at least contain the type field. */
363 if (bufsize < sizeof(uint32_t)) {
364 WPRINTF("ignoring truncated control request");

368 type = *(uint32_t *)buf;

370 if (type == VIRTIO_SCSI_T_TMF) {
371 if (bufsize != sizeof(*tmf)) {
372 WPRINTF("ignoring tmf request with size %zu", bufsize);
375 tmf = (struct pci_vtscsi_ctrl_tmf *)buf;
376 return (pci_vtscsi_tmf_handle(sc, tmf));

379 if (type == VIRTIO_SCSI_T_AN_QUERY) {
380 if (bufsize != sizeof(*an)) {
381 WPRINTF("ignoring AN request with size %zu", bufsize);
384 an = (struct pci_vtscsi_ctrl_an *)buf;
385 return (pci_vtscsi_an_handle(sc, an));
/*
 * Translate a virtio-scsi TMF request into a CAM CTL task I/O, submit it
 * to the kernel via the CTL_IO ioctl, and copy the resulting task status
 * back into the guest-visible response field.
 */
392 pci_vtscsi_tmf_handle(struct pci_vtscsi_softc *sc,
393 struct pci_vtscsi_ctrl_tmf *tmf)

/* Allocate and zero a CTL I/O, then fill in the task nexus and tag. */
398 io = ctl_scsi_alloc_io(sc->vss_iid);
399 ctl_scsi_zero_io(io);

401 io->io_hdr.io_type = CTL_IO_TASK;
402 io->io_hdr.nexus.initid = sc->vss_iid;
403 io->io_hdr.nexus.targ_lun = pci_vtscsi_get_lun(tmf->lun);
404 io->taskio.tag_type = CTL_TAG_SIMPLE;
405 io->taskio.tag_num = tmf->id;
406 io->io_hdr.flags |= CTL_FLAG_USER_TAG;

/* Map the virtio TMF subtype onto the corresponding CTL task action. */
408 switch (tmf->subtype) {
409 case VIRTIO_SCSI_T_TMF_ABORT_TASK:
410 io->taskio.task_action = CTL_TASK_ABORT_TASK;
413 case VIRTIO_SCSI_T_TMF_ABORT_TASK_SET:
414 io->taskio.task_action = CTL_TASK_ABORT_TASK_SET;
417 case VIRTIO_SCSI_T_TMF_CLEAR_ACA:
418 io->taskio.task_action = CTL_TASK_CLEAR_ACA;
421 case VIRTIO_SCSI_T_TMF_CLEAR_TASK_SET:
422 io->taskio.task_action = CTL_TASK_CLEAR_TASK_SET;
425 case VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET:
426 io->taskio.task_action = CTL_TASK_I_T_NEXUS_RESET;
429 case VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET:
430 io->taskio.task_action = CTL_TASK_LUN_RESET;
433 case VIRTIO_SCSI_T_TMF_QUERY_TASK:
434 io->taskio.task_action = CTL_TASK_QUERY_TASK;
437 case VIRTIO_SCSI_T_TMF_QUERY_TASK_SET:
438 io->taskio.task_action = CTL_TASK_QUERY_TASK_SET;

/* Optionally dump the CTL request when debugging is enabled. */
442 if (pci_vtscsi_debug) {
443 struct sbuf *sb = sbuf_new_auto();
446 DPRINTF("%s", sbuf_data(sb));

/* Hand the task I/O to the kernel CTL layer. */
450 err = ioctl(sc->vss_ctl_fd, CTL_IO, io);
452 WPRINTF("CTL_IO: err=%d (%s)", errno, strerror(errno));

454 tmf->response = io->taskio.task_status;
455 ctl_scsi_free_io(io);
/*
 * Asynchronous-notification query handler.  Both parameters are unused
 * here; the visible signature suggests a stub — body elided in this view.
 */
460 pci_vtscsi_an_handle(struct pci_vtscsi_softc *sc __unused,
461 struct pci_vtscsi_ctrl_an *an __unused)
/*
 * Translate one virtio-scsi command into a CAM CTL SCSI I/O and execute it
 * synchronously via the CTL_IO ioctl.
 *
 * iov_in/niov_in describe the guest-readable part of the chain (request
 * header + optional data-out payload); iov_out/niov_out describe the
 * guest-writable part (response header + optional data-in buffer).
 * Returns the number of bytes written to the writable part of the chain.
 */
467 pci_vtscsi_request_handle(struct pci_vtscsi_queue *q, struct iovec *iov_in,
468 int niov_in, struct iovec *iov_out, int niov_out)
470 struct pci_vtscsi_softc *sc = q->vsq_sc;
471 struct pci_vtscsi_req_cmd_rd *cmd_rd = NULL;
472 struct pci_vtscsi_req_cmd_wr *cmd_wr;
473 struct iovec data_iov_in[VTSCSI_MAXSEG], data_iov_out[VTSCSI_MAXSEG];
475 int data_niov_in, data_niov_out;
476 void *ext_data_ptr = NULL;
477 uint32_t ext_data_len = 0, ext_sg_entries = 0;

/* Reject chains too small to hold the fixed request/response headers. */
480 if (count_iov(iov_out, niov_out) < VTSCSI_OUT_HEADER_LEN(sc)) {
481 WPRINTF("ignoring request with insufficient output");
484 if (count_iov(iov_in, niov_in) < VTSCSI_IN_HEADER_LEN(sc)) {
485 WPRINTF("ignoring request with incomplete header");

/* Split each chain into header (kept in iov_*) and data (data_iov_*). */
489 seek_iov(iov_in, niov_in, data_iov_in, &data_niov_in,
490 VTSCSI_IN_HEADER_LEN(sc));
491 seek_iov(iov_out, niov_out, data_iov_out, &data_niov_out,
492 VTSCSI_OUT_HEADER_LEN(sc));

494 truncate_iov(iov_in, &niov_in, VTSCSI_IN_HEADER_LEN(sc));
495 truncate_iov(iov_out, &niov_out, VTSCSI_OUT_HEADER_LEN(sc));
/* Flatten the request header into a contiguous buffer (allocates cmd_rd). */
496 iov_to_buf(iov_in, niov_in, (void **)&cmd_rd);

498 cmd_wr = calloc(1, VTSCSI_OUT_HEADER_LEN(sc));
499 io = ctl_scsi_alloc_io(sc->vss_iid);
500 ctl_scsi_zero_io(io);

502 io->io_hdr.nexus.initid = sc->vss_iid;
503 io->io_hdr.nexus.targ_lun = pci_vtscsi_get_lun(cmd_rd->lun);

505 io->io_hdr.io_type = CTL_IO_SCSI;

/*
 * Point CTL at the data segments directly: readable guest buffers are a
 * data-out transfer (guest -> device), writable ones data-in.
 */
507 if (data_niov_in > 0) {
508 ext_data_ptr = (void *)data_iov_in;
509 ext_sg_entries = data_niov_in;
510 ext_data_len = count_iov(data_iov_in, data_niov_in);
511 io->io_hdr.flags |= CTL_FLAG_DATA_OUT;
512 } else if (data_niov_out > 0) {
513 ext_data_ptr = (void *)data_iov_out;
514 ext_sg_entries = data_niov_out;
515 ext_data_len = count_iov(data_iov_out, data_niov_out);
516 io->io_hdr.flags |= CTL_FLAG_DATA_IN;

519 io->scsiio.sense_len = sc->vss_config.sense_size;
520 io->scsiio.tag_num = cmd_rd->id;
521 io->io_hdr.flags |= CTL_FLAG_USER_TAG;
/* Map the virtio task attribute to the matching CTL tag type. */
522 switch (cmd_rd->task_attr) {
523 case VIRTIO_SCSI_S_ORDERED:
524 io->scsiio.tag_type = CTL_TAG_ORDERED;
526 case VIRTIO_SCSI_S_HEAD:
527 io->scsiio.tag_type = CTL_TAG_HEAD_OF_QUEUE;
529 case VIRTIO_SCSI_S_ACA:
530 io->scsiio.tag_type = CTL_TAG_ACA;
532 case VIRTIO_SCSI_S_SIMPLE:
534 io->scsiio.tag_type = CTL_TAG_SIMPLE;

537 io->scsiio.ext_sg_entries = ext_sg_entries;
538 io->scsiio.ext_data_ptr = ext_data_ptr;
539 io->scsiio.ext_data_len = ext_data_len;
540 io->scsiio.ext_data_filled = 0;
541 io->scsiio.cdb_len = sc->vss_config.cdb_size;
542 memcpy(io->scsiio.cdb, cmd_rd->cdb, sc->vss_config.cdb_size);

/* Optionally dump the CTL request when debugging is enabled. */
544 if (pci_vtscsi_debug) {
545 struct sbuf *sb = sbuf_new_auto();
548 DPRINTF("%s", sbuf_data(sb));

/* Execute synchronously; on ioctl failure report a generic failure. */
552 err = ioctl(sc->vss_ctl_fd, CTL_IO, io);
554 WPRINTF("CTL_IO: err=%d (%s)", errno, strerror(errno));
555 cmd_wr->response = VIRTIO_SCSI_S_FAILURE;

/* Success path: fill in sense, residual and SCSI status for the guest. */
557 cmd_wr->sense_len = MIN(io->scsiio.sense_len,
558 sc->vss_config.sense_size);
559 cmd_wr->residual = ext_data_len - io->scsiio.ext_data_filled;
560 cmd_wr->status = io->scsiio.scsi_status;
561 cmd_wr->response = VIRTIO_SCSI_S_OK;
562 memcpy(&cmd_wr->sense, &io->scsiio.sense_data,

/* Copy the response header back into the writable chain. */
566 buf_to_iov(cmd_wr, VTSCSI_OUT_HEADER_LEN(sc), iov_out, niov_out, 0);
567 nxferred = VTSCSI_OUT_HEADER_LEN(sc) + io->scsiio.ext_data_filled;

570 ctl_scsi_free_io(io);
/*
 * Control-queue kick handler: drain every available chain, flatten it to a
 * buffer, process the control request inline, copy the response back at
 * the tail of the buffer, and release the chain.  Runs synchronously in
 * the notify context (no worker thread).
 */
575 pci_vtscsi_controlq_notify(void *vsc, struct vqueue_info *vq)
577 struct pci_vtscsi_softc *sc;
578 struct iovec iov[VTSCSI_MAXSEG];

586 while (vq_has_descs(vq)) {
587 n = vq_getchain(vq, iov, VTSCSI_MAXSEG, &req);
588 assert(n >= 1 && n <= VTSCSI_MAXSEG);

590 bufsize = iov_to_buf(iov, n, &buf);
591 iolen = pci_vtscsi_control_handle(sc, buf, bufsize);
/* Response occupies the last iolen bytes of the flattened buffer. */
592 buf_to_iov((uint8_t *)buf + bufsize - iolen, iolen, iov, n,

596 * Release this chain and handle more
598 vq_relchain(vq, req.idx, iolen);

600 vq_endchains(vq, 1); /* Generate interrupt if appropriate. */
/* Event-queue kick handler; softc unused — body elided in this view. */
605 pci_vtscsi_eventq_notify(void *vsc __unused, struct vqueue_info *vq)
/*
 * Request-queue kick handler: pull each available chain off the vq,
 * capture its readable/writable iovecs into a freshly allocated request,
 * enqueue it on the queue's pending list and wake one worker thread.
 * The actual SCSI processing happens asynchronously in pci_vtscsi_proc().
 */
611 pci_vtscsi_requestq_notify(void *vsc, struct vqueue_info *vq)
613 struct pci_vtscsi_softc *sc;
614 struct pci_vtscsi_queue *q;
615 struct pci_vtscsi_request *req;
616 struct iovec iov[VTSCSI_MAXSEG];

/* Request queues start at vq index 2 (after control and event queues). */
621 q = &sc->vss_queues[vq->vq_num - 2];

623 while (vq_has_descs(vq)) {
624 n = vq_getchain(vq, iov, VTSCSI_MAXSEG, &vireq);
625 assert(n >= 1 && n <= VTSCSI_MAXSEG);

627 req = calloc(1, sizeof(struct pci_vtscsi_request));
628 req->vsr_idx = vireq.idx;
630 req->vsr_niov_in = vireq.readable;
631 req->vsr_niov_out = vireq.writable;
/* Readable descriptors come first in iov, writable ones follow. */
632 memcpy(req->vsr_iov_in, iov,
633 req->vsr_niov_in * sizeof(struct iovec));
634 memcpy(req->vsr_iov_out, iov + vireq.readable,
635 req->vsr_niov_out * sizeof(struct iovec));

637 pthread_mutex_lock(&q->vsq_mtx);
638 STAILQ_INSERT_TAIL(&q->vsq_requests, req, vsr_link);
639 pthread_cond_signal(&q->vsq_cv);
640 pthread_mutex_unlock(&q->vsq_mtx);

642 DPRINTF("request <idx=%d> enqueued", vireq.idx);
/*
 * Initialize request queue `num`: bind it to vq index num+2, set up its
 * locks, condvar and lists, then spawn VTSCSI_THR_PER_Q worker threads
 * named "vtscsi:<queue>-<worker>".
 */
647 pci_vtscsi_init_queue(struct pci_vtscsi_softc *sc,
648 struct pci_vtscsi_queue *queue, int num)
650 struct pci_vtscsi_worker *worker;
651 char tname[MAXCOMLEN + 1];

/* Skip the control (0) and event (1) queues. */
655 queue->vsq_vq = &sc->vss_vq[num + 2];

657 pthread_mutex_init(&queue->vsq_mtx, NULL);
658 pthread_mutex_init(&queue->vsq_qmtx, NULL);
659 pthread_cond_init(&queue->vsq_cv, NULL);
660 STAILQ_INIT(&queue->vsq_requests);
661 LIST_INIT(&queue->vsq_workers);

663 for (i = 0; i < VTSCSI_THR_PER_Q; i++) {
664 worker = calloc(1, sizeof(struct pci_vtscsi_worker));
665 worker->vsw_queue = queue;

667 pthread_create(&worker->vsw_thread, NULL, &pci_vtscsi_proc,

670 snprintf(tname, sizeof(tname), "vtscsi:%d-%d", num, i);
671 pthread_set_name_np(worker->vsw_thread, tname);
672 LIST_INSERT_HEAD(&queue->vsq_workers, worker, vsw_link);
/*
 * Parse the legacy comma-separated option string: everything up to the
 * first ',' is the CTL device path ("dev"); the remainder is handed to
 * the generic key=value legacy parser.
 */
679 pci_vtscsi_legacy_config(nvlist_t *nvl, const char *opts)

686 cp = strchr(opts, ',');
/* No comma: the whole string is the device path. */
688 set_config_value_node(nvl, "dev", opts);

691 devname = strndup(opts, cp - opts);
692 set_config_value_node(nvl, "dev", devname);

694 return (pci_parse_legacy_config(nvl, cp + 1));
/*
 * Device instantiation: allocate the softc, read the "iid", "bootindex"
 * and "dev" config values, open the CTL device (default /dev/cam/ctl),
 * link up the virtio framework, configure the control/event/request
 * virtqueues, and program the PCI config-space identity registers.
 */
698 pci_vtscsi_init(struct pci_devinst *pi, nvlist_t *nvl)
700 struct pci_vtscsi_softc *sc;
701 const char *devname, *value;

704 sc = calloc(1, sizeof(struct pci_vtscsi_softc));
/* Optional initiator id for CTL; parsed as a base-10 integer. */
705 value = get_config_value_node(nvl, "iid");
707 sc->vss_iid = strtoul(value, NULL, 10);

709 value = get_config_value_node(nvl, "bootindex");
711 if (pci_emul_add_boot_device(pi, atoi(value))) {
712 EPRINTLN("Invalid bootindex %d", atoi(value));

718 devname = get_config_value_node(nvl, "dev");
720 devname = "/dev/cam/ctl";
721 sc->vss_ctl_fd = open(devname, O_RDWR);
722 if (sc->vss_ctl_fd < 0) {
723 WPRINTF("cannot open %s: %s", devname, strerror(errno));

728 pthread_mutex_init(&sc->vss_mtx, NULL);

730 vi_softc_linkup(&sc->vss_vs, &vtscsi_vi_consts, sc, pi, sc->vss_vq);
731 sc->vss_vs.vs_mtx = &sc->vss_mtx;

/* vq 0: control queue. */
734 sc->vss_vq[0].vq_qsize = VTSCSI_RINGSZ;
735 sc->vss_vq[0].vq_notify = pci_vtscsi_controlq_notify;

/* vq 1: event queue. */
738 sc->vss_vq[1].vq_qsize = VTSCSI_RINGSZ;
739 sc->vss_vq[1].vq_notify = pci_vtscsi_eventq_notify;

/* vqs 2..MAXQ-1: request queues, each with its own worker pool. */
742 for (i = 2; i < VTSCSI_MAXQ; i++) {
743 sc->vss_vq[i].vq_qsize = VTSCSI_RINGSZ;
744 sc->vss_vq[i].vq_notify = pci_vtscsi_requestq_notify;
745 pci_vtscsi_init_queue(sc, &sc->vss_queues[i - 2], i - 2);

748 /* initialize config space */
749 pci_set_cfgdata16(pi, PCIR_DEVICE, VIRTIO_DEV_SCSI);
750 pci_set_cfgdata16(pi, PCIR_VENDOR, VIRTIO_VENDOR);
751 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
752 pci_set_cfgdata16(pi, PCIR_SUBDEV_0, VIRTIO_ID_SCSI);
753 pci_set_cfgdata16(pi, PCIR_SUBVEND_0, VIRTIO_VENDOR);

755 if (vi_intr_init(&sc->vss_vs, 1, fbsdrun_virtio_msix()))
757 vi_set_io_bar(&sc->vss_vs, 0);
/* Emulation descriptor: registers "virtio-scsi" with the bhyve PCI framework. */
763 static const struct pci_devemu pci_de_vscsi = {
764 .pe_emu = "virtio-scsi",
765 .pe_init = pci_vtscsi_init,
766 .pe_legacy_config = pci_vtscsi_legacy_config,
767 .pe_barwrite = vi_pci_write,
768 .pe_barread = vi_pci_read
770 PCI_EMUL_SET(pci_de_vscsi);