/*-
 * Copyright (c) 2012, Bryan Venteicher <bryanv@daemoninthecloset.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/* Driver for VirtIO SCSI devices. */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sglist.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/callout.h>
#include <sys/taskqueue.h>
#include <sys/queue.h>
#include <sys/sbuf.h>

#include <machine/stdarg.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <dev/virtio/virtio.h>
#include <dev/virtio/virtqueue.h>
#include <dev/virtio/scsi/virtio_scsi.h>
#include <dev/virtio/scsi/virtio_scsivar.h>

#include "virtio_if.h"
70 static int vtscsi_modevent(module_t, int, void *);
72 static int vtscsi_probe(device_t);
73 static int vtscsi_attach(device_t);
74 static int vtscsi_detach(device_t);
75 static int vtscsi_suspend(device_t);
76 static int vtscsi_resume(device_t);
78 static void vtscsi_negotiate_features(struct vtscsi_softc *);
79 static int vtscsi_maximum_segments(struct vtscsi_softc *, int);
80 static int vtscsi_alloc_virtqueues(struct vtscsi_softc *);
81 static void vtscsi_write_device_config(struct vtscsi_softc *);
82 static int vtscsi_reinit(struct vtscsi_softc *);
84 static int vtscsi_alloc_cam(struct vtscsi_softc *);
85 static int vtscsi_register_cam(struct vtscsi_softc *);
86 static void vtscsi_free_cam(struct vtscsi_softc *);
87 static void vtscsi_cam_async(void *, uint32_t, struct cam_path *, void *);
88 static int vtscsi_register_async(struct vtscsi_softc *);
89 static void vtscsi_deregister_async(struct vtscsi_softc *);
90 static void vtscsi_cam_action(struct cam_sim *, union ccb *);
91 static void vtscsi_cam_poll(struct cam_sim *);
93 static void vtscsi_cam_scsi_io(struct vtscsi_softc *, struct cam_sim *,
95 static void vtscsi_cam_get_tran_settings(struct vtscsi_softc *,
97 static void vtscsi_cam_reset_bus(struct vtscsi_softc *, union ccb *);
98 static void vtscsi_cam_reset_dev(struct vtscsi_softc *, union ccb *);
99 static void vtscsi_cam_abort(struct vtscsi_softc *, union ccb *);
100 static void vtscsi_cam_path_inquiry(struct vtscsi_softc *,
101 struct cam_sim *, union ccb *);
103 static int vtscsi_sg_append_scsi_buf(struct vtscsi_softc *,
104 struct sglist *, struct ccb_scsiio *);
105 static int vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *,
106 struct vtscsi_request *, int *, int *);
107 static int vtscsi_execute_scsi_cmd(struct vtscsi_softc *,
108 struct vtscsi_request *);
109 static int vtscsi_start_scsi_cmd(struct vtscsi_softc *, union ccb *);
110 static void vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *,
111 struct vtscsi_request *);
112 static int vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *,
113 struct vtscsi_request *);
114 static void vtscsi_timedout_scsi_cmd(void *);
115 static cam_status vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *);
116 static cam_status vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *,
117 struct ccb_scsiio *, struct virtio_scsi_cmd_resp *);
118 static void vtscsi_complete_scsi_cmd(struct vtscsi_softc *,
119 struct vtscsi_request *);
121 static void vtscsi_poll_ctrl_req(struct vtscsi_softc *,
122 struct vtscsi_request *);
123 static int vtscsi_execute_ctrl_req(struct vtscsi_softc *,
124 struct vtscsi_request *, struct sglist *, int, int, int);
125 static void vtscsi_complete_abort_task_cmd(struct vtscsi_softc *c,
126 struct vtscsi_request *);
127 static int vtscsi_execute_abort_task_cmd(struct vtscsi_softc *,
128 struct vtscsi_request *);
129 static int vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *,
130 struct vtscsi_request *);
132 static void vtscsi_get_request_lun(uint8_t [], target_id_t *, lun_id_t *);
133 static void vtscsi_set_request_lun(struct ccb_hdr *, uint8_t []);
134 static void vtscsi_init_scsi_cmd_req(struct ccb_scsiio *,
135 struct virtio_scsi_cmd_req *);
136 static void vtscsi_init_ctrl_tmf_req(struct ccb_hdr *, uint32_t,
137 uintptr_t, struct virtio_scsi_ctrl_tmf_req *);
139 static void vtscsi_freeze_simq(struct vtscsi_softc *, int);
140 static int vtscsi_thaw_simq(struct vtscsi_softc *, int);
142 static void vtscsi_announce(struct vtscsi_softc *, uint32_t, target_id_t,
144 static void vtscsi_execute_rescan(struct vtscsi_softc *, target_id_t,
146 static void vtscsi_execute_rescan_bus(struct vtscsi_softc *);
148 static void vtscsi_handle_event(struct vtscsi_softc *,
149 struct virtio_scsi_event *);
150 static int vtscsi_enqueue_event_buf(struct vtscsi_softc *,
151 struct virtio_scsi_event *);
152 static int vtscsi_init_event_vq(struct vtscsi_softc *);
153 static void vtscsi_reinit_event_vq(struct vtscsi_softc *);
154 static void vtscsi_drain_event_vq(struct vtscsi_softc *);
156 static void vtscsi_complete_vqs_locked(struct vtscsi_softc *);
157 static void vtscsi_complete_vqs(struct vtscsi_softc *);
158 static void vtscsi_drain_vqs(struct vtscsi_softc *);
159 static void vtscsi_cancel_request(struct vtscsi_softc *,
160 struct vtscsi_request *);
161 static void vtscsi_drain_vq(struct vtscsi_softc *, struct virtqueue *);
162 static void vtscsi_stop(struct vtscsi_softc *);
163 static int vtscsi_reset_bus(struct vtscsi_softc *);
165 static void vtscsi_init_request(struct vtscsi_softc *,
166 struct vtscsi_request *);
167 static int vtscsi_alloc_requests(struct vtscsi_softc *);
168 static void vtscsi_free_requests(struct vtscsi_softc *);
169 static void vtscsi_enqueue_request(struct vtscsi_softc *,
170 struct vtscsi_request *);
171 static struct vtscsi_request * vtscsi_dequeue_request(struct vtscsi_softc *);
173 static void vtscsi_complete_request(struct vtscsi_request *);
174 static void vtscsi_complete_vq(struct vtscsi_softc *, struct virtqueue *);
175 static void vtscsi_control_vq_task(void *, int);
176 static void vtscsi_event_vq_task(void *, int);
177 static void vtscsi_request_vq_task(void *, int);
179 static int vtscsi_control_vq_intr(void *);
180 static int vtscsi_event_vq_intr(void *);
181 static int vtscsi_request_vq_intr(void *);
182 static void vtscsi_disable_vqs_intr(struct vtscsi_softc *);
183 static void vtscsi_enable_vqs_intr(struct vtscsi_softc *);
185 static void vtscsi_get_tunables(struct vtscsi_softc *);
186 static void vtscsi_add_sysctl(struct vtscsi_softc *);
188 static void vtscsi_printf_req(struct vtscsi_request *, const char *,
/* Global tunables. */
/*
 * The current QEMU VirtIO SCSI implementation does not cancel in-flight
 * IO during virtio_stop(). So in-flight requests still complete after the
 * device reset. We would have to wait for all the in-flight IO to complete,
 * which defeats the typical purpose of a bus reset. We could simulate the
 * bus reset with either I_T_NEXUS_RESET of all the targets, or with
 * LOGICAL_UNIT_RESET of all the LUNs (assuming there is space in the
 * control virtqueue). But this isn't very useful if things really go off
 * the rails, so default to disabled for now.
 */
202 static int vtscsi_bus_reset_disable = 1;
203 TUNABLE_INT("hw.vtscsi.bus_reset_disable", &vtscsi_bus_reset_disable);
205 static struct virtio_feature_desc vtscsi_feature_desc[] = {
206 { VIRTIO_SCSI_F_INOUT, "InOut" },
207 { VIRTIO_SCSI_F_HOTPLUG, "Hotplug" },
212 static device_method_t vtscsi_methods[] = {
213 /* Device methods. */
214 DEVMETHOD(device_probe, vtscsi_probe),
215 DEVMETHOD(device_attach, vtscsi_attach),
216 DEVMETHOD(device_detach, vtscsi_detach),
217 DEVMETHOD(device_suspend, vtscsi_suspend),
218 DEVMETHOD(device_resume, vtscsi_resume),
223 static driver_t vtscsi_driver = {
226 sizeof(struct vtscsi_softc)
228 static devclass_t vtscsi_devclass;
230 DRIVER_MODULE(virtio_scsi, virtio_pci, vtscsi_driver, vtscsi_devclass,
232 MODULE_VERSION(virtio_scsi, 1);
233 MODULE_DEPEND(virtio_scsi, virtio, 1, 1, 1);
234 MODULE_DEPEND(virtio_scsi, cam, 1, 1, 1);
237 vtscsi_modevent(module_t mod, int type, void *unused)
257 vtscsi_probe(device_t dev)
260 if (virtio_get_device_type(dev) != VIRTIO_ID_SCSI)
263 device_set_desc(dev, "VirtIO SCSI Adapter");
265 return (BUS_PROBE_DEFAULT);
269 vtscsi_attach(device_t dev)
271 struct vtscsi_softc *sc;
272 struct virtio_scsi_config scsicfg;
275 sc = device_get_softc(dev);
276 sc->vtscsi_dev = dev;
278 VTSCSI_LOCK_INIT(sc, device_get_nameunit(dev));
279 TAILQ_INIT(&sc->vtscsi_req_free);
281 vtscsi_get_tunables(sc);
282 vtscsi_add_sysctl(sc);
284 virtio_set_feature_desc(dev, vtscsi_feature_desc);
285 vtscsi_negotiate_features(sc);
287 if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
288 sc->vtscsi_flags |= VTSCSI_FLAG_INDIRECT;
289 if (virtio_with_feature(dev, VIRTIO_SCSI_F_INOUT))
290 sc->vtscsi_flags |= VTSCSI_FLAG_BIDIRECTIONAL;
291 if (virtio_with_feature(dev, VIRTIO_SCSI_F_HOTPLUG))
292 sc->vtscsi_flags |= VTSCSI_FLAG_HOTPLUG;
294 virtio_read_device_config(dev, 0, &scsicfg,
295 sizeof(struct virtio_scsi_config));
297 sc->vtscsi_max_channel = scsicfg.max_channel;
298 sc->vtscsi_max_target = scsicfg.max_target;
299 sc->vtscsi_max_lun = scsicfg.max_lun;
300 sc->vtscsi_event_buf_size = scsicfg.event_info_size;
302 vtscsi_write_device_config(sc);
304 sc->vtscsi_max_nsegs = vtscsi_maximum_segments(sc, scsicfg.seg_max);
305 sc->vtscsi_sglist = sglist_alloc(sc->vtscsi_max_nsegs, M_NOWAIT);
306 if (sc->vtscsi_sglist == NULL) {
308 device_printf(dev, "cannot allocate sglist\n");
312 error = vtscsi_alloc_virtqueues(sc);
314 device_printf(dev, "cannot allocate virtqueues\n");
318 error = vtscsi_init_event_vq(sc);
320 device_printf(dev, "cannot populate the eventvq\n");
324 error = vtscsi_alloc_requests(sc);
326 device_printf(dev, "cannot allocate requests\n");
330 error = vtscsi_alloc_cam(sc);
332 device_printf(dev, "cannot allocate CAM structures\n");
336 TASK_INIT(&sc->vtscsi_control_intr_task, 0,
337 vtscsi_control_vq_task, sc);
338 TASK_INIT(&sc->vtscsi_event_intr_task, 0,
339 vtscsi_event_vq_task, sc);
340 TASK_INIT(&sc->vtscsi_request_intr_task, 0,
341 vtscsi_request_vq_task, sc);
343 sc->vtscsi_tq = taskqueue_create_fast("vtscsi_taskq", M_NOWAIT,
344 taskqueue_thread_enqueue, &sc->vtscsi_tq);
345 if (sc->vtscsi_tq == NULL) {
347 device_printf(dev, "cannot allocate taskqueue\n");
351 error = virtio_setup_intr(dev, INTR_TYPE_CAM);
353 device_printf(dev, "cannot setup virtqueue interrupts\n");
357 taskqueue_start_threads(&sc->vtscsi_tq, 1, PI_DISK, "%s taskq",
358 device_get_nameunit(dev));
360 vtscsi_enable_vqs_intr(sc);
363 * Register with CAM after interrupts are enabled so we will get
364 * notified of the probe responses.
366 error = vtscsi_register_cam(sc);
368 device_printf(dev, "cannot register with CAM\n");
380 vtscsi_detach(device_t dev)
382 struct vtscsi_softc *sc;
384 sc = device_get_softc(dev);
387 sc->vtscsi_flags |= VTSCSI_FLAG_DETACH;
388 if (device_is_attached(dev))
392 if (sc->vtscsi_tq != NULL) {
393 taskqueue_drain(sc->vtscsi_tq, &sc->vtscsi_control_intr_task);
394 taskqueue_drain(sc->vtscsi_tq, &sc->vtscsi_event_intr_task);
395 taskqueue_drain(sc->vtscsi_tq, &sc->vtscsi_request_intr_task);
396 taskqueue_free(sc->vtscsi_tq);
397 sc->vtscsi_tq = NULL;
400 vtscsi_complete_vqs(sc);
401 vtscsi_drain_vqs(sc);
404 vtscsi_free_requests(sc);
406 if (sc->vtscsi_sglist != NULL) {
407 sglist_free(sc->vtscsi_sglist);
408 sc->vtscsi_sglist = NULL;
411 VTSCSI_LOCK_DESTROY(sc);
417 vtscsi_suspend(device_t dev)
424 vtscsi_resume(device_t dev)
431 vtscsi_negotiate_features(struct vtscsi_softc *sc)
436 dev = sc->vtscsi_dev;
437 features = virtio_negotiate_features(dev, VTSCSI_FEATURES);
438 sc->vtscsi_features = features;
442 vtscsi_maximum_segments(struct vtscsi_softc *sc, int seg_max)
446 nsegs = VTSCSI_MIN_SEGMENTS;
449 nsegs += MIN(seg_max, MAXPHYS / PAGE_SIZE + 1);
450 if (sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT)
451 nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
459 vtscsi_alloc_virtqueues(struct vtscsi_softc *sc)
462 struct vq_alloc_info vq_info[3];
465 dev = sc->vtscsi_dev;
468 VQ_ALLOC_INFO_INIT(&vq_info[0], 0, vtscsi_control_vq_intr, sc,
469 &sc->vtscsi_control_vq, "%s control", device_get_nameunit(dev));
471 VQ_ALLOC_INFO_INIT(&vq_info[1], 0, vtscsi_event_vq_intr, sc,
472 &sc->vtscsi_event_vq, "%s event", device_get_nameunit(dev));
474 VQ_ALLOC_INFO_INIT(&vq_info[2], sc->vtscsi_max_nsegs,
475 vtscsi_request_vq_intr, sc, &sc->vtscsi_request_vq,
476 "%s request", device_get_nameunit(dev));
478 return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info));
482 vtscsi_write_device_config(struct vtscsi_softc *sc)
485 virtio_write_dev_config_4(sc->vtscsi_dev,
486 offsetof(struct virtio_scsi_config, sense_size),
487 VIRTIO_SCSI_SENSE_SIZE);
490 * This is the size in the virtio_scsi_cmd_req structure. Note
491 * this value (32) is larger than the maximum CAM CDB size (16).
493 virtio_write_dev_config_4(sc->vtscsi_dev,
494 offsetof(struct virtio_scsi_config, cdb_size),
495 VIRTIO_SCSI_CDB_SIZE);
499 vtscsi_reinit(struct vtscsi_softc *sc)
504 dev = sc->vtscsi_dev;
506 error = virtio_reinit(dev, sc->vtscsi_features);
508 vtscsi_write_device_config(sc);
509 vtscsi_reinit_event_vq(sc);
510 virtio_reinit_complete(dev);
512 vtscsi_enable_vqs_intr(sc);
515 vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d\n", error);
521 vtscsi_alloc_cam(struct vtscsi_softc *sc)
524 struct cam_devq *devq;
527 dev = sc->vtscsi_dev;
528 openings = sc->vtscsi_nrequests - VTSCSI_RESERVED_REQUESTS;
530 devq = cam_simq_alloc(openings);
532 device_printf(dev, "cannot allocate SIM queue\n");
536 sc->vtscsi_sim = cam_sim_alloc(vtscsi_cam_action, vtscsi_cam_poll,
537 "vtscsi", sc, device_get_unit(dev), VTSCSI_MTX(sc), 1,
539 if (sc->vtscsi_sim == NULL) {
541 device_printf(dev, "cannot allocate SIM\n");
549 vtscsi_register_cam(struct vtscsi_softc *sc)
552 int registered, error;
554 dev = sc->vtscsi_dev;
559 if (xpt_bus_register(sc->vtscsi_sim, dev, 0) != CAM_SUCCESS) {
561 device_printf(dev, "cannot register XPT bus\n");
567 if (xpt_create_path(&sc->vtscsi_path, NULL,
568 cam_sim_path(sc->vtscsi_sim), CAM_TARGET_WILDCARD,
569 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
571 device_printf(dev, "cannot create bus path\n");
578 * The async register apparently needs to be done without
579 * the lock held, otherwise it can recurse on the lock.
581 if (vtscsi_register_async(sc) != CAM_REQ_CMP) {
583 device_printf(dev, "cannot register async callback\n");
591 if (sc->vtscsi_path != NULL) {
592 xpt_free_path(sc->vtscsi_path);
593 sc->vtscsi_path = NULL;
597 xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));
605 vtscsi_free_cam(struct vtscsi_softc *sc)
610 if (sc->vtscsi_path != NULL) {
611 vtscsi_deregister_async(sc);
613 xpt_free_path(sc->vtscsi_path);
614 sc->vtscsi_path = NULL;
616 xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));
619 if (sc->vtscsi_sim != NULL) {
620 cam_sim_free(sc->vtscsi_sim, 1);
621 sc->vtscsi_sim = NULL;
628 vtscsi_cam_async(void *cb_arg, uint32_t code, struct cam_path *path, void *arg)
631 struct vtscsi_softc *sc;
634 sc = cam_sim_softc(sim);
636 vtscsi_dprintf(sc, VTSCSI_TRACE, "code=%u\n", code);
639 * TODO Once QEMU supports event reporting, we should
640 * (un)subscribe to events here.
643 case AC_FOUND_DEVICE:
651 vtscsi_register_async(struct vtscsi_softc *sc)
653 struct ccb_setasync csa;
655 VTSCSI_LOCK_NOTOWNED(sc);
657 xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
658 csa.ccb_h.func_code = XPT_SASYNC_CB;
659 csa.event_enable = AC_LOST_DEVICE | AC_FOUND_DEVICE;
660 csa.callback = vtscsi_cam_async;
661 csa.callback_arg = sc->vtscsi_sim;
663 xpt_action((union ccb *) &csa);
665 return (csa.ccb_h.status);
669 vtscsi_deregister_async(struct vtscsi_softc *sc)
671 struct ccb_setasync csa;
673 xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
674 csa.ccb_h.func_code = XPT_SASYNC_CB;
675 csa.event_enable = 0;
676 csa.callback = vtscsi_cam_async;
677 csa.callback_arg = sc->vtscsi_sim;
679 xpt_action((union ccb *) &csa);
683 vtscsi_cam_action(struct cam_sim *sim, union ccb *ccb)
685 struct vtscsi_softc *sc;
686 struct ccb_hdr *ccbh;
688 sc = cam_sim_softc(sim);
691 VTSCSI_LOCK_OWNED(sc);
693 if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) {
695 * The VTSCSI_MTX is briefly dropped between setting
696 * VTSCSI_FLAG_DETACH and deregistering with CAM, so
697 * drop any CCBs that come in during that window.
699 ccbh->status = CAM_NO_HBA;
704 switch (ccbh->func_code) {
706 vtscsi_cam_scsi_io(sc, sim, ccb);
709 case XPT_SET_TRAN_SETTINGS:
710 ccbh->status = CAM_FUNC_NOTAVAIL;
714 case XPT_GET_TRAN_SETTINGS:
715 vtscsi_cam_get_tran_settings(sc, ccb);
719 vtscsi_cam_reset_bus(sc, ccb);
723 vtscsi_cam_reset_dev(sc, ccb);
727 vtscsi_cam_abort(sc, ccb);
730 case XPT_CALC_GEOMETRY:
731 cam_calc_geometry(&ccb->ccg, 1);
736 vtscsi_cam_path_inquiry(sc, sim, ccb);
740 vtscsi_dprintf(sc, VTSCSI_ERROR,
741 "invalid ccb=%p func=%#x\n", ccb, ccbh->func_code);
743 ccbh->status = CAM_REQ_INVALID;
/* Polled completion entry point (used when interrupts are unavailable). */
static void
vtscsi_cam_poll(struct cam_sim *sim)
{
	struct vtscsi_softc *sc;

	sc = cam_sim_softc(sim);

	vtscsi_complete_vqs_locked(sc);
}
760 vtscsi_cam_scsi_io(struct vtscsi_softc *sc, struct cam_sim *sim,
763 struct ccb_hdr *ccbh;
764 struct ccb_scsiio *csio;
770 if (csio->cdb_len > VIRTIO_SCSI_CDB_SIZE) {
772 ccbh->status = CAM_REQ_INVALID;
776 if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_BOTH &&
777 (sc->vtscsi_flags & VTSCSI_FLAG_BIDIRECTIONAL) == 0) {
779 ccbh->status = CAM_REQ_INVALID;
783 error = vtscsi_start_scsi_cmd(sc, ccb);
787 vtscsi_dprintf(sc, VTSCSI_ERROR,
788 "error=%d ccb=%p status=%#x\n", error, ccb, ccbh->status);
794 vtscsi_cam_get_tran_settings(struct vtscsi_softc *sc, union ccb *ccb)
796 struct ccb_trans_settings *cts;
797 struct ccb_trans_settings_scsi *scsi;
800 scsi = &cts->proto_specific.scsi;
802 cts->protocol = PROTO_SCSI;
803 cts->protocol_version = SCSI_REV_SPC3;
804 cts->transport = XPORT_SAS;
805 cts->transport_version = 0;
807 scsi->valid = CTS_SCSI_VALID_TQ;
808 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
810 ccb->ccb_h.status = CAM_REQ_CMP;
815 vtscsi_cam_reset_bus(struct vtscsi_softc *sc, union ccb *ccb)
819 error = vtscsi_reset_bus(sc);
821 ccb->ccb_h.status = CAM_REQ_CMP;
823 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
825 vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d ccb=%p status=%#x\n",
826 error, ccb, ccb->ccb_h.status);
832 vtscsi_cam_reset_dev(struct vtscsi_softc *sc, union ccb *ccb)
834 struct ccb_hdr *ccbh;
835 struct vtscsi_request *req;
840 req = vtscsi_dequeue_request(sc);
843 vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
849 error = vtscsi_execute_reset_dev_cmd(sc, req);
853 vtscsi_enqueue_request(sc, req);
856 vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
860 ccbh->status = CAM_RESRC_UNAVAIL;
862 ccbh->status = CAM_REQ_CMP_ERR;
868 vtscsi_cam_abort(struct vtscsi_softc *sc, union ccb *ccb)
870 struct vtscsi_request *req;
871 struct ccb_hdr *ccbh;
876 req = vtscsi_dequeue_request(sc);
879 vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
885 error = vtscsi_execute_abort_task_cmd(sc, req);
889 vtscsi_enqueue_request(sc, req);
892 vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
896 ccbh->status = CAM_RESRC_UNAVAIL;
898 ccbh->status = CAM_REQ_CMP_ERR;
904 vtscsi_cam_path_inquiry(struct vtscsi_softc *sc, struct cam_sim *sim,
908 struct ccb_pathinq *cpi;
910 dev = sc->vtscsi_dev;
913 vtscsi_dprintf(sc, VTSCSI_TRACE, "sim=%p ccb=%p\n", sim, ccb);
915 cpi->version_num = 1;
916 cpi->hba_inquiry = PI_TAG_ABLE;
917 cpi->target_sprt = 0;
918 cpi->hba_misc = PIM_SEQSCAN;
919 if (vtscsi_bus_reset_disable != 0)
920 cpi->hba_misc |= PIM_NOBUSRESET;
921 cpi->hba_eng_cnt = 0;
923 cpi->max_target = sc->vtscsi_max_target;
924 cpi->max_lun = sc->vtscsi_max_lun;
925 cpi->initiator_id = VTSCSI_INITIATOR_ID;
927 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
928 strncpy(cpi->hba_vid, "VirtIO", HBA_IDLEN);
929 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
931 cpi->unit_number = cam_sim_unit(sim);
932 cpi->bus_id = cam_sim_bus(sim);
934 cpi->base_transfer_speed = 300000;
936 cpi->protocol = PROTO_SCSI;
937 cpi->protocol_version = SCSI_REV_SPC3;
938 cpi->transport = XPORT_SAS;
939 cpi->transport_version = 0;
941 cpi->maxio = (sc->vtscsi_max_nsegs - VTSCSI_MIN_SEGMENTS - 1) *
944 cpi->hba_vendor = virtio_get_vendor(dev);
945 cpi->hba_device = virtio_get_device(dev);
946 cpi->hba_subvendor = virtio_get_subvendor(dev);
947 cpi->hba_subdevice = virtio_get_subdevice(dev);
949 ccb->ccb_h.status = CAM_REQ_CMP;
954 vtscsi_sg_append_scsi_buf(struct vtscsi_softc *sc, struct sglist *sg,
955 struct ccb_scsiio *csio)
957 struct ccb_hdr *ccbh;
958 struct bus_dma_segment *dseg;
964 switch ((ccbh->flags & CAM_DATA_MASK)) {
966 error = sglist_append(sg, csio->data_ptr, csio->dxfer_len);
969 error = sglist_append_phys(sg,
970 (vm_paddr_t)(vm_offset_t) csio->data_ptr, csio->dxfer_len);
973 for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
974 dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
975 error = sglist_append(sg,
976 (void *)(vm_offset_t) dseg->ds_addr, dseg->ds_len);
979 case CAM_DATA_SG_PADDR:
980 for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
981 dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
982 error = sglist_append_phys(sg,
983 (vm_paddr_t) dseg->ds_addr, dseg->ds_len);
995 vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *sc, struct vtscsi_request *req,
996 int *readable, int *writable)
999 struct ccb_hdr *ccbh;
1000 struct ccb_scsiio *csio;
1001 struct virtio_scsi_cmd_req *cmd_req;
1002 struct virtio_scsi_cmd_resp *cmd_resp;
1005 sg = sc->vtscsi_sglist;
1006 csio = &req->vsr_ccb->csio;
1007 ccbh = &csio->ccb_h;
1008 cmd_req = &req->vsr_cmd_req;
1009 cmd_resp = &req->vsr_cmd_resp;
1013 sglist_append(sg, cmd_req, sizeof(struct virtio_scsi_cmd_req));
1014 if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1015 error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
1016 /* At least one segment must be left for the response. */
1017 if (error || sg->sg_nseg == sg->sg_maxseg)
1021 *readable = sg->sg_nseg;
1023 sglist_append(sg, cmd_resp, sizeof(struct virtio_scsi_cmd_resp));
1024 if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1025 error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
1030 *writable = sg->sg_nseg - *readable;
1032 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p readable=%d "
1033 "writable=%d\n", req, ccbh, *readable, *writable);
1039 * This should never happen unless maxio was incorrectly set.
1041 vtscsi_set_ccb_status(ccbh, CAM_REQ_TOO_BIG, 0);
1043 vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p "
1044 "nseg=%d maxseg=%d\n",
1045 error, req, ccbh, sg->sg_nseg, sg->sg_maxseg);
1051 vtscsi_execute_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
1054 struct virtqueue *vq;
1055 struct ccb_scsiio *csio;
1056 struct ccb_hdr *ccbh;
1057 struct virtio_scsi_cmd_req *cmd_req;
1058 struct virtio_scsi_cmd_resp *cmd_resp;
1059 int readable, writable, error;
1061 sg = sc->vtscsi_sglist;
1062 vq = sc->vtscsi_request_vq;
1063 csio = &req->vsr_ccb->csio;
1064 ccbh = &csio->ccb_h;
1065 cmd_req = &req->vsr_cmd_req;
1066 cmd_resp = &req->vsr_cmd_resp;
1068 vtscsi_init_scsi_cmd_req(csio, cmd_req);
1070 error = vtscsi_fill_scsi_cmd_sglist(sc, req, &readable, &writable);
1074 req->vsr_complete = vtscsi_complete_scsi_cmd;
1075 cmd_resp->response = -1;
1077 error = virtqueue_enqueue(vq, req, sg, readable, writable);
1079 vtscsi_dprintf(sc, VTSCSI_ERROR,
1080 "enqueue error=%d req=%p ccb=%p\n", error, req, ccbh);
1082 ccbh->status = CAM_REQUEUE_REQ;
1083 vtscsi_freeze_simq(sc, VTSCSI_REQUEST_VQ);
1087 ccbh->status |= CAM_SIM_QUEUED;
1088 ccbh->ccbh_vtscsi_req = req;
1090 virtqueue_notify(vq);
1092 if (ccbh->timeout != CAM_TIME_INFINITY) {
1093 req->vsr_flags |= VTSCSI_REQ_FLAG_TIMEOUT_SET;
1094 callout_reset(&req->vsr_callout, ccbh->timeout * hz / 1000,
1095 vtscsi_timedout_scsi_cmd, req);
1098 vtscsi_dprintf_req(req, VTSCSI_TRACE, "enqueued req=%p ccb=%p\n",
1105 vtscsi_start_scsi_cmd(struct vtscsi_softc *sc, union ccb *ccb)
1107 struct vtscsi_request *req;
1110 req = vtscsi_dequeue_request(sc);
1112 ccb->ccb_h.status = CAM_REQUEUE_REQ;
1113 vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
1119 error = vtscsi_execute_scsi_cmd(sc, req);
1121 vtscsi_enqueue_request(sc, req);
1127 vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
1128 struct vtscsi_request *req)
1130 struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1131 struct vtscsi_request *to_req;
1134 tmf_resp = &req->vsr_tmf_resp;
1135 response = tmf_resp->response;
1136 to_req = req->vsr_timedout_req;
1138 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p to_req=%p response=%d\n",
1139 req, to_req, response);
1141 vtscsi_enqueue_request(sc, req);
1144 * The timedout request could have completed between when the
1145 * abort task was sent and when the host processed it.
1147 if (to_req->vsr_state != VTSCSI_REQ_STATE_TIMEDOUT)
1150 /* The timedout request was successfully aborted. */
1151 if (response == VIRTIO_SCSI_S_FUNCTION_COMPLETE)
1154 /* Don't bother if the device is going away. */
1155 if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
1158 /* The timedout request will be aborted by the reset. */
1159 if (sc->vtscsi_flags & VTSCSI_FLAG_RESET)
1162 vtscsi_reset_bus(sc);
1166 vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
1167 struct vtscsi_request *to_req)
1170 struct ccb_hdr *to_ccbh;
1171 struct vtscsi_request *req;
1172 struct virtio_scsi_ctrl_tmf_req *tmf_req;
1173 struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1176 sg = sc->vtscsi_sglist;
1177 to_ccbh = &to_req->vsr_ccb->ccb_h;
1179 req = vtscsi_dequeue_request(sc);
1185 tmf_req = &req->vsr_tmf_req;
1186 tmf_resp = &req->vsr_tmf_resp;
1188 vtscsi_init_ctrl_tmf_req(to_ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
1189 (uintptr_t) to_ccbh, tmf_req);
1192 sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
1193 sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));
1195 req->vsr_timedout_req = to_req;
1196 req->vsr_complete = vtscsi_complete_abort_timedout_scsi_cmd;
1197 tmf_resp->response = -1;
1199 error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
1200 VTSCSI_EXECUTE_ASYNC);
1204 vtscsi_enqueue_request(sc, req);
1207 vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p "
1208 "timedout req=%p ccb=%p\n", error, req, to_req, to_ccbh);
1214 vtscsi_timedout_scsi_cmd(void *xreq)
1216 struct vtscsi_softc *sc;
1217 struct vtscsi_request *to_req;
1220 sc = to_req->vsr_softc;
1222 vtscsi_dprintf(sc, VTSCSI_INFO, "timedout req=%p ccb=%p state=%#x\n",
1223 to_req, to_req->vsr_ccb, to_req->vsr_state);
1225 /* Don't bother if the device is going away. */
1226 if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
1230 * Bail if the request is not in use. We likely raced when
1231 * stopping the callout handler or it has already been aborted.
1233 if (to_req->vsr_state != VTSCSI_REQ_STATE_INUSE ||
1234 (to_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) == 0)
1238 * Complete the request queue in case the timedout request is
1239 * actually just pending.
1241 vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
1242 if (to_req->vsr_state == VTSCSI_REQ_STATE_FREE)
1245 sc->vtscsi_stats.scsi_cmd_timeouts++;
1246 to_req->vsr_state = VTSCSI_REQ_STATE_TIMEDOUT;
1248 if (vtscsi_abort_timedout_scsi_cmd(sc, to_req) == 0)
1251 vtscsi_dprintf(sc, VTSCSI_ERROR, "resetting bus\n");
1252 vtscsi_reset_bus(sc);
1256 vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *cmd_resp)
1260 switch (cmd_resp->response) {
1261 case VIRTIO_SCSI_S_OK:
1262 status = CAM_REQ_CMP;
1264 case VIRTIO_SCSI_S_OVERRUN:
1265 status = CAM_DATA_RUN_ERR;
1267 case VIRTIO_SCSI_S_ABORTED:
1268 status = CAM_REQ_ABORTED;
1270 case VIRTIO_SCSI_S_BAD_TARGET:
1271 status = CAM_TID_INVALID;
1273 case VIRTIO_SCSI_S_RESET:
1274 status = CAM_SCSI_BUS_RESET;
1276 case VIRTIO_SCSI_S_BUSY:
1277 status = CAM_SCSI_BUSY;
1279 case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
1280 case VIRTIO_SCSI_S_TARGET_FAILURE:
1281 case VIRTIO_SCSI_S_NEXUS_FAILURE:
1282 status = CAM_SCSI_IT_NEXUS_LOST;
1284 default: /* VIRTIO_SCSI_S_FAILURE */
1285 status = CAM_REQ_CMP_ERR;
1293 vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *sc,
1294 struct ccb_scsiio *csio, struct virtio_scsi_cmd_resp *cmd_resp)
1298 csio->scsi_status = cmd_resp->status;
1299 csio->resid = cmd_resp->resid;
1301 if (csio->scsi_status == SCSI_STATUS_OK)
1302 status = CAM_REQ_CMP;
1304 status = CAM_SCSI_STATUS_ERROR;
1306 if (cmd_resp->sense_len > 0) {
1307 status |= CAM_AUTOSNS_VALID;
1309 if (cmd_resp->sense_len < csio->sense_len)
1310 csio->sense_resid = csio->sense_len -
1311 cmd_resp->sense_len;
1313 csio->sense_resid = 0;
1315 bzero(&csio->sense_data, sizeof(csio->sense_data));
1316 memcpy(cmd_resp->sense, &csio->sense_data,
1317 csio->sense_len - csio->sense_resid);
1320 vtscsi_dprintf(sc, status == CAM_REQ_CMP ? VTSCSI_TRACE : VTSCSI_ERROR,
1321 "ccb=%p scsi_status=%#x resid=%u sense_resid=%u\n",
1322 csio, csio->scsi_status, csio->resid, csio->sense_resid);
1328 vtscsi_complete_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
1330 struct ccb_hdr *ccbh;
1331 struct ccb_scsiio *csio;
1332 struct virtio_scsi_cmd_resp *cmd_resp;
1335 csio = &req->vsr_ccb->csio;
1336 ccbh = &csio->ccb_h;
1337 cmd_resp = &req->vsr_cmd_resp;
1339 KASSERT(ccbh->ccbh_vtscsi_req == req,
1340 ("ccb %p req mismatch %p/%p", ccbh, ccbh->ccbh_vtscsi_req, req));
1342 if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
1343 callout_stop(&req->vsr_callout);
1345 status = vtscsi_scsi_cmd_cam_status(cmd_resp);
1346 if (status == CAM_REQ_ABORTED) {
1347 if (req->vsr_state == VTSCSI_REQ_STATE_TIMEDOUT)
1348 status = CAM_CMD_TIMEOUT;
1349 } else if (status == CAM_REQ_CMP)
1350 status = vtscsi_complete_scsi_cmd_response(sc, csio, cmd_resp);
1352 if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1353 status |= CAM_DEV_QFRZN;
1354 xpt_freeze_devq(ccbh->path, 1);
1357 if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
1358 status |= CAM_RELEASE_SIMQ;
1360 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p status=%#x\n",
1363 ccbh->status = status;
1364 xpt_done(req->vsr_ccb);
1365 vtscsi_enqueue_request(sc, req);
1369 vtscsi_poll_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req)
1372 /* XXX We probably shouldn't poll forever. */
1373 req->vsr_flags |= VTSCSI_REQ_FLAG_POLLED;
1375 vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
1376 while ((req->vsr_flags & VTSCSI_REQ_FLAG_COMPLETE) == 0);
1378 req->vsr_flags &= ~VTSCSI_REQ_FLAG_POLLED;
/*
 * Enqueue a control (TMF) request on the control virtqueue and either
 * poll for completion (VTSCSI_EXECUTE_POLL) or rely on the request's
 * vsr_complete callback for async execution.  Returns 0 or an errno.
 */
1382 vtscsi_execute_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req,
1383 struct sglist *sg, int readable, int writable, int flag)
1385 struct virtqueue *vq;
1388 vq = sc->vtscsi_control_vq;
/* Async execution requires a completion callback to be set. */
1390 MPASS(flag == VTSCSI_EXECUTE_POLL || req->vsr_complete != NULL);
1392 error = virtqueue_enqueue(vq, req, sg, readable, writable);
1395 * Return EAGAIN when the virtqueue does not have enough
1396 * descriptors available.
1398 if (error == ENOSPC || error == EMSGSIZE)
1404 virtqueue_notify(vq);
1405 if (flag == VTSCSI_EXECUTE_POLL)
1406 vtscsi_poll_ctrl_req(sc, req);
/*
 * Completion handler for an ABORT_TASK TMF: map the virtio TMF
 * response onto the status of the originating XPT_ABORT CCB, then
 * return the TMF request to the free pool.
 */
1412 vtscsi_complete_abort_task_cmd(struct vtscsi_softc *sc,
1413 struct vtscsi_request *req)
1416 struct ccb_hdr *ccbh;
1417 struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1421 tmf_resp = &req->vsr_tmf_resp;
1423 switch (tmf_resp->response) {
1424 case VIRTIO_SCSI_S_FUNCTION_COMPLETE:
1425 ccbh->status = CAM_REQ_CMP;
1427 case VIRTIO_SCSI_S_FUNCTION_REJECTED:
1428 ccbh->status = CAM_UA_ABORT;
/* Any other response code is reported as a generic error. */
1431 ccbh->status = CAM_REQ_CMP_ERR;
1436 vtscsi_enqueue_request(sc, req);
/*
 * Issue a VirtIO ABORT_TASK TMF for the CCB named in an XPT_ABORT
 * request.  The command to abort is identified by its CCB header
 * pointer, which doubles as the virtio command tag.
 */
1440 vtscsi_execute_abort_task_cmd(struct vtscsi_softc *sc,
1441 struct vtscsi_request *req)
1444 struct ccb_abort *cab;
1445 struct ccb_hdr *ccbh;
1446 struct ccb_hdr *abort_ccbh;
1447 struct vtscsi_request *abort_req;
1448 struct virtio_scsi_ctrl_tmf_req *tmf_req;
1449 struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1452 sg = sc->vtscsi_sglist;
1453 cab = &req->vsr_ccb->cab;
1455 tmf_req = &req->vsr_tmf_req;
1456 tmf_resp = &req->vsr_tmf_resp;
1458 /* CCB header and request that's to be aborted. */
1459 abort_ccbh = &cab->abort_ccb->ccb_h;
1460 abort_req = abort_ccbh->ccbh_vtscsi_req;
/* Only SCSI I/O CCBs that still have a driver request can be aborted. */
1462 if (abort_ccbh->func_code != XPT_SCSI_IO || abort_req == NULL) {
1467 /* Only attempt to abort requests that could be in-flight. */
1468 if (abort_req->vsr_state != VTSCSI_REQ_STATE_INUSE) {
/* Mark the victim aborted and cancel its pending timeout callout. */
1473 abort_req->vsr_state = VTSCSI_REQ_STATE_ABORTED;
1474 if (abort_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
1475 callout_stop(&abort_req->vsr_callout);
1477 vtscsi_init_ctrl_tmf_req(ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
1478 (uintptr_t) abort_ccbh, tmf_req);
1481 sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
1482 sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));
1484 req->vsr_complete = vtscsi_complete_abort_task_cmd;
/* Preset an out-of-range response value before handing to the host. */
1485 tmf_resp->response = -1;
1487 error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
1488 VTSCSI_EXECUTE_ASYNC);
1491 vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p abort_ccb=%p "
1492 "abort_req=%p\n", error, req, abort_ccbh, abort_req);
/*
 * Completion handler for a device reset TMF: set the CCB status from
 * the virtio response and, on success, announce the bus device reset
 * to CAM so peripherals can recover.
 */
1498 vtscsi_complete_reset_dev_cmd(struct vtscsi_softc *sc,
1499 struct vtscsi_request *req)
1502 struct ccb_hdr *ccbh;
1503 struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1507 tmf_resp = &req->vsr_tmf_resp;
1509 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p response=%d\n",
1510 req, ccb, tmf_resp->response);
1512 if (tmf_resp->response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) {
1513 ccbh->status = CAM_REQ_CMP;
/* Notify CAM that a bus device reset was delivered to this target. */
1514 vtscsi_announce(sc, AC_SENT_BDR, ccbh->target_id,
1517 ccbh->status = CAM_REQ_CMP_ERR;
1520 vtscsi_enqueue_request(sc, req);
/*
 * Issue the reset TMF for an XPT_RESET_DEV CCB: an I_T nexus reset
 * when the LUN is the wildcard, otherwise a logical unit reset.
 */
1524 vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *sc,
1525 struct vtscsi_request *req)
1528 struct ccb_resetdev *crd;
1529 struct ccb_hdr *ccbh;
1530 struct virtio_scsi_ctrl_tmf_req *tmf_req;
1531 struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1535 sg = sc->vtscsi_sglist;
1536 crd = &req->vsr_ccb->crd;
1538 tmf_req = &req->vsr_tmf_req;
1539 tmf_resp = &req->vsr_tmf_resp;
1541 if (ccbh->target_lun == CAM_LUN_WILDCARD)
1542 subtype = VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET;
1544 subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET;
/* No particular command is targeted, so the tag is zero. */
1546 vtscsi_init_ctrl_tmf_req(ccbh, subtype, 0, tmf_req);
1549 sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
1550 sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));
1552 req->vsr_complete = vtscsi_complete_reset_dev_cmd;
/* Preset an out-of-range response value before handing to the host. */
1553 tmf_resp->response = -1;
1555 error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
1556 VTSCSI_EXECUTE_ASYNC);
1558 vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p ccb=%p\n",
/*
 * Decode a virtio 8-byte LUN address back into a CAM target id and
 * LUN id.  NOTE(review): lun[2] carries the 0x40 flat-address flag in
 * its high bits, which ends up in the decoded lun_id — presumably
 * acceptable for the event LUNs this is used with; verify.
 */
1565 vtscsi_get_request_lun(uint8_t lun[], target_id_t *target_id, lun_id_t *lun_id)
1568 *target_id = lun[1];
1569 *lun_id = (lun[2] << 8) | lun[3];
/*
 * Encode the CAM target id and LUN into the virtio SCSI 8-byte LUN
 * address: byte 1 is the target id and bytes 2/3 carry the LUN in
 * SAM flat-space format (0x40 flag plus the high 6 bits, then the
 * low byte).
 */
1573 vtscsi_set_request_lun(struct ccb_hdr *ccbh, uint8_t lun[])
1577 lun[1] = ccbh->target_id;
1578 lun[2] = 0x40 | ((ccbh->target_lun >> 8) & 0x3F);
/*
 * Fix: lun[3] must hold the LOW byte of the LUN.  The previous code
 * shifted target_lun right by 8 here as well, duplicating the high
 * bits and discarding the low 8 bits, so every LUN with a nonzero
 * low byte was addressed incorrectly.  vtscsi_get_request_lun()
 * decodes lun[3] as the low byte, confirming the intended layout.
 */
1579 lun[3] = ccbh->target_lun & 0xFF;
/*
 * Populate the virtio SCSI command request from a CAM SCSI I/O CCB:
 * LUN address, command tag (the CCB pointer), task attribute, and
 * the CDB bytes.
 */
1583 vtscsi_init_scsi_cmd_req(struct ccb_scsiio *csio,
1584 struct virtio_scsi_cmd_req *cmd_req)
/* Map the CAM tag action onto the virtio task attribute. */
1588 switch (csio->tag_action) {
1589 case MSG_HEAD_OF_Q_TAG:
1590 attr = VIRTIO_SCSI_S_HEAD;
1592 case MSG_ORDERED_Q_TAG:
1593 attr = VIRTIO_SCSI_S_ORDERED;
1596 attr = VIRTIO_SCSI_S_ACA;
1598 default: /* MSG_SIMPLE_Q_TAG */
1599 attr = VIRTIO_SCSI_S_SIMPLE;
1603 vtscsi_set_request_lun(&csio->ccb_h, cmd_req->lun);
/* The CCB pointer serves as the unique command tag. */
1604 cmd_req->tag = (uintptr_t) csio;
1605 cmd_req->task_attr = attr;
/* The CDB lives behind a pointer or inline, per CAM_CDB_POINTER. */
1607 memcpy(cmd_req->cdb,
1608 csio->ccb_h.flags & CAM_CDB_POINTER ?
1609 csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes,
/*
 * Initialize a virtio task management request for the CCB's LUN with
 * the given TMF subtype and command tag.
 */
1614 vtscsi_init_ctrl_tmf_req(struct ccb_hdr *ccbh, uint32_t subtype,
1615 uintptr_t tag, struct virtio_scsi_ctrl_tmf_req *tmf_req)
1618 vtscsi_set_request_lun(ccbh, tmf_req->lun);
1620 tmf_req->type = VIRTIO_SCSI_T_TMF;
1621 tmf_req->subtype = subtype;
/*
 * Record a resource shortage (out of free requests and/or request
 * virtqueue full) in vtscsi_frozen, and freeze the CAM SIMQ exactly
 * on the transition from unfrozen to frozen.
 */
1626 vtscsi_freeze_simq(struct vtscsi_softc *sc, int reason)
1630 frozen = sc->vtscsi_frozen;
1632 if (reason & VTSCSI_REQUEST &&
1633 (sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS) == 0)
1634 sc->vtscsi_frozen |= VTSCSI_FROZEN_NO_REQUESTS;
1636 if (reason & VTSCSI_REQUEST_VQ &&
1637 (sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL) == 0)
1638 sc->vtscsi_frozen |= VTSCSI_FROZEN_REQUEST_VQ_FULL;
1640 /* Freeze the SIMQ if transitioned to frozen. */
1641 if (frozen == 0 && sc->vtscsi_frozen != 0) {
1642 vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ frozen\n");
1643 xpt_freeze_simq(sc->vtscsi_sim, 1);
/*
 * Clear the given freeze reasons from vtscsi_frozen.  Returns nonzero
 * when the SIMQ has fully thawed so the caller can release it.
 */
1648 vtscsi_thaw_simq(struct vtscsi_softc *sc, int reason)
1652 if (sc->vtscsi_frozen == 0 || reason == 0)
1655 if (reason & VTSCSI_REQUEST &&
1656 sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS)
1657 sc->vtscsi_frozen &= ~VTSCSI_FROZEN_NO_REQUESTS;
1659 if (reason & VTSCSI_REQUEST_VQ &&
1660 sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL)
1661 sc->vtscsi_frozen &= ~VTSCSI_FROZEN_REQUEST_VQ_FULL;
1663 thawed = sc->vtscsi_frozen == 0;
1665 vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ thawed\n");
/*
 * Deliver a CAM async event for the given target/LUN, creating (and
 * freeing) a temporary path unless the softc's wildcard path applies.
 */
1671 vtscsi_announce(struct vtscsi_softc *sc, uint32_t ac_code,
1672 target_id_t target_id, lun_id_t lun_id)
1674 struct cam_path *path;
1676 /* Use the wildcard path from our softc for bus announcements. */
1677 if (target_id == CAM_TARGET_WILDCARD && lun_id == CAM_LUN_WILDCARD) {
1678 xpt_async(ac_code, sc->vtscsi_path, NULL);
1682 if (xpt_create_path(&path, NULL, cam_sim_path(sc->vtscsi_sim),
1683 target_id, lun_id) != CAM_REQ_CMP) {
1684 vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot create path\n");
1688 xpt_async(ac_code, path, NULL);
1689 xpt_free_path(path);
/*
 * Start an asynchronous CAM rescan of the given target/LUN.  CCB and
 * path allocation failures are logged and the rescan is dropped.
 */
1693 vtscsi_execute_rescan(struct vtscsi_softc *sc, target_id_t target_id,
1699 ccb = xpt_alloc_ccb_nowait();
1701 vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot allocate CCB\n");
1705 status = xpt_create_path(&ccb->ccb_h.path, NULL,
1706 cam_sim_path(sc->vtscsi_sim), target_id, lun_id);
1707 if (status != CAM_REQ_CMP) {
/* Rescan the whole bus via the wildcard target and LUN. */
1716 vtscsi_execute_rescan_bus(struct vtscsi_softc *sc)
1719 vtscsi_execute_rescan(sc, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
/*
 * Handle a virtio transport reset event by rescanning the affected
 * target/LUN so CAM notices attached or removed devices.
 */
1723 vtscsi_transport_reset_event(struct vtscsi_softc *sc,
1724 struct virtio_scsi_event *event)
1726 target_id_t target_id;
1729 vtscsi_get_request_lun(event->lun, &target_id, &lun_id);
1731 switch (event->reason) {
1732 case VIRTIO_SCSI_EVT_RESET_RESCAN:
1733 case VIRTIO_SCSI_EVT_RESET_REMOVED:
1734 vtscsi_execute_rescan(sc, target_id, lun_id);
1737 device_printf(sc->vtscsi_dev,
1738 "unhandled transport event reason: %d\n", event->reason);
/*
 * Process one dequeued event buffer.  If the host flagged missed
 * events, resynchronize with a full bus rescan.  The buffer is always
 * requeued on the event virtqueue afterwards.
 */
1744 vtscsi_handle_event(struct vtscsi_softc *sc, struct virtio_scsi_event *event)
1748 if ((event->event & VIRTIO_SCSI_T_EVENTS_MISSED) == 0) {
1749 switch (event->event) {
1750 case VIRTIO_SCSI_T_TRANSPORT_RESET:
1751 vtscsi_transport_reset_event(sc, event);
1754 device_printf(sc->vtscsi_dev,
1755 "unhandled event: %d\n", event->event);
/* Events were lost; rescan everything to catch up. */
1759 vtscsi_execute_rescan_bus(sc);
1762 * This should always be successful since the buffer
1763 * was just dequeued.
1765 error = vtscsi_enqueue_event_buf(sc, event);
1767 ("cannot requeue event buffer: %d", error));
/*
 * Post one event buffer on the event virtqueue and notify the host.
 * Returns 0 or an errno from the sglist/virtqueue operations.
 */
1771 vtscsi_enqueue_event_buf(struct vtscsi_softc *sc,
1772 struct virtio_scsi_event *event)
1775 struct virtqueue *vq;
1778 sg = sc->vtscsi_sglist;
1779 vq = sc->vtscsi_event_vq;
1780 size = sc->vtscsi_event_buf_size;
1785 error = sglist_append(sg, event, size);
/* Zero readable segments: the buffer is entirely host-writable. */
1789 error = virtqueue_enqueue(vq, event, sg, 0, sg->sg_nseg);
1793 virtqueue_notify(vq);
/*
 * Populate the event virtqueue with the softc's static event buffers.
 * Skipped entirely unless the host advertises hotplug support;
 * queuing at least one buffer counts as success.
 */
1799 vtscsi_init_event_vq(struct vtscsi_softc *sc)
1801 struct virtio_scsi_event *event;
1805 * The first release of QEMU with VirtIO SCSI support would crash
1806 * when attempting to notify the event virtqueue. This was fixed
1807 * when hotplug support was added.
1809 if (sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG)
1810 size = sc->vtscsi_event_buf_size;
/* Undersized (or zero) buffers mean the event queue stays unused. */
1814 if (size < sizeof(struct virtio_scsi_event))
1817 for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
1818 event = &sc->vtscsi_event_bufs[i];
1820 error = vtscsi_enqueue_event_buf(sc, event);
1826 * Even just one buffer is enough. Missed events are
1827 * denoted with the VIRTIO_SCSI_T_EVENTS_MISSED flag.
/*
 * Requeue the event buffers after a device reinit (e.g. bus reset);
 * a no-op when hotplug is unsupported or the buffers are undersized.
 */
1836 vtscsi_reinit_event_vq(struct vtscsi_softc *sc)
1838 struct virtio_scsi_event *event;
1841 if ((sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) == 0 ||
1842 sc->vtscsi_event_buf_size < sizeof(struct virtio_scsi_event))
1845 for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
1846 event = &sc->vtscsi_event_bufs[i];
1848 error = vtscsi_enqueue_event_buf(sc, event);
/* At least one buffer must have been requeued successfully. */
1853 KASSERT(i > 0, ("cannot reinit event vq: %d", error));
/*
 * Pull every outstanding event buffer off the event virtqueue; the
 * buffers are statically allocated so nothing needs freeing.
 */
1857 vtscsi_drain_event_vq(struct vtscsi_softc *sc)
1859 struct virtqueue *vq;
1862 vq = sc->vtscsi_event_vq;
1865 while (virtqueue_drain(vq, &last) != NULL)
1868 KASSERT(virtqueue_empty(vq), ("eventvq not empty"));
/*
 * Process any completed requests on the request and control
 * virtqueues.  Caller must hold the softc lock.
 */
1872 vtscsi_complete_vqs_locked(struct vtscsi_softc *sc)
1875 VTSCSI_LOCK_OWNED(sc);
1877 if (sc->vtscsi_request_vq != NULL)
1878 vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
1879 if (sc->vtscsi_control_vq != NULL)
1880 vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
/* Locked wrapper around vtscsi_complete_vqs_locked(). */
1884 vtscsi_complete_vqs(struct vtscsi_softc *sc)
1888 vtscsi_complete_vqs_locked(sc);
/*
 * Complete an in-flight request pulled off a virtqueue during a
 * drain.  On detach the CCB finishes with CAM_NO_HBA; on bus reset
 * it finishes with CAM_REQUEUE_REQ so CAM retries it.  The request's
 * callout is drained (detach) or stopped (reset) before the request
 * returns to the free pool.
 */
1893 vtscsi_cancel_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
1900 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p\n", req, ccb);
1903 * The callout must be drained when detaching since the request is
1904 * about to be freed. The VTSCSI_MTX must not be held for this in
1905 * case the callout is pending because there is a deadlock potential.
1906 * Otherwise, the virtqueue is being drained because of a bus reset
1907 * so we only need to attempt to stop the callouts.
1909 detach = (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) != 0;
1911 VTSCSI_LOCK_NOTOWNED(sc);
1913 VTSCSI_LOCK_OWNED(sc);
1915 if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) {
1917 callout_drain(&req->vsr_callout);
1919 callout_stop(&req->vsr_callout);
1925 ccb->ccb_h.status = CAM_NO_HBA;
1927 ccb->ccb_h.status = CAM_REQUEUE_REQ;
1933 vtscsi_enqueue_request(sc, req);
/*
 * Remove every outstanding request from a virtqueue, canceling each
 * one so its CCB is completed back to CAM.
 */
1937 vtscsi_drain_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
1939 struct vtscsi_request *req;
1944 vtscsi_dprintf(sc, VTSCSI_TRACE, "vq=%p\n", vq);
1946 while ((req = virtqueue_drain(vq, &last)) != NULL)
1947 vtscsi_cancel_request(sc, req);
1949 KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
/* Drain all three virtqueues (control, request, event) if present. */
1953 vtscsi_drain_vqs(struct vtscsi_softc *sc)
1956 if (sc->vtscsi_control_vq != NULL)
1957 vtscsi_drain_vq(sc, sc->vtscsi_control_vq)
1958 if (sc->vtscsi_request_vq != NULL)
1959 vtscsi_drain_vq(sc, sc->vtscsi_request_vq);
1960 if (sc->vtscsi_event_vq != NULL)
1961 vtscsi_drain_event_vq(sc);
/* Quiesce the device: mask virtqueue interrupts, then stop it. */
1965 vtscsi_stop(struct vtscsi_softc *sc)
1968 vtscsi_disable_vqs_intr(sc);
1969 virtio_stop(sc->vtscsi_dev);
/*
 * Reset the virtual SCSI bus: stop the device, complete and drain all
 * outstanding requests (CAM retries them), thaw the SIMQ, reinit the
 * device, and announce the bus reset.  Caller holds the softc lock.
 */
1973 vtscsi_reset_bus(struct vtscsi_softc *sc)
1977 VTSCSI_LOCK_OWNED(sc);
1979 if (vtscsi_bus_reset_disable != 0) {
1980 device_printf(sc->vtscsi_dev, "bus reset disabled\n");
1984 sc->vtscsi_flags |= VTSCSI_FLAG_RESET;
1987 * vtscsi_stop() will cause the in-flight requests to be canceled.
1988 * Those requests are then completed here so CAM will retry them
1989 * after the reset is complete.
1992 vtscsi_complete_vqs_locked(sc);
1994 /* Rid the virtqueues of any remaining requests. */
1995 vtscsi_drain_vqs(sc);
1998 * Any resource shortage that froze the SIMQ cannot persist across
1999 * a bus reset so ensure it gets thawed here.
2001 if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
2002 xpt_release_simq(sc->vtscsi_sim, 0);
2004 error = vtscsi_reinit(sc);
2006 device_printf(sc->vtscsi_dev,
2007 "reinitialization failed, stopping device...\n");
2010 vtscsi_announce(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
2013 sc->vtscsi_flags &= ~VTSCSI_FLAG_RESET;
/*
 * One-time setup of a request: verify its virtio request and response
 * areas each fit in a single sglist segment (no page crossing), link
 * it to the softc, and initialize its timeout callout on the softc
 * mutex.
 */
2019 vtscsi_init_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
2023 int req_nsegs, resp_nsegs;
2025 req_nsegs = sglist_count(&req->vsr_ureq, sizeof(req->vsr_ureq));
2026 resp_nsegs = sglist_count(&req->vsr_uresp, sizeof(req->vsr_uresp));
2028 KASSERT(req_nsegs == 1, ("request crossed page boundary"));
2029 KASSERT(resp_nsegs == 1, ("response crossed page boundary"));
2032 req->vsr_softc = sc;
2033 callout_init_mtx(&req->vsr_callout, VTSCSI_MTX(sc), 0);
/*
 * Allocate the request pool.  Sized from the request virtqueue
 * (divided by the minimum segment count when indirect descriptors
 * are unavailable) plus a reserve for internal TMF use.
 */
2037 vtscsi_alloc_requests(struct vtscsi_softc *sc)
2039 struct vtscsi_request *req;
2043 * Commands destined for either the request or control queues come
2044 * from the same SIM queue. Use the size of the request virtqueue
2045 * as it (should) be much more frequently used. Some additional
2046 * requests are allocated for internal (TMF) use.
2048 nreqs = virtqueue_size(sc->vtscsi_request_vq);
2049 if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0)
2050 nreqs /= VTSCSI_MIN_SEGMENTS;
2051 nreqs += VTSCSI_RESERVED_REQUESTS;
2053 for (i = 0; i < nreqs; i++) {
2054 req = malloc(sizeof(struct vtscsi_request), M_DEVBUF,
2059 vtscsi_init_request(sc, req);
2061 sc->vtscsi_nrequests++;
2062 vtscsi_enqueue_request(sc, req);
/*
 * Free every request on the free list; asserts no callouts remain
 * active and that no requests leaked.
 */
2071 vtscsi_free_requests(struct vtscsi_softc *sc)
2073 struct vtscsi_request *req;
2075 while ((req = vtscsi_dequeue_request(sc)) != NULL) {
2074 KASSERT(callout_active(&req->vsr_callout) == 0,
2075 ("request callout still active"));
2077 sc->vtscsi_nrequests--;
2078 free(req, M_DEVBUF);
2081 KASSERT(sc->vtscsi_nrequests == 0, ("leaked requests: %d",
2082 sc->vtscsi_nrequests));
/*
 * Return a request to the free pool: scrub its per-command state,
 * release the SIMQ if it was frozen for lack of requests, and append
 * to the tail of the free list.
 */
2086 vtscsi_enqueue_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
2089 KASSERT(req->vsr_softc == sc,
2090 ("non-matching request vsr_softc %p/%p", req->vsr_softc, sc));
2092 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);
2094 /* A request is available so the SIMQ could be released. */
2095 if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST) != 0)
2096 xpt_release_simq(sc->vtscsi_sim, 1);
2098 req->vsr_ccb = NULL;
2099 req->vsr_complete = NULL;
2100 req->vsr_ptr0 = NULL;
2101 req->vsr_state = VTSCSI_REQ_STATE_FREE;
2104 bzero(&req->vsr_ureq, sizeof(req->vsr_ureq));
2105 bzero(&req->vsr_uresp, sizeof(req->vsr_uresp));
2108 * We insert at the tail of the queue in order to make it
2109 * very unlikely a request will be reused if we race with
2110 * stopping its callout handler.
2112 TAILQ_INSERT_TAIL(&sc->vtscsi_req_free, req, vsr_link);
/*
 * Take a request from the head of the free list and mark it in use;
 * returns NULL (and bumps the shortage statistic) when none are free.
 */
2115 static struct vtscsi_request *
2116 vtscsi_dequeue_request(struct vtscsi_softc *sc)
2118 struct vtscsi_request *req;
2120 req = TAILQ_FIRST(&sc->vtscsi_req_free);
2122 req->vsr_state = VTSCSI_REQ_STATE_INUSE;
2123 TAILQ_REMOVE(&sc->vtscsi_req_free, req, vsr_link);
2125 sc->vtscsi_stats.dequeue_no_requests++;
2127 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);
/*
 * Complete a single request: mark it complete for pollers and invoke
 * its completion callback, if any.
 */
2133 vtscsi_complete_request(struct vtscsi_request *req)
2136 if (req->vsr_flags & VTSCSI_REQ_FLAG_POLLED)
2137 req->vsr_flags |= VTSCSI_REQ_FLAG_COMPLETE;
2139 if (req->vsr_complete != NULL)
2140 req->vsr_complete(req->vsr_softc, req);
/*
 * Dequeue and complete every finished request on the given virtqueue.
 * Caller must hold the softc lock.
 */
2144 vtscsi_complete_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
2146 struct vtscsi_request *req;
2148 VTSCSI_LOCK_OWNED(sc);
2150 while ((req = virtqueue_dequeue(vq, NULL)) != NULL)
2151 vtscsi_complete_request(req);
/*
 * Taskqueue handler for the control virtqueue: complete pending
 * requests, then re-enable the interrupt; if that races with new
 * completions, disable it again and requeue this task.
 */
2155 vtscsi_control_vq_task(void *arg, int pending)
2157 struct vtscsi_softc *sc;
2158 struct virtqueue *vq;
2161 vq = sc->vtscsi_control_vq;
2165 vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
2167 if (virtqueue_enable_intr(vq) != 0) {
2168 virtqueue_disable_intr(vq);
2170 taskqueue_enqueue_fast(sc->vtscsi_tq,
2171 &sc->vtscsi_control_intr_task);
/*
 * Taskqueue handler for the event virtqueue: dequeue and handle all
 * pending events, then re-enable the interrupt; if that races with
 * newly arrived events, disable it again and requeue this task.
 */
2179 vtscsi_event_vq_task(void *arg, int pending)
2181 struct vtscsi_softc *sc;
2182 struct virtqueue *vq;
2183 struct virtio_scsi_event *event;
2186 vq = sc->vtscsi_event_vq;
2190 while ((event = virtqueue_dequeue(vq, NULL)) != NULL)
2191 vtscsi_handle_event(sc, event);
2193 if (virtqueue_enable_intr(vq) != 0) {
2194 virtqueue_disable_intr(vq);
/*
 * Fix: requeue the EVENT interrupt task here.  The previous code
 * enqueued vtscsi_control_intr_task (a copy/paste from the control
 * queue handler), which would leave the event virtqueue's interrupt
 * disabled with no task ever scheduled to service it again.  The
 * sibling control/request handlers and vtscsi_event_vq_intr() all
 * pair this queue with vtscsi_event_intr_task.
 */
2196 taskqueue_enqueue_fast(sc->vtscsi_tq,
2197 &sc->vtscsi_event_intr_task);
/*
 * Taskqueue handler for the request virtqueue: complete pending
 * commands, then re-enable the interrupt; if that races with new
 * completions, disable it again and requeue this task.
 */
2205 vtscsi_request_vq_task(void *arg, int pending)
2207 struct vtscsi_softc *sc;
2208 struct virtqueue *vq;
2211 vq = sc->vtscsi_request_vq;
2215 vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
2217 if (virtqueue_enable_intr(vq) != 0) {
2218 virtqueue_disable_intr(vq);
2220 taskqueue_enqueue_fast(sc->vtscsi_tq,
2221 &sc->vtscsi_request_intr_task);
/*
 * Control virtqueue interrupt: mask further interrupts and defer the
 * work to the control task on the softc taskqueue.
 */
2229 vtscsi_control_vq_intr(void *xsc)
2231 struct vtscsi_softc *sc;
2235 virtqueue_disable_intr(sc->vtscsi_control_vq);
2236 taskqueue_enqueue_fast(sc->vtscsi_tq,
2237 &sc->vtscsi_control_intr_task);
/*
 * Event virtqueue interrupt: mask further interrupts and defer the
 * work to the event task on the softc taskqueue.
 */
2243 vtscsi_event_vq_intr(void *xsc)
2245 struct vtscsi_softc *sc;
2249 virtqueue_disable_intr(sc->vtscsi_event_vq);
2250 taskqueue_enqueue_fast(sc->vtscsi_tq,
2251 &sc->vtscsi_event_intr_task);
/*
 * Request virtqueue interrupt: mask further interrupts and defer the
 * work to the request task on the softc taskqueue.
 */
2257 vtscsi_request_vq_intr(void *xsc)
2259 struct vtscsi_softc *sc;
2263 virtqueue_disable_intr(sc->vtscsi_request_vq);
2264 taskqueue_enqueue_fast(sc->vtscsi_tq,
2265 &sc->vtscsi_request_intr_task);
/* Mask interrupts on all three virtqueues. */
2271 vtscsi_disable_vqs_intr(struct vtscsi_softc *sc)
2274 virtqueue_disable_intr(sc->vtscsi_control_vq);
2275 virtqueue_disable_intr(sc->vtscsi_event_vq);
2276 virtqueue_disable_intr(sc->vtscsi_request_vq);
/* Unmask interrupts on all three virtqueues. */
2280 vtscsi_enable_vqs_intr(struct vtscsi_softc *sc)
2283 virtqueue_enable_intr(sc->vtscsi_control_vq);
2284 virtqueue_enable_intr(sc->vtscsi_event_vq);
2285 virtqueue_enable_intr(sc->vtscsi_request_vq);
/*
 * Fetch the debug level from loader tunables; the per-device tunable
 * is fetched second and so overrides the global one.
 */
2289 vtscsi_get_tunables(struct vtscsi_softc *sc)
2293 TUNABLE_INT_FETCH("hw.vtscsi.debug_level", &sc->vtscsi_debug);
2295 snprintf(tmpstr, sizeof(tmpstr), "dev.vtscsi.%d.debug_level",
2296 device_get_unit(sc->vtscsi_dev));
2297 TUNABLE_INT_FETCH(tmpstr, &sc->vtscsi_debug);
/*
 * Attach the per-device sysctl nodes: the writable debug level and
 * read-only driver statistics counters.
 */
2301 vtscsi_add_sysctl(struct vtscsi_softc *sc)
2304 struct vtscsi_statistics *stats;
2305 struct sysctl_ctx_list *ctx;
2306 struct sysctl_oid *tree;
2307 struct sysctl_oid_list *child;
2309 dev = sc->vtscsi_dev;
2310 stats = &sc->vtscsi_stats;
2311 ctx = device_get_sysctl_ctx(dev);
2312 tree = device_get_sysctl_tree(dev);
2313 child = SYSCTL_CHILDREN(tree);
2315 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "debug_level",
2316 CTLFLAG_RW, &sc->vtscsi_debug, 0,
2319 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "scsi_cmd_timeouts",
2320 CTLFLAG_RD, &stats->scsi_cmd_timeouts,
2321 "SCSI command timeouts");
2322 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dequeue_no_requests",
2323 CTLFLAG_RD, &stats->dequeue_no_requests,
2324 "No available requests to dequeue");
2328 vtscsi_printf_req(struct vtscsi_request *req, const char *func,
2329 const char *fmt, ...)
2331 struct vtscsi_softc *sc;
2341 sc = req->vsr_softc;
2345 sbuf_new(&sb, str, sizeof(str), 0);
2348 sbuf_printf(&sb, "(noperiph:%s%d:%u): ",
2349 cam_sim_name(sc->vtscsi_sim), cam_sim_unit(sc->vtscsi_sim),
2350 cam_sim_bus(sc->vtscsi_sim));
2352 xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
2353 sbuf_cat(&sb, path_str);
2354 if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
2355 scsi_command_string(&ccb->csio, &sb);
2356 sbuf_printf(&sb, "length %d ", ccb->csio.dxfer_len);
2360 sbuf_vprintf(&sb, fmt, ap);
2364 printf("%s: %s: %s", device_get_nameunit(sc->vtscsi_dev), func,