/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012, Bryan Venteicher <bryanv@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
29 /* Driver for VirtIO SCSI devices. */
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/kthread.h>
38 #include <sys/malloc.h>
39 #include <sys/module.h>
40 #include <sys/sglist.h>
41 #include <sys/sysctl.h>
43 #include <sys/mutex.h>
44 #include <sys/callout.h>
45 #include <sys/queue.h>
48 #include <machine/stdarg.h>
50 #include <machine/bus.h>
51 #include <machine/resource.h>
56 #include <cam/cam_ccb.h>
57 #include <cam/cam_sim.h>
58 #include <cam/cam_periph.h>
59 #include <cam/cam_xpt_sim.h>
60 #include <cam/cam_debug.h>
61 #include <cam/scsi/scsi_all.h>
62 #include <cam/scsi/scsi_message.h>
64 #include <dev/virtio/virtio.h>
65 #include <dev/virtio/virtqueue.h>
66 #include <dev/virtio/scsi/virtio_scsi.h>
67 #include <dev/virtio/scsi/virtio_scsivar.h>
69 #include "virtio_if.h"
71 static int vtscsi_modevent(module_t, int, void *);
73 static int vtscsi_probe(device_t);
74 static int vtscsi_attach(device_t);
75 static int vtscsi_detach(device_t);
76 static int vtscsi_suspend(device_t);
77 static int vtscsi_resume(device_t);
79 static void vtscsi_negotiate_features(struct vtscsi_softc *);
80 static void vtscsi_read_config(struct vtscsi_softc *,
81 struct virtio_scsi_config *);
82 static int vtscsi_maximum_segments(struct vtscsi_softc *, int);
83 static int vtscsi_alloc_virtqueues(struct vtscsi_softc *);
84 static void vtscsi_check_sizes(struct vtscsi_softc *);
85 static void vtscsi_write_device_config(struct vtscsi_softc *);
86 static int vtscsi_reinit(struct vtscsi_softc *);
88 static int vtscsi_alloc_cam(struct vtscsi_softc *);
89 static int vtscsi_register_cam(struct vtscsi_softc *);
90 static void vtscsi_free_cam(struct vtscsi_softc *);
91 static void vtscsi_cam_async(void *, uint32_t, struct cam_path *, void *);
92 static int vtscsi_register_async(struct vtscsi_softc *);
93 static void vtscsi_deregister_async(struct vtscsi_softc *);
94 static void vtscsi_cam_action(struct cam_sim *, union ccb *);
95 static void vtscsi_cam_poll(struct cam_sim *);
97 static void vtscsi_cam_scsi_io(struct vtscsi_softc *, struct cam_sim *,
99 static void vtscsi_cam_get_tran_settings(struct vtscsi_softc *,
101 static void vtscsi_cam_reset_bus(struct vtscsi_softc *, union ccb *);
102 static void vtscsi_cam_reset_dev(struct vtscsi_softc *, union ccb *);
103 static void vtscsi_cam_abort(struct vtscsi_softc *, union ccb *);
104 static void vtscsi_cam_path_inquiry(struct vtscsi_softc *,
105 struct cam_sim *, union ccb *);
107 static int vtscsi_sg_append_scsi_buf(struct vtscsi_softc *,
108 struct sglist *, struct ccb_scsiio *);
109 static int vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *,
110 struct vtscsi_request *, int *, int *);
111 static int vtscsi_execute_scsi_cmd(struct vtscsi_softc *,
112 struct vtscsi_request *);
113 static int vtscsi_start_scsi_cmd(struct vtscsi_softc *, union ccb *);
114 static void vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *,
115 struct vtscsi_request *);
116 static int vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *,
117 struct vtscsi_request *);
118 static void vtscsi_timedout_scsi_cmd(void *);
119 static cam_status vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *);
120 static cam_status vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *,
121 struct ccb_scsiio *, struct virtio_scsi_cmd_resp *);
122 static void vtscsi_complete_scsi_cmd(struct vtscsi_softc *,
123 struct vtscsi_request *);
125 static void vtscsi_poll_ctrl_req(struct vtscsi_softc *,
126 struct vtscsi_request *);
127 static int vtscsi_execute_ctrl_req(struct vtscsi_softc *,
128 struct vtscsi_request *, struct sglist *, int, int, int);
129 static void vtscsi_complete_abort_task_cmd(struct vtscsi_softc *c,
130 struct vtscsi_request *);
131 static int vtscsi_execute_abort_task_cmd(struct vtscsi_softc *,
132 struct vtscsi_request *);
133 static int vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *,
134 struct vtscsi_request *);
136 static void vtscsi_get_request_lun(uint8_t [], target_id_t *, lun_id_t *);
137 static void vtscsi_set_request_lun(struct ccb_hdr *, uint8_t []);
138 static void vtscsi_init_scsi_cmd_req(struct ccb_scsiio *,
139 struct virtio_scsi_cmd_req *);
140 static void vtscsi_init_ctrl_tmf_req(struct ccb_hdr *, uint32_t,
141 uintptr_t, struct virtio_scsi_ctrl_tmf_req *);
143 static void vtscsi_freeze_simq(struct vtscsi_softc *, int);
144 static int vtscsi_thaw_simq(struct vtscsi_softc *, int);
146 static void vtscsi_announce(struct vtscsi_softc *, uint32_t, target_id_t,
148 static void vtscsi_execute_rescan(struct vtscsi_softc *, target_id_t,
150 static void vtscsi_execute_rescan_bus(struct vtscsi_softc *);
152 static void vtscsi_handle_event(struct vtscsi_softc *,
153 struct virtio_scsi_event *);
154 static int vtscsi_enqueue_event_buf(struct vtscsi_softc *,
155 struct virtio_scsi_event *);
156 static int vtscsi_init_event_vq(struct vtscsi_softc *);
157 static void vtscsi_reinit_event_vq(struct vtscsi_softc *);
158 static void vtscsi_drain_event_vq(struct vtscsi_softc *);
160 static void vtscsi_complete_vqs_locked(struct vtscsi_softc *);
161 static void vtscsi_complete_vqs(struct vtscsi_softc *);
162 static void vtscsi_drain_vqs(struct vtscsi_softc *);
163 static void vtscsi_cancel_request(struct vtscsi_softc *,
164 struct vtscsi_request *);
165 static void vtscsi_drain_vq(struct vtscsi_softc *, struct virtqueue *);
166 static void vtscsi_stop(struct vtscsi_softc *);
167 static int vtscsi_reset_bus(struct vtscsi_softc *);
169 static void vtscsi_init_request(struct vtscsi_softc *,
170 struct vtscsi_request *);
171 static int vtscsi_alloc_requests(struct vtscsi_softc *);
172 static void vtscsi_free_requests(struct vtscsi_softc *);
173 static void vtscsi_enqueue_request(struct vtscsi_softc *,
174 struct vtscsi_request *);
175 static struct vtscsi_request * vtscsi_dequeue_request(struct vtscsi_softc *);
177 static void vtscsi_complete_request(struct vtscsi_request *);
178 static void vtscsi_complete_vq(struct vtscsi_softc *, struct virtqueue *);
180 static void vtscsi_control_vq_intr(void *);
181 static void vtscsi_event_vq_intr(void *);
182 static void vtscsi_request_vq_intr(void *);
183 static void vtscsi_disable_vqs_intr(struct vtscsi_softc *);
184 static void vtscsi_enable_vqs_intr(struct vtscsi_softc *);
186 static void vtscsi_get_tunables(struct vtscsi_softc *);
187 static void vtscsi_add_sysctl(struct vtscsi_softc *);
189 static void vtscsi_printf_req(struct vtscsi_request *, const char *,
192 /* Global tunables. */
/*
 * The current QEMU VirtIO SCSI implementation does not cancel in-flight
 * IO during virtio_stop(). So in-flight requests still complete after the
 * device reset. We would have to wait for all the in-flight IO to complete,
 * which defeats the typical purpose of a bus reset. We could simulate the
 * bus reset with either I_T_NEXUS_RESET of all the targets, or with
 * LOGICAL_UNIT_RESET of all the LUNs (assuming there is space in the
 * control virtqueue). But this isn't very useful if things really go off
 * the rails, so default to disabled for now.
 */
203 static int vtscsi_bus_reset_disable = 1;
204 TUNABLE_INT("hw.vtscsi.bus_reset_disable", &vtscsi_bus_reset_disable);
206 static struct virtio_feature_desc vtscsi_feature_desc[] = {
207 { VIRTIO_SCSI_F_INOUT, "InOut" },
208 { VIRTIO_SCSI_F_HOTPLUG, "Hotplug" },
212 static device_method_t vtscsi_methods[] = {
213 /* Device methods. */
214 DEVMETHOD(device_probe, vtscsi_probe),
215 DEVMETHOD(device_attach, vtscsi_attach),
216 DEVMETHOD(device_detach, vtscsi_detach),
217 DEVMETHOD(device_suspend, vtscsi_suspend),
218 DEVMETHOD(device_resume, vtscsi_resume),
223 static driver_t vtscsi_driver = {
226 sizeof(struct vtscsi_softc)
228 static devclass_t vtscsi_devclass;
230 DRIVER_MODULE(virtio_scsi, virtio_mmio, vtscsi_driver, vtscsi_devclass,
232 DRIVER_MODULE(virtio_scsi, virtio_pci, vtscsi_driver, vtscsi_devclass,
234 MODULE_VERSION(virtio_scsi, 1);
235 MODULE_DEPEND(virtio_scsi, virtio, 1, 1, 1);
236 MODULE_DEPEND(virtio_scsi, cam, 1, 1, 1);
238 VIRTIO_SIMPLE_PNPTABLE(virtio_scsi, VIRTIO_ID_SCSI, "VirtIO SCSI Adapter");
239 VIRTIO_SIMPLE_PNPINFO(virtio_mmio, virtio_scsi);
240 VIRTIO_SIMPLE_PNPINFO(virtio_pci, virtio_scsi);
243 vtscsi_modevent(module_t mod, int type, void *unused)
263 vtscsi_probe(device_t dev)
265 return (VIRTIO_SIMPLE_PROBE(dev, virtio_scsi));
269 vtscsi_attach(device_t dev)
271 struct vtscsi_softc *sc;
272 struct virtio_scsi_config scsicfg;
275 sc = device_get_softc(dev);
276 sc->vtscsi_dev = dev;
278 VTSCSI_LOCK_INIT(sc, device_get_nameunit(dev));
279 TAILQ_INIT(&sc->vtscsi_req_free);
281 vtscsi_get_tunables(sc);
282 vtscsi_add_sysctl(sc);
284 virtio_set_feature_desc(dev, vtscsi_feature_desc);
285 vtscsi_negotiate_features(sc);
287 if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC))
288 sc->vtscsi_flags |= VTSCSI_FLAG_INDIRECT;
289 if (virtio_with_feature(dev, VIRTIO_SCSI_F_INOUT))
290 sc->vtscsi_flags |= VTSCSI_FLAG_BIDIRECTIONAL;
291 if (virtio_with_feature(dev, VIRTIO_SCSI_F_HOTPLUG))
292 sc->vtscsi_flags |= VTSCSI_FLAG_HOTPLUG;
294 vtscsi_read_config(sc, &scsicfg);
296 sc->vtscsi_max_channel = scsicfg.max_channel;
297 sc->vtscsi_max_target = scsicfg.max_target;
298 sc->vtscsi_max_lun = scsicfg.max_lun;
299 sc->vtscsi_event_buf_size = scsicfg.event_info_size;
301 vtscsi_write_device_config(sc);
303 sc->vtscsi_max_nsegs = vtscsi_maximum_segments(sc, scsicfg.seg_max);
304 sc->vtscsi_sglist = sglist_alloc(sc->vtscsi_max_nsegs, M_NOWAIT);
305 if (sc->vtscsi_sglist == NULL) {
307 device_printf(dev, "cannot allocate sglist\n");
311 error = vtscsi_alloc_virtqueues(sc);
313 device_printf(dev, "cannot allocate virtqueues\n");
317 vtscsi_check_sizes(sc);
319 error = vtscsi_init_event_vq(sc);
321 device_printf(dev, "cannot populate the eventvq\n");
325 error = vtscsi_alloc_requests(sc);
327 device_printf(dev, "cannot allocate requests\n");
331 error = vtscsi_alloc_cam(sc);
333 device_printf(dev, "cannot allocate CAM structures\n");
337 error = virtio_setup_intr(dev, INTR_TYPE_CAM);
339 device_printf(dev, "cannot setup virtqueue interrupts\n");
343 vtscsi_enable_vqs_intr(sc);
346 * Register with CAM after interrupts are enabled so we will get
347 * notified of the probe responses.
349 error = vtscsi_register_cam(sc);
351 device_printf(dev, "cannot register with CAM\n");
363 vtscsi_detach(device_t dev)
365 struct vtscsi_softc *sc;
367 sc = device_get_softc(dev);
370 sc->vtscsi_flags |= VTSCSI_FLAG_DETACH;
371 if (device_is_attached(dev))
375 vtscsi_complete_vqs(sc);
376 vtscsi_drain_vqs(sc);
379 vtscsi_free_requests(sc);
381 if (sc->vtscsi_sglist != NULL) {
382 sglist_free(sc->vtscsi_sglist);
383 sc->vtscsi_sglist = NULL;
386 VTSCSI_LOCK_DESTROY(sc);
392 vtscsi_suspend(device_t dev)
399 vtscsi_resume(device_t dev)
406 vtscsi_negotiate_features(struct vtscsi_softc *sc)
411 dev = sc->vtscsi_dev;
412 features = virtio_negotiate_features(dev, VTSCSI_FEATURES);
413 sc->vtscsi_features = features;
416 #define VTSCSI_GET_CONFIG(_dev, _field, _cfg) \
417 virtio_read_device_config(_dev, \
418 offsetof(struct virtio_scsi_config, _field), \
419 &(_cfg)->_field, sizeof((_cfg)->_field)) \
422 vtscsi_read_config(struct vtscsi_softc *sc,
423 struct virtio_scsi_config *scsicfg)
427 dev = sc->vtscsi_dev;
429 bzero(scsicfg, sizeof(struct virtio_scsi_config));
431 VTSCSI_GET_CONFIG(dev, num_queues, scsicfg);
432 VTSCSI_GET_CONFIG(dev, seg_max, scsicfg);
433 VTSCSI_GET_CONFIG(dev, max_sectors, scsicfg);
434 VTSCSI_GET_CONFIG(dev, cmd_per_lun, scsicfg);
435 VTSCSI_GET_CONFIG(dev, event_info_size, scsicfg);
436 VTSCSI_GET_CONFIG(dev, sense_size, scsicfg);
437 VTSCSI_GET_CONFIG(dev, cdb_size, scsicfg);
438 VTSCSI_GET_CONFIG(dev, max_channel, scsicfg);
439 VTSCSI_GET_CONFIG(dev, max_target, scsicfg);
440 VTSCSI_GET_CONFIG(dev, max_lun, scsicfg);
443 #undef VTSCSI_GET_CONFIG
446 vtscsi_maximum_segments(struct vtscsi_softc *sc, int seg_max)
450 nsegs = VTSCSI_MIN_SEGMENTS;
453 nsegs += MIN(seg_max, maxphys / PAGE_SIZE + 1);
454 if (sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT)
455 nsegs = MIN(nsegs, VIRTIO_MAX_INDIRECT);
463 vtscsi_alloc_virtqueues(struct vtscsi_softc *sc)
466 struct vq_alloc_info vq_info[3];
469 dev = sc->vtscsi_dev;
472 VQ_ALLOC_INFO_INIT(&vq_info[0], 0, vtscsi_control_vq_intr, sc,
473 &sc->vtscsi_control_vq, "%s control", device_get_nameunit(dev));
475 VQ_ALLOC_INFO_INIT(&vq_info[1], 0, vtscsi_event_vq_intr, sc,
476 &sc->vtscsi_event_vq, "%s event", device_get_nameunit(dev));
478 VQ_ALLOC_INFO_INIT(&vq_info[2], sc->vtscsi_max_nsegs,
479 vtscsi_request_vq_intr, sc, &sc->vtscsi_request_vq,
480 "%s request", device_get_nameunit(dev));
482 return (virtio_alloc_virtqueues(dev, 0, nvqs, vq_info));
486 vtscsi_check_sizes(struct vtscsi_softc *sc)
490 if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0) {
492 * Ensure the assertions in virtqueue_enqueue(),
493 * even if the hypervisor reports a bad seg_max.
495 rqsize = virtqueue_size(sc->vtscsi_request_vq);
496 if (sc->vtscsi_max_nsegs > rqsize) {
497 device_printf(sc->vtscsi_dev,
498 "clamping seg_max (%d %d)\n", sc->vtscsi_max_nsegs,
500 sc->vtscsi_max_nsegs = rqsize;
506 vtscsi_write_device_config(struct vtscsi_softc *sc)
509 virtio_write_dev_config_4(sc->vtscsi_dev,
510 offsetof(struct virtio_scsi_config, sense_size),
511 VIRTIO_SCSI_SENSE_SIZE);
514 * This is the size in the virtio_scsi_cmd_req structure. Note
515 * this value (32) is larger than the maximum CAM CDB size (16).
517 virtio_write_dev_config_4(sc->vtscsi_dev,
518 offsetof(struct virtio_scsi_config, cdb_size),
519 VIRTIO_SCSI_CDB_SIZE);
523 vtscsi_reinit(struct vtscsi_softc *sc)
528 dev = sc->vtscsi_dev;
530 error = virtio_reinit(dev, sc->vtscsi_features);
532 vtscsi_write_device_config(sc);
533 vtscsi_reinit_event_vq(sc);
534 virtio_reinit_complete(dev);
536 vtscsi_enable_vqs_intr(sc);
539 vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d\n", error);
545 vtscsi_alloc_cam(struct vtscsi_softc *sc)
548 struct cam_devq *devq;
551 dev = sc->vtscsi_dev;
552 openings = sc->vtscsi_nrequests - VTSCSI_RESERVED_REQUESTS;
554 devq = cam_simq_alloc(openings);
556 device_printf(dev, "cannot allocate SIM queue\n");
560 sc->vtscsi_sim = cam_sim_alloc(vtscsi_cam_action, vtscsi_cam_poll,
561 "vtscsi", sc, device_get_unit(dev), VTSCSI_MTX(sc), 1,
563 if (sc->vtscsi_sim == NULL) {
565 device_printf(dev, "cannot allocate SIM\n");
573 vtscsi_register_cam(struct vtscsi_softc *sc)
576 int registered, error;
578 dev = sc->vtscsi_dev;
583 if (xpt_bus_register(sc->vtscsi_sim, dev, 0) != CAM_SUCCESS) {
585 device_printf(dev, "cannot register XPT bus\n");
591 if (xpt_create_path(&sc->vtscsi_path, NULL,
592 cam_sim_path(sc->vtscsi_sim), CAM_TARGET_WILDCARD,
593 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
595 device_printf(dev, "cannot create bus path\n");
599 if (vtscsi_register_async(sc) != CAM_REQ_CMP) {
601 device_printf(dev, "cannot register async callback\n");
610 if (sc->vtscsi_path != NULL) {
611 xpt_free_path(sc->vtscsi_path);
612 sc->vtscsi_path = NULL;
616 xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));
624 vtscsi_free_cam(struct vtscsi_softc *sc)
629 if (sc->vtscsi_path != NULL) {
630 vtscsi_deregister_async(sc);
632 xpt_free_path(sc->vtscsi_path);
633 sc->vtscsi_path = NULL;
635 xpt_bus_deregister(cam_sim_path(sc->vtscsi_sim));
638 if (sc->vtscsi_sim != NULL) {
639 cam_sim_free(sc->vtscsi_sim, 1);
640 sc->vtscsi_sim = NULL;
647 vtscsi_cam_async(void *cb_arg, uint32_t code, struct cam_path *path, void *arg)
650 struct vtscsi_softc *sc;
653 sc = cam_sim_softc(sim);
655 vtscsi_dprintf(sc, VTSCSI_TRACE, "code=%u\n", code);
658 * TODO Once QEMU supports event reporting, we should
659 * (un)subscribe to events here.
662 case AC_FOUND_DEVICE:
670 vtscsi_register_async(struct vtscsi_softc *sc)
672 struct ccb_setasync csa;
674 xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
675 csa.ccb_h.func_code = XPT_SASYNC_CB;
676 csa.event_enable = AC_LOST_DEVICE | AC_FOUND_DEVICE;
677 csa.callback = vtscsi_cam_async;
678 csa.callback_arg = sc->vtscsi_sim;
680 xpt_action((union ccb *) &csa);
682 return (csa.ccb_h.status);
686 vtscsi_deregister_async(struct vtscsi_softc *sc)
688 struct ccb_setasync csa;
690 xpt_setup_ccb(&csa.ccb_h, sc->vtscsi_path, 5);
691 csa.ccb_h.func_code = XPT_SASYNC_CB;
692 csa.event_enable = 0;
693 csa.callback = vtscsi_cam_async;
694 csa.callback_arg = sc->vtscsi_sim;
696 xpt_action((union ccb *) &csa);
700 vtscsi_cam_action(struct cam_sim *sim, union ccb *ccb)
702 struct vtscsi_softc *sc;
703 struct ccb_hdr *ccbh;
705 sc = cam_sim_softc(sim);
708 VTSCSI_LOCK_OWNED(sc);
710 if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) {
712 * The VTSCSI_MTX is briefly dropped between setting
713 * VTSCSI_FLAG_DETACH and deregistering with CAM, so
714 * drop any CCBs that come in during that window.
716 ccbh->status = CAM_NO_HBA;
721 switch (ccbh->func_code) {
723 vtscsi_cam_scsi_io(sc, sim, ccb);
726 case XPT_SET_TRAN_SETTINGS:
727 ccbh->status = CAM_FUNC_NOTAVAIL;
731 case XPT_GET_TRAN_SETTINGS:
732 vtscsi_cam_get_tran_settings(sc, ccb);
736 vtscsi_cam_reset_bus(sc, ccb);
740 vtscsi_cam_reset_dev(sc, ccb);
744 vtscsi_cam_abort(sc, ccb);
747 case XPT_CALC_GEOMETRY:
748 cam_calc_geometry(&ccb->ccg, 1);
753 vtscsi_cam_path_inquiry(sc, sim, ccb);
757 vtscsi_dprintf(sc, VTSCSI_ERROR,
758 "invalid ccb=%p func=%#x\n", ccb, ccbh->func_code);
760 ccbh->status = CAM_REQ_INVALID;
767 vtscsi_cam_poll(struct cam_sim *sim)
769 struct vtscsi_softc *sc;
771 sc = cam_sim_softc(sim);
773 vtscsi_complete_vqs_locked(sc);
777 vtscsi_cam_scsi_io(struct vtscsi_softc *sc, struct cam_sim *sim,
780 struct ccb_hdr *ccbh;
781 struct ccb_scsiio *csio;
787 if (csio->cdb_len > VIRTIO_SCSI_CDB_SIZE) {
789 ccbh->status = CAM_REQ_INVALID;
793 if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_BOTH &&
794 (sc->vtscsi_flags & VTSCSI_FLAG_BIDIRECTIONAL) == 0) {
796 ccbh->status = CAM_REQ_INVALID;
800 error = vtscsi_start_scsi_cmd(sc, ccb);
804 vtscsi_dprintf(sc, VTSCSI_ERROR,
805 "error=%d ccb=%p status=%#x\n", error, ccb, ccbh->status);
811 vtscsi_cam_get_tran_settings(struct vtscsi_softc *sc, union ccb *ccb)
813 struct ccb_trans_settings *cts;
814 struct ccb_trans_settings_scsi *scsi;
817 scsi = &cts->proto_specific.scsi;
819 cts->protocol = PROTO_SCSI;
820 cts->protocol_version = SCSI_REV_SPC3;
821 cts->transport = XPORT_SAS;
822 cts->transport_version = 0;
824 scsi->valid = CTS_SCSI_VALID_TQ;
825 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
827 ccb->ccb_h.status = CAM_REQ_CMP;
832 vtscsi_cam_reset_bus(struct vtscsi_softc *sc, union ccb *ccb)
836 error = vtscsi_reset_bus(sc);
838 ccb->ccb_h.status = CAM_REQ_CMP;
840 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
842 vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d ccb=%p status=%#x\n",
843 error, ccb, ccb->ccb_h.status);
849 vtscsi_cam_reset_dev(struct vtscsi_softc *sc, union ccb *ccb)
851 struct ccb_hdr *ccbh;
852 struct vtscsi_request *req;
857 req = vtscsi_dequeue_request(sc);
860 vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
866 error = vtscsi_execute_reset_dev_cmd(sc, req);
870 vtscsi_enqueue_request(sc, req);
873 vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
877 ccbh->status = CAM_RESRC_UNAVAIL;
879 ccbh->status = CAM_REQ_CMP_ERR;
885 vtscsi_cam_abort(struct vtscsi_softc *sc, union ccb *ccb)
887 struct vtscsi_request *req;
888 struct ccb_hdr *ccbh;
893 req = vtscsi_dequeue_request(sc);
896 vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
902 error = vtscsi_execute_abort_task_cmd(sc, req);
906 vtscsi_enqueue_request(sc, req);
909 vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p\n",
913 ccbh->status = CAM_RESRC_UNAVAIL;
915 ccbh->status = CAM_REQ_CMP_ERR;
921 vtscsi_cam_path_inquiry(struct vtscsi_softc *sc, struct cam_sim *sim,
925 struct ccb_pathinq *cpi;
927 dev = sc->vtscsi_dev;
930 vtscsi_dprintf(sc, VTSCSI_TRACE, "sim=%p ccb=%p\n", sim, ccb);
932 cpi->version_num = 1;
933 cpi->hba_inquiry = PI_TAG_ABLE;
934 cpi->target_sprt = 0;
935 cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED;
936 if (vtscsi_bus_reset_disable != 0)
937 cpi->hba_misc |= PIM_NOBUSRESET;
938 cpi->hba_eng_cnt = 0;
940 cpi->max_target = sc->vtscsi_max_target;
941 cpi->max_lun = sc->vtscsi_max_lun;
942 cpi->initiator_id = cpi->max_target + 1;
944 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
945 strlcpy(cpi->hba_vid, "VirtIO", HBA_IDLEN);
946 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
948 cpi->unit_number = cam_sim_unit(sim);
949 cpi->bus_id = cam_sim_bus(sim);
951 cpi->base_transfer_speed = 300000;
953 cpi->protocol = PROTO_SCSI;
954 cpi->protocol_version = SCSI_REV_SPC3;
955 cpi->transport = XPORT_SAS;
956 cpi->transport_version = 0;
958 cpi->maxio = (sc->vtscsi_max_nsegs - VTSCSI_MIN_SEGMENTS - 1) *
961 cpi->hba_vendor = virtio_get_vendor(dev);
962 cpi->hba_device = virtio_get_device(dev);
963 cpi->hba_subvendor = virtio_get_subvendor(dev);
964 cpi->hba_subdevice = virtio_get_subdevice(dev);
966 ccb->ccb_h.status = CAM_REQ_CMP;
971 vtscsi_sg_append_scsi_buf(struct vtscsi_softc *sc, struct sglist *sg,
972 struct ccb_scsiio *csio)
974 struct ccb_hdr *ccbh;
975 struct bus_dma_segment *dseg;
981 switch ((ccbh->flags & CAM_DATA_MASK)) {
983 error = sglist_append(sg, csio->data_ptr, csio->dxfer_len);
986 error = sglist_append_phys(sg,
987 (vm_paddr_t)(vm_offset_t) csio->data_ptr, csio->dxfer_len);
990 for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
991 dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
992 error = sglist_append(sg,
993 (void *)(vm_offset_t) dseg->ds_addr, dseg->ds_len);
996 case CAM_DATA_SG_PADDR:
997 for (i = 0; i < csio->sglist_cnt && error == 0; i++) {
998 dseg = &((struct bus_dma_segment *)csio->data_ptr)[i];
999 error = sglist_append_phys(sg,
1000 (vm_paddr_t) dseg->ds_addr, dseg->ds_len);
1004 error = sglist_append_bio(sg, (struct bio *) csio->data_ptr);
1015 vtscsi_fill_scsi_cmd_sglist(struct vtscsi_softc *sc, struct vtscsi_request *req,
1016 int *readable, int *writable)
1019 struct ccb_hdr *ccbh;
1020 struct ccb_scsiio *csio;
1021 struct virtio_scsi_cmd_req *cmd_req;
1022 struct virtio_scsi_cmd_resp *cmd_resp;
1025 sg = sc->vtscsi_sglist;
1026 csio = &req->vsr_ccb->csio;
1027 ccbh = &csio->ccb_h;
1028 cmd_req = &req->vsr_cmd_req;
1029 cmd_resp = &req->vsr_cmd_resp;
1033 sglist_append(sg, cmd_req, sizeof(struct virtio_scsi_cmd_req));
1034 if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1035 error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
1036 /* At least one segment must be left for the response. */
1037 if (error || sg->sg_nseg == sg->sg_maxseg)
1041 *readable = sg->sg_nseg;
1043 sglist_append(sg, cmd_resp, sizeof(struct virtio_scsi_cmd_resp));
1044 if ((ccbh->flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1045 error = vtscsi_sg_append_scsi_buf(sc, sg, csio);
1050 *writable = sg->sg_nseg - *readable;
1052 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p readable=%d "
1053 "writable=%d\n", req, ccbh, *readable, *writable);
1059 * This should never happen unless maxio was incorrectly set.
1061 vtscsi_set_ccb_status(ccbh, CAM_REQ_TOO_BIG, 0);
1063 vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p ccb=%p "
1064 "nseg=%d maxseg=%d\n",
1065 error, req, ccbh, sg->sg_nseg, sg->sg_maxseg);
1071 vtscsi_execute_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
1074 struct virtqueue *vq;
1075 struct ccb_scsiio *csio;
1076 struct ccb_hdr *ccbh;
1077 struct virtio_scsi_cmd_req *cmd_req;
1078 struct virtio_scsi_cmd_resp *cmd_resp;
1079 int readable, writable, error;
1081 sg = sc->vtscsi_sglist;
1082 vq = sc->vtscsi_request_vq;
1083 csio = &req->vsr_ccb->csio;
1084 ccbh = &csio->ccb_h;
1085 cmd_req = &req->vsr_cmd_req;
1086 cmd_resp = &req->vsr_cmd_resp;
1088 vtscsi_init_scsi_cmd_req(csio, cmd_req);
1090 error = vtscsi_fill_scsi_cmd_sglist(sc, req, &readable, &writable);
1094 req->vsr_complete = vtscsi_complete_scsi_cmd;
1095 cmd_resp->response = -1;
1097 error = virtqueue_enqueue(vq, req, sg, readable, writable);
1099 vtscsi_dprintf(sc, VTSCSI_ERROR,
1100 "enqueue error=%d req=%p ccb=%p\n", error, req, ccbh);
1102 ccbh->status = CAM_REQUEUE_REQ;
1103 vtscsi_freeze_simq(sc, VTSCSI_REQUEST_VQ);
1107 ccbh->status |= CAM_SIM_QUEUED;
1108 ccbh->ccbh_vtscsi_req = req;
1110 virtqueue_notify(vq);
1112 if (ccbh->timeout != CAM_TIME_INFINITY) {
1113 req->vsr_flags |= VTSCSI_REQ_FLAG_TIMEOUT_SET;
1114 callout_reset_sbt(&req->vsr_callout, SBT_1MS * ccbh->timeout,
1115 0, vtscsi_timedout_scsi_cmd, req, 0);
1118 vtscsi_dprintf_req(req, VTSCSI_TRACE, "enqueued req=%p ccb=%p\n",
1125 vtscsi_start_scsi_cmd(struct vtscsi_softc *sc, union ccb *ccb)
1127 struct vtscsi_request *req;
1130 req = vtscsi_dequeue_request(sc);
1132 ccb->ccb_h.status = CAM_REQUEUE_REQ;
1133 vtscsi_freeze_simq(sc, VTSCSI_REQUEST);
1139 error = vtscsi_execute_scsi_cmd(sc, req);
1141 vtscsi_enqueue_request(sc, req);
1147 vtscsi_complete_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
1148 struct vtscsi_request *req)
1150 struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1151 struct vtscsi_request *to_req;
1154 tmf_resp = &req->vsr_tmf_resp;
1155 response = tmf_resp->response;
1156 to_req = req->vsr_timedout_req;
1158 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p to_req=%p response=%d\n",
1159 req, to_req, response);
1161 vtscsi_enqueue_request(sc, req);
1164 * The timedout request could have completed between when the
1165 * abort task was sent and when the host processed it.
1167 if (to_req->vsr_state != VTSCSI_REQ_STATE_TIMEDOUT)
1170 /* The timedout request was successfully aborted. */
1171 if (response == VIRTIO_SCSI_S_FUNCTION_COMPLETE)
1174 /* Don't bother if the device is going away. */
1175 if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
1178 /* The timedout request will be aborted by the reset. */
1179 if (sc->vtscsi_flags & VTSCSI_FLAG_RESET)
1182 vtscsi_reset_bus(sc);
1186 vtscsi_abort_timedout_scsi_cmd(struct vtscsi_softc *sc,
1187 struct vtscsi_request *to_req)
1190 struct ccb_hdr *to_ccbh;
1191 struct vtscsi_request *req;
1192 struct virtio_scsi_ctrl_tmf_req *tmf_req;
1193 struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1196 sg = sc->vtscsi_sglist;
1197 to_ccbh = &to_req->vsr_ccb->ccb_h;
1199 req = vtscsi_dequeue_request(sc);
1205 tmf_req = &req->vsr_tmf_req;
1206 tmf_resp = &req->vsr_tmf_resp;
1208 vtscsi_init_ctrl_tmf_req(to_ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
1209 (uintptr_t) to_ccbh, tmf_req);
1212 sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
1213 sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));
1215 req->vsr_timedout_req = to_req;
1216 req->vsr_complete = vtscsi_complete_abort_timedout_scsi_cmd;
1217 tmf_resp->response = -1;
1219 error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
1220 VTSCSI_EXECUTE_ASYNC);
1224 vtscsi_enqueue_request(sc, req);
1227 vtscsi_dprintf(sc, VTSCSI_ERROR, "error=%d req=%p "
1228 "timedout req=%p ccb=%p\n", error, req, to_req, to_ccbh);
1234 vtscsi_timedout_scsi_cmd(void *xreq)
1236 struct vtscsi_softc *sc;
1237 struct vtscsi_request *to_req;
1240 sc = to_req->vsr_softc;
1242 vtscsi_dprintf(sc, VTSCSI_INFO, "timedout req=%p ccb=%p state=%#x\n",
1243 to_req, to_req->vsr_ccb, to_req->vsr_state);
1245 /* Don't bother if the device is going away. */
1246 if (sc->vtscsi_flags & VTSCSI_FLAG_DETACH)
1250 * Bail if the request is not in use. We likely raced when
1251 * stopping the callout handler or it has already been aborted.
1253 if (to_req->vsr_state != VTSCSI_REQ_STATE_INUSE ||
1254 (to_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) == 0)
1258 * Complete the request queue in case the timedout request is
1259 * actually just pending.
1261 vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
1262 if (to_req->vsr_state == VTSCSI_REQ_STATE_FREE)
1265 sc->vtscsi_stats.scsi_cmd_timeouts++;
1266 to_req->vsr_state = VTSCSI_REQ_STATE_TIMEDOUT;
1268 if (vtscsi_abort_timedout_scsi_cmd(sc, to_req) == 0)
1271 vtscsi_dprintf(sc, VTSCSI_ERROR, "resetting bus\n");
1272 vtscsi_reset_bus(sc);
1276 vtscsi_scsi_cmd_cam_status(struct virtio_scsi_cmd_resp *cmd_resp)
1280 switch (cmd_resp->response) {
1281 case VIRTIO_SCSI_S_OK:
1282 status = CAM_REQ_CMP;
1284 case VIRTIO_SCSI_S_OVERRUN:
1285 status = CAM_DATA_RUN_ERR;
1287 case VIRTIO_SCSI_S_ABORTED:
1288 status = CAM_REQ_ABORTED;
1290 case VIRTIO_SCSI_S_BAD_TARGET:
1291 status = CAM_SEL_TIMEOUT;
1293 case VIRTIO_SCSI_S_RESET:
1294 status = CAM_SCSI_BUS_RESET;
1296 case VIRTIO_SCSI_S_BUSY:
1297 status = CAM_SCSI_BUSY;
1299 case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
1300 case VIRTIO_SCSI_S_TARGET_FAILURE:
1301 case VIRTIO_SCSI_S_NEXUS_FAILURE:
1302 status = CAM_SCSI_IT_NEXUS_LOST;
1304 default: /* VIRTIO_SCSI_S_FAILURE */
1305 status = CAM_REQ_CMP_ERR;
1313 vtscsi_complete_scsi_cmd_response(struct vtscsi_softc *sc,
1314 struct ccb_scsiio *csio, struct virtio_scsi_cmd_resp *cmd_resp)
1318 csio->scsi_status = cmd_resp->status;
1319 csio->resid = cmd_resp->resid;
1321 if (csio->scsi_status == SCSI_STATUS_OK)
1322 status = CAM_REQ_CMP;
1324 status = CAM_SCSI_STATUS_ERROR;
1326 if (cmd_resp->sense_len > 0) {
1327 status |= CAM_AUTOSNS_VALID;
1329 if (cmd_resp->sense_len < csio->sense_len)
1330 csio->sense_resid = csio->sense_len -
1331 cmd_resp->sense_len;
1333 csio->sense_resid = 0;
1335 memcpy(&csio->sense_data, cmd_resp->sense,
1336 csio->sense_len - csio->sense_resid);
1339 vtscsi_dprintf(sc, status == CAM_REQ_CMP ? VTSCSI_TRACE : VTSCSI_ERROR,
1340 "ccb=%p scsi_status=%#x resid=%u sense_resid=%u\n",
1341 csio, csio->scsi_status, csio->resid, csio->sense_resid);
1347 vtscsi_complete_scsi_cmd(struct vtscsi_softc *sc, struct vtscsi_request *req)
1349 struct ccb_hdr *ccbh;
1350 struct ccb_scsiio *csio;
1351 struct virtio_scsi_cmd_resp *cmd_resp;
1354 csio = &req->vsr_ccb->csio;
1355 ccbh = &csio->ccb_h;
1356 cmd_resp = &req->vsr_cmd_resp;
1358 KASSERT(ccbh->ccbh_vtscsi_req == req,
1359 ("ccb %p req mismatch %p/%p", ccbh, ccbh->ccbh_vtscsi_req, req));
1361 if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
1362 callout_stop(&req->vsr_callout);
1364 status = vtscsi_scsi_cmd_cam_status(cmd_resp);
1365 if (status == CAM_REQ_ABORTED) {
1366 if (req->vsr_state == VTSCSI_REQ_STATE_TIMEDOUT)
1367 status = CAM_CMD_TIMEOUT;
1368 } else if (status == CAM_REQ_CMP)
1369 status = vtscsi_complete_scsi_cmd_response(sc, csio, cmd_resp);
1371 if ((status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
1372 status |= CAM_DEV_QFRZN;
1373 xpt_freeze_devq(ccbh->path, 1);
1376 if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
1377 status |= CAM_RELEASE_SIMQ;
1379 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p status=%#x\n",
1382 ccbh->status = status;
1383 xpt_done(req->vsr_ccb);
1384 vtscsi_enqueue_request(sc, req);
/*
 * Busy-wait for a polled control (TMF) request to complete.  Marks the
 * request polled so vtscsi_complete_request() sets the COMPLETE flag,
 * then spins servicing the control virtqueue until that flag appears.
 */
1388 vtscsi_poll_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req)
1391 /* XXX We probably shouldn't poll forever. */
1392 req->vsr_flags |= VTSCSI_REQ_FLAG_POLLED;
1394 vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
1395 while ((req->vsr_flags & VTSCSI_REQ_FLAG_COMPLETE) == 0);
1397 req->vsr_flags &= ~VTSCSI_REQ_FLAG_POLLED;
/*
 * Enqueue a control-queue (task management) request described by the
 * scatter/gather list onto the control virtqueue and notify the host.
 * 'flag' selects polled (VTSCSI_EXECUTE_POLL) vs. asynchronous completion;
 * async callers must have set req->vsr_complete.  Returns 0 on success,
 * EAGAIN if the virtqueue lacks descriptors, or another enqueue error.
 */
1401 vtscsi_execute_ctrl_req(struct vtscsi_softc *sc, struct vtscsi_request *req,
1402 struct sglist *sg, int readable, int writable, int flag)
1404 struct virtqueue *vq;
1407 vq = sc->vtscsi_control_vq;
1409 MPASS(flag == VTSCSI_EXECUTE_POLL || req->vsr_complete != NULL);
1411 error = virtqueue_enqueue(vq, req, sg, readable, writable);
1414 * Return EAGAIN when the virtqueue does not have enough
1415 * descriptors available.
1417 if (error == ENOSPC || error == EMSGSIZE)
1423 virtqueue_notify(vq);
1424 if (flag == VTSCSI_EXECUTE_POLL)
1425 vtscsi_poll_ctrl_req(sc, req);
/*
 * Completion handler for an ABORT_TASK TMF request: map the VirtIO TMF
 * response code onto a CAM CCB status, complete the CCB, and recycle the
 * request.
 */
1431 vtscsi_complete_abort_task_cmd(struct vtscsi_softc *sc,
1432 struct vtscsi_request *req)
1435 struct ccb_hdr *ccbh;
1436 struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1440 tmf_resp = &req->vsr_tmf_resp;
1442 switch (tmf_resp->response) {
1443 case VIRTIO_SCSI_S_FUNCTION_COMPLETE:
1444 ccbh->status = CAM_REQ_CMP;
1446 case VIRTIO_SCSI_S_FUNCTION_REJECTED:
1447 ccbh->status = CAM_UA_ABORT;
/* Any other response code is treated as a generic completion error. */
1450 ccbh->status = CAM_REQ_CMP_ERR;
1455 vtscsi_enqueue_request(sc, req);
/*
 * Build and issue an asynchronous VIRTIO_SCSI_T_TMF_ABORT_TASK request for
 * the CCB named in the XPT_ABORT CCB.  Only requests that may actually be
 * in flight (state INUSE) are aborted; the target request's tag is the
 * address of the CCB header being aborted.
 */
1459 vtscsi_execute_abort_task_cmd(struct vtscsi_softc *sc,
1460 struct vtscsi_request *req)
1463 struct ccb_abort *cab;
1464 struct ccb_hdr *ccbh;
1465 struct ccb_hdr *abort_ccbh;
1466 struct vtscsi_request *abort_req;
1467 struct virtio_scsi_ctrl_tmf_req *tmf_req;
1468 struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1471 sg = sc->vtscsi_sglist;
1472 cab = &req->vsr_ccb->cab;
1474 tmf_req = &req->vsr_tmf_req;
1475 tmf_resp = &req->vsr_tmf_resp;
1477 /* CCB header and request that's to be aborted. */
1478 abort_ccbh = &cab->abort_ccb->ccb_h;
1479 abort_req = abort_ccbh->ccbh_vtscsi_req;
/* Only SCSI I/O CCBs with an attached driver request can be aborted. */
1481 if (abort_ccbh->func_code != XPT_SCSI_IO || abort_req == NULL) {
1486 /* Only attempt to abort requests that could be in-flight. */
1487 if (abort_req->vsr_state != VTSCSI_REQ_STATE_INUSE) {
1492 abort_req->vsr_state = VTSCSI_REQ_STATE_ABORTED;
/* Stop the timeout callout; the abort path now owns the completion. */
1493 if (abort_req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET)
1494 callout_stop(&abort_req->vsr_callout);
1496 vtscsi_init_ctrl_tmf_req(ccbh, VIRTIO_SCSI_T_TMF_ABORT_TASK,
1497 (uintptr_t) abort_ccbh, tmf_req);
/* One readable segment (TMF request), one writable (TMF response). */
1500 sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
1501 sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));
1503 req->vsr_complete = vtscsi_complete_abort_task_cmd;
/* Poison the response so a stale value cannot look like success. */
1504 tmf_resp->response = -1;
1506 error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
1507 VTSCSI_EXECUTE_ASYNC);
1510 vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p abort_ccb=%p "
1511 "abort_req=%p\n", error, req, abort_ccbh, abort_req);
/*
 * Completion handler for a device-reset TMF request: on success, announce
 * the bus device reset (AC_SENT_BDR) to CAM; otherwise fail the CCB.  The
 * request is recycled in either case.
 */
1517 vtscsi_complete_reset_dev_cmd(struct vtscsi_softc *sc,
1518 struct vtscsi_request *req)
1521 struct ccb_hdr *ccbh;
1522 struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1526 tmf_resp = &req->vsr_tmf_resp;
1528 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p response=%d\n",
1529 req, ccb, tmf_resp->response);
1531 if (tmf_resp->response == VIRTIO_SCSI_S_FUNCTION_COMPLETE) {
1532 ccbh->status = CAM_REQ_CMP;
1533 vtscsi_announce(sc, AC_SENT_BDR, ccbh->target_id,
1536 ccbh->status = CAM_REQ_CMP_ERR;
1539 vtscsi_enqueue_request(sc, req);
/*
 * Build and issue an asynchronous device-reset TMF for an XPT_RESET_DEV
 * CCB.  A wildcard LUN resets the whole I_T nexus; otherwise a single
 * logical unit reset is requested.
 */
1543 vtscsi_execute_reset_dev_cmd(struct vtscsi_softc *sc,
1544 struct vtscsi_request *req)
1547 struct ccb_resetdev *crd;
1548 struct ccb_hdr *ccbh;
1549 struct virtio_scsi_ctrl_tmf_req *tmf_req;
1550 struct virtio_scsi_ctrl_tmf_resp *tmf_resp;
1554 sg = sc->vtscsi_sglist;
1555 crd = &req->vsr_ccb->crd;
1557 tmf_req = &req->vsr_tmf_req;
1558 tmf_resp = &req->vsr_tmf_resp;
1560 if (ccbh->target_lun == CAM_LUN_WILDCARD)
1561 subtype = VIRTIO_SCSI_T_TMF_I_T_NEXUS_RESET;
1563 subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET;
/* Tag is unused (0) for reset TMFs. */
1565 vtscsi_init_ctrl_tmf_req(ccbh, subtype, 0, tmf_req);
/* One readable segment (TMF request), one writable (TMF response). */
1568 sglist_append(sg, tmf_req, sizeof(struct virtio_scsi_ctrl_tmf_req));
1569 sglist_append(sg, tmf_resp, sizeof(struct virtio_scsi_ctrl_tmf_resp));
1571 req->vsr_complete = vtscsi_complete_reset_dev_cmd;
/* Poison the response so a stale value cannot look like success. */
1572 tmf_resp->response = -1;
1574 error = vtscsi_execute_ctrl_req(sc, req, sg, 1, 1,
1575 VTSCSI_EXECUTE_ASYNC);
1577 vtscsi_dprintf(sc, VTSCSI_TRACE, "error=%d req=%p ccb=%p\n",
/*
 * Decode a VirtIO SCSI 8-byte LUN address: byte 1 is the target id and
 * bytes 2-3 carry the LUN (big-endian); the inverse of
 * vtscsi_set_request_lun().
 */
1584 vtscsi_get_request_lun(uint8_t lun[], target_id_t *target_id, lun_id_t *lun_id)
1587 *target_id = lun[1];
1588 *lun_id = (lun[2] << 8) | lun[3];
/*
 * Encode the CCB's target/LUN into the VirtIO SCSI LUN address format:
 * byte 1 is the target id; bytes 2-3 carry the LUN with the 0x40 flat
 * addressing bit set in the high byte.
 */
1592 vtscsi_set_request_lun(struct ccb_hdr *ccbh, uint8_t lun[])
1596 lun[1] = ccbh->target_id;
1597 lun[2] = 0x40 | ((ccbh->target_lun >> 8) & 0x3F);
1598 lun[3] = ccbh->target_lun & 0xFF;
/*
 * Populate a virtio_scsi_cmd_req from a CAM SCSI I/O CCB: translate the
 * CAM tag action to a VirtIO task attribute, encode the LUN, use the CCB
 * address as the command tag, and copy in the CDB (which may be inline or
 * behind a pointer depending on CAM_CDB_POINTER).
 */
1602 vtscsi_init_scsi_cmd_req(struct ccb_scsiio *csio,
1603 struct virtio_scsi_cmd_req *cmd_req)
1607 switch (csio->tag_action) {
1608 case MSG_HEAD_OF_Q_TAG:
1609 attr = VIRTIO_SCSI_S_HEAD;
1611 case MSG_ORDERED_Q_TAG:
1612 attr = VIRTIO_SCSI_S_ORDERED;
1615 attr = VIRTIO_SCSI_S_ACA;
1617 default: /* MSG_SIMPLE_Q_TAG */
1618 attr = VIRTIO_SCSI_S_SIMPLE;
1622 vtscsi_set_request_lun(&csio->ccb_h, cmd_req->lun);
/* The CCB pointer doubles as the unique command tag. */
1623 cmd_req->tag = (uintptr_t) csio;
1624 cmd_req->task_attr = attr;
1626 memcpy(cmd_req->cdb,
1627 csio->ccb_h.flags & CAM_CDB_POINTER ?
1628 csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes,
/*
 * Populate a task-management request: encode the CCB's LUN and set the
 * TMF type/subtype; 'tag' identifies the task for task-level subtypes
 * (e.g. the aborted CCB for ABORT_TASK).
 */
1633 vtscsi_init_ctrl_tmf_req(struct ccb_hdr *ccbh, uint32_t subtype,
1634 uintptr_t tag, struct virtio_scsi_ctrl_tmf_req *tmf_req)
1637 vtscsi_set_request_lun(ccbh, tmf_req->lun);
1639 tmf_req->type = VIRTIO_SCSI_T_TMF;
1640 tmf_req->subtype = subtype;
/*
 * Record a resource shortage (out of requests and/or a full request
 * virtqueue) in vtscsi_frozen, and freeze the CAM SIMQ exactly once on
 * the transition from unfrozen to frozen.
 */
1645 vtscsi_freeze_simq(struct vtscsi_softc *sc, int reason)
1649 frozen = sc->vtscsi_frozen;
1651 if (reason & VTSCSI_REQUEST &&
1652 (sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS) == 0)
1653 sc->vtscsi_frozen |= VTSCSI_FROZEN_NO_REQUESTS;
1655 if (reason & VTSCSI_REQUEST_VQ &&
1656 (sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL) == 0)
1657 sc->vtscsi_frozen |= VTSCSI_FROZEN_REQUEST_VQ_FULL;
1659 /* Freeze the SIMQ if transitioned to frozen. */
1660 if (frozen == 0 && sc->vtscsi_frozen != 0) {
1661 vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ frozen\n");
1662 xpt_freeze_simq(sc->vtscsi_sim, 1);
/*
 * Clear the given shortage reason(s) from vtscsi_frozen.  Returns nonzero
 * ("thawed") when all freeze reasons are gone, signaling the caller to
 * release the SIMQ; returns early if nothing was frozen or no reason
 * was given.
 */
1667 vtscsi_thaw_simq(struct vtscsi_softc *sc, int reason)
1671 if (sc->vtscsi_frozen == 0 || reason == 0)
1674 if (reason & VTSCSI_REQUEST &&
1675 sc->vtscsi_frozen & VTSCSI_FROZEN_NO_REQUESTS)
1676 sc->vtscsi_frozen &= ~VTSCSI_FROZEN_NO_REQUESTS;
1678 if (reason & VTSCSI_REQUEST_VQ &&
1679 sc->vtscsi_frozen & VTSCSI_FROZEN_REQUEST_VQ_FULL)
1680 sc->vtscsi_frozen &= ~VTSCSI_FROZEN_REQUEST_VQ_FULL;
1682 thawed = sc->vtscsi_frozen == 0;
1684 vtscsi_dprintf(sc, VTSCSI_INFO, "SIMQ thawed\n");
/*
 * Post an async event (e.g. AC_SENT_BDR, AC_BUS_RESET) to CAM for the
 * given target/LUN.  Wildcard target+LUN uses the softc's pre-built
 * wildcard path; otherwise a temporary path is created, used, and freed.
 */
1690 vtscsi_announce(struct vtscsi_softc *sc, uint32_t ac_code,
1691 target_id_t target_id, lun_id_t lun_id)
1693 struct cam_path *path;
1695 /* Use the wildcard path from our softc for bus announcements. */
1696 if (target_id == CAM_TARGET_WILDCARD && lun_id == CAM_LUN_WILDCARD) {
1697 xpt_async(ac_code, sc->vtscsi_path, NULL);
1701 if (xpt_create_path(&path, NULL, cam_sim_path(sc->vtscsi_sim),
1702 target_id, lun_id) != CAM_REQ_CMP) {
1703 vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot create path\n");
1707 xpt_async(ac_code, path, NULL);
1708 xpt_free_path(path);
/*
 * Kick off an asynchronous CAM rescan of the given target/LUN: allocate a
 * CCB and a path for it (xpt_rescan, out of view, consumes the CCB).
 * Failures are logged and the rescan is silently skipped.
 */
1712 vtscsi_execute_rescan(struct vtscsi_softc *sc, target_id_t target_id,
1718 ccb = xpt_alloc_ccb_nowait();
1720 vtscsi_dprintf(sc, VTSCSI_ERROR, "cannot allocate CCB\n");
1724 status = xpt_create_path(&ccb->ccb_h.path, NULL,
1725 cam_sim_path(sc->vtscsi_sim), target_id, lun_id);
1726 if (status != CAM_REQ_CMP) {
/* Rescan the entire bus using wildcard target and LUN. */
1735 vtscsi_execute_rescan_bus(struct vtscsi_softc *sc)
1738 vtscsi_execute_rescan(sc, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);
/*
 * Handle a VIRTIO_SCSI_T_TRANSPORT_RESET event: decode the affected
 * target/LUN from the event and rescan it for hotplug add/remove reasons;
 * other reasons are only logged.
 */
1742 vtscsi_transport_reset_event(struct vtscsi_softc *sc,
1743 struct virtio_scsi_event *event)
1745 target_id_t target_id;
1748 vtscsi_get_request_lun(event->lun, &target_id, &lun_id);
1750 switch (event->reason) {
1751 case VIRTIO_SCSI_EVT_RESET_RESCAN:
1752 case VIRTIO_SCSI_EVT_RESET_REMOVED:
1753 vtscsi_execute_rescan(sc, target_id, lun_id);
1756 device_printf(sc->vtscsi_dev,
1757 "unhandled transport event reason: %d\n", event->reason);
/*
 * Dispatch one dequeued event buffer.  If events were missed
 * (VIRTIO_SCSI_T_EVENTS_MISSED), rescan the whole bus since specific
 * events were lost.  The buffer is always requeued on the event
 * virtqueue afterwards.
 */
1763 vtscsi_handle_event(struct vtscsi_softc *sc, struct virtio_scsi_event *event)
1767 if ((event->event & VIRTIO_SCSI_T_EVENTS_MISSED) == 0) {
1768 switch (event->event) {
1769 case VIRTIO_SCSI_T_TRANSPORT_RESET:
1770 vtscsi_transport_reset_event(sc, event);
1773 device_printf(sc->vtscsi_dev,
1774 "unhandled event: %d\n", event->event);
1778 vtscsi_execute_rescan_bus(sc);
1781 * This should always be successful since the buffer
1782 * was just dequeued.
1784 error = vtscsi_enqueue_event_buf(sc, event);
1786 ("cannot requeue event buffer: %d", error));
/*
 * Post one event buffer on the event virtqueue as a device-writable
 * descriptor and notify the host.  Returns 0 or an sglist/virtqueue
 * error.
 */
1790 vtscsi_enqueue_event_buf(struct vtscsi_softc *sc,
1791 struct virtio_scsi_event *event)
1794 struct virtqueue *vq;
1797 sg = sc->vtscsi_sglist;
1798 vq = sc->vtscsi_event_vq;
1799 size = sc->vtscsi_event_buf_size;
1804 error = sglist_append(sg, event, size);
/* Entirely writable: the device fills the buffer (0 readable segments). */
1808 error = virtqueue_enqueue(vq, event, sg, 0, sg->sg_nseg);
1812 virtqueue_notify(vq);
/*
 * Populate the event virtqueue with VTSCSI_NUM_EVENT_BUFS buffers.
 * Skipped (size forced below the minimum) unless the host advertises
 * hotplug support, to avoid a crash in early QEMU; partial success is
 * acceptable since missed events are flagged by the device.
 */
1818 vtscsi_init_event_vq(struct vtscsi_softc *sc)
1820 struct virtio_scsi_event *event;
1824 * The first release of QEMU with VirtIO SCSI support would crash
1825 * when attempting to notify the event virtqueue. This was fixed
1826 * when hotplug support was added.
1828 if (sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG)
1829 size = sc->vtscsi_event_buf_size;
1833 if (size < sizeof(struct virtio_scsi_event))
1836 for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
1837 event = &sc->vtscsi_event_bufs[i];
1839 error = vtscsi_enqueue_event_buf(sc, event);
1845 * Even just one buffer is enough. Missed events are
1846 * denoted with the VIRTIO_SCSI_T_EVENTS_MISSED flag.
/*
 * Requeue all event buffers after a device reinit.  No-op when hotplug
 * is unsupported or the buffer size is too small (mirroring the checks
 * in vtscsi_init_event_vq); asserts that at least one buffer was
 * requeued.
 */
1855 vtscsi_reinit_event_vq(struct vtscsi_softc *sc)
1857 struct virtio_scsi_event *event;
1860 if ((sc->vtscsi_flags & VTSCSI_FLAG_HOTPLUG) == 0 ||
1861 sc->vtscsi_event_buf_size < sizeof(struct virtio_scsi_event))
1864 for (i = 0; i < VTSCSI_NUM_EVENT_BUFS; i++) {
1865 event = &sc->vtscsi_event_bufs[i];
1867 error = vtscsi_enqueue_event_buf(sc, event);
1872 KASSERT(i > 0, ("cannot reinit event vq: %d", error));
/*
 * Remove every outstanding event buffer from the event virtqueue
 * (the static buffers themselves are owned by the softc, so nothing
 * is freed here).
 */
1876 vtscsi_drain_event_vq(struct vtscsi_softc *sc)
1878 struct virtqueue *vq;
1881 vq = sc->vtscsi_event_vq;
1884 while (virtqueue_drain(vq, &last) != NULL)
1887 KASSERT(virtqueue_empty(vq), ("eventvq not empty"));
1891 vtscsi_complete_vqs_locked(struct vtscsi_softc *sc)
1894 VTSCSI_LOCK_OWNED(sc);
1896 if (sc->vtscsi_request_vq != NULL)
1897 vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
1898 if (sc->vtscsi_control_vq != NULL)
1899 vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
/* Locked wrapper around vtscsi_complete_vqs_locked(). */
1903 vtscsi_complete_vqs(struct vtscsi_softc *sc)
1907 vtscsi_complete_vqs_locked(sc);
/*
 * Cancel an in-flight request during detach or bus reset.  On detach the
 * callout must be fully drained (without the softc lock, to avoid
 * deadlock) because the request is about to be freed; on reset a
 * callout_stop suffices.  The CCB completes as CAM_NO_HBA (detach) or
 * CAM_REQUEUE_REQ (reset, so CAM retries), and the request is recycled.
 */
1912 vtscsi_cancel_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
1919 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p ccb=%p\n", req, ccb);
1922 * The callout must be drained when detaching since the request is
1923 * about to be freed. The VTSCSI_MTX must not be held for this in
1924 * case the callout is pending because there is a deadlock potential.
1925 * Otherwise, the virtqueue is being drained because of a bus reset
1926 * so we only need to attempt to stop the callouts.
1928 detach = (sc->vtscsi_flags & VTSCSI_FLAG_DETACH) != 0;
1930 VTSCSI_LOCK_NOTOWNED(sc);
1932 VTSCSI_LOCK_OWNED(sc);
1934 if (req->vsr_flags & VTSCSI_REQ_FLAG_TIMEOUT_SET) {
1936 callout_drain(&req->vsr_callout);
1938 callout_stop(&req->vsr_callout);
1944 ccb->ccb_h.status = CAM_NO_HBA;
1946 ccb->ccb_h.status = CAM_REQUEUE_REQ;
1952 vtscsi_enqueue_request(sc, req);
/*
 * Remove and cancel every outstanding request on a virtqueue, then assert
 * it is empty.
 */
1956 vtscsi_drain_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
1958 struct vtscsi_request *req;
1963 vtscsi_dprintf(sc, VTSCSI_TRACE, "vq=%p\n", vq);
1965 while ((req = virtqueue_drain(vq, &last)) != NULL)
1966 vtscsi_cancel_request(sc, req);
1968 KASSERT(virtqueue_empty(vq), ("virtqueue not empty"));
/*
 * Drain all three virtqueues (control, request, event); each may be NULL
 * if attach failed part-way.
 */
1972 vtscsi_drain_vqs(struct vtscsi_softc *sc)
1975 if (sc->vtscsi_control_vq != NULL)
1976 vtscsi_drain_vq(sc, sc->vtscsi_control_vq);
1977 if (sc->vtscsi_request_vq != NULL)
1978 vtscsi_drain_vq(sc, sc->vtscsi_request_vq);
1979 if (sc->vtscsi_event_vq != NULL)
1980 vtscsi_drain_event_vq(sc);
/* Quiesce the device: disable virtqueue interrupts, then stop VirtIO. */
1984 vtscsi_stop(struct vtscsi_softc *sc)
1987 vtscsi_disable_vqs_intr(sc);
1988 virtio_stop(sc->vtscsi_dev);
/*
 * Perform a full bus reset: complete then drain all in-flight requests
 * (so CAM requeues them), thaw/release a frozen SIMQ (a shortage cannot
 * outlive the reset), reinitialize the device, and announce AC_BUS_RESET.
 * Honors the vtscsi_bus_reset_disable tunable.  Caller holds the softc
 * lock; VTSCSI_FLAG_RESET marks the reset in progress.
 */
1992 vtscsi_reset_bus(struct vtscsi_softc *sc)
1996 VTSCSI_LOCK_OWNED(sc);
1998 if (vtscsi_bus_reset_disable != 0) {
1999 device_printf(sc->vtscsi_dev, "bus reset disabled\n");
2003 sc->vtscsi_flags |= VTSCSI_FLAG_RESET;
2006 * vtscsi_stop() will cause the in-flight requests to be canceled.
2007 * Those requests are then completed here so CAM will retry them
2008 * after the reset is complete.
2011 vtscsi_complete_vqs_locked(sc);
2013 /* Rid the virtqueues of any remaining requests. */
2014 vtscsi_drain_vqs(sc);
2017 * Any resource shortage that froze the SIMQ cannot persist across
2018 * a bus reset so ensure it gets thawed here.
2020 if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST | VTSCSI_REQUEST_VQ) != 0)
2021 xpt_release_simq(sc->vtscsi_sim, 0);
2023 error = vtscsi_reinit(sc);
2025 device_printf(sc->vtscsi_dev,
2026 "reinitialization failed, stopping device...\n");
2029 vtscsi_announce(sc, AC_BUS_RESET, CAM_TARGET_WILDCARD,
2032 sc->vtscsi_flags &= ~VTSCSI_FLAG_RESET;
/*
 * One-time initialization of a request structure: verify the inline
 * request/response unions each fit within a single page (a VirtIO
 * descriptor cannot span pages here), link back to the softc, and set up
 * the timeout callout protected by the softc mutex.
 */
2038 vtscsi_init_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
2042 int req_nsegs, resp_nsegs;
2044 req_nsegs = sglist_count(&req->vsr_ureq, sizeof(req->vsr_ureq));
2045 resp_nsegs = sglist_count(&req->vsr_uresp, sizeof(req->vsr_uresp));
2047 KASSERT(req_nsegs == 1, ("request crossed page boundary"));
2048 KASSERT(resp_nsegs == 1, ("response crossed page boundary"));
2051 req->vsr_softc = sc;
2052 callout_init_mtx(&req->vsr_callout, VTSCSI_MTX(sc), 0);
/*
 * Allocate the pool of request structures.  Sized from the request
 * virtqueue: without indirect descriptors each request consumes at least
 * VTSCSI_MIN_SEGMENTS slots, so fewer can be in flight; a few extra are
 * reserved for internal TMF use.  Each request is initialized and placed
 * on the free list.
 */
2056 vtscsi_alloc_requests(struct vtscsi_softc *sc)
2058 struct vtscsi_request *req;
2062 * Commands destined for either the request or control queues come
2063 * from the same SIM queue. Use the size of the request virtqueue
2064 * as it (should) be much more frequently used. Some additional
2065 * requests are allocated for internal (TMF) use.
2067 nreqs = virtqueue_size(sc->vtscsi_request_vq);
2068 if ((sc->vtscsi_flags & VTSCSI_FLAG_INDIRECT) == 0)
2069 nreqs /= VTSCSI_MIN_SEGMENTS;
2070 nreqs += VTSCSI_RESERVED_REQUESTS;
2072 for (i = 0; i < nreqs; i++) {
2073 req = malloc(sizeof(struct vtscsi_request), M_DEVBUF,
2078 vtscsi_init_request(sc, req);
2080 sc->vtscsi_nrequests++;
2081 vtscsi_enqueue_request(sc, req);
/*
 * Free every request on the free list (callouts must already be idle),
 * asserting afterwards that none leaked.
 */
2088 vtscsi_free_requests(struct vtscsi_softc *sc)
2090 struct vtscsi_request *req;
2092 while ((req = vtscsi_dequeue_request(sc)) != NULL) {
2093 KASSERT(callout_active(&req->vsr_callout) == 0,
2094 ("request callout still active"));
2096 sc->vtscsi_nrequests--;
2097 free(req, M_DEVBUF);
2100 KASSERT(sc->vtscsi_nrequests == 0, ("leaked requests: %d",
2101 sc->vtscsi_nrequests));
/*
 * Return a request to the free list: thaw/release the SIMQ if it was
 * frozen for lack of requests, scrub the request's per-command state,
 * and append it at the tail so it is not immediately reused while a
 * stale callout might still be winding down.
 */
2105 vtscsi_enqueue_request(struct vtscsi_softc *sc, struct vtscsi_request *req)
2108 KASSERT(req->vsr_softc == sc,
2109 ("non-matching request vsr_softc %p/%p", req->vsr_softc, sc));
2111 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);
2113 /* A request is available so the SIMQ could be released. */
2114 if (vtscsi_thaw_simq(sc, VTSCSI_REQUEST) != 0)
2115 xpt_release_simq(sc->vtscsi_sim, 1);
2117 req->vsr_ccb = NULL;
2118 req->vsr_complete = NULL;
2119 req->vsr_ptr0 = NULL;
2120 req->vsr_state = VTSCSI_REQ_STATE_FREE;
2123 bzero(&req->vsr_ureq, sizeof(req->vsr_ureq));
2124 bzero(&req->vsr_uresp, sizeof(req->vsr_uresp));
2127 * We insert at the tail of the queue in order to make it
2128 * very unlikely a request will be reused if we race with
2129 * stopping its callout handler.
2131 TAILQ_INSERT_TAIL(&sc->vtscsi_req_free, req, vsr_link);
/*
 * Take a request off the free list and mark it in use.  Returns NULL and
 * bumps the dequeue_no_requests statistic when the pool is exhausted.
 */
2134 static struct vtscsi_request *
2135 vtscsi_dequeue_request(struct vtscsi_softc *sc)
2137 struct vtscsi_request *req;
2139 req = TAILQ_FIRST(&sc->vtscsi_req_free);
2141 req->vsr_state = VTSCSI_REQ_STATE_INUSE;
2142 TAILQ_REMOVE(&sc->vtscsi_req_free, req, vsr_link);
2144 sc->vtscsi_stats.dequeue_no_requests++;
2146 vtscsi_dprintf(sc, VTSCSI_TRACE, "req=%p\n", req);
/*
 * Complete one dequeued request: for polled requests set the COMPLETE
 * flag the poll loop spins on; otherwise invoke the request's completion
 * callback, if any.
 */
2152 vtscsi_complete_request(struct vtscsi_request *req)
2155 if (req->vsr_flags & VTSCSI_REQ_FLAG_POLLED)
2156 req->vsr_flags |= VTSCSI_REQ_FLAG_COMPLETE;
2158 if (req->vsr_complete != NULL)
2159 req->vsr_complete(req->vsr_softc, req);
/*
 * Drain all finished requests off a virtqueue and complete each one.
 * Caller must hold the softc lock.
 */
2163 vtscsi_complete_vq(struct vtscsi_softc *sc, struct virtqueue *vq)
2165 struct vtscsi_request *req;
2167 VTSCSI_LOCK_OWNED(sc);
2169 while ((req = virtqueue_dequeue(vq, NULL)) != NULL)
2170 vtscsi_complete_request(req);
/*
 * Control virtqueue interrupt handler.  Processes completions, then
 * re-enables the virtqueue interrupt; if more work arrived meanwhile
 * (enable returns nonzero), it disables again and loops to avoid losing
 * a notification.
 */
2174 vtscsi_control_vq_intr(void *xsc)
2176 struct vtscsi_softc *sc;
2177 struct virtqueue *vq;
2180 vq = sc->vtscsi_control_vq;
2185 vtscsi_complete_vq(sc, sc->vtscsi_control_vq);
2187 if (virtqueue_enable_intr(vq) != 0) {
2188 virtqueue_disable_intr(vq);
/*
 * Event virtqueue interrupt handler.  Dequeues and dispatches each event
 * buffer, then re-enables the interrupt with the same race-free
 * disable-and-retry pattern as the other vq handlers.
 */
2197 vtscsi_event_vq_intr(void *xsc)
2199 struct vtscsi_softc *sc;
2200 struct virtqueue *vq;
2201 struct virtio_scsi_event *event;
2204 vq = sc->vtscsi_event_vq;
2209 while ((event = virtqueue_dequeue(vq, NULL)) != NULL)
2210 vtscsi_handle_event(sc, event);
2212 if (virtqueue_enable_intr(vq) != 0) {
2213 virtqueue_disable_intr(vq);
/*
 * Request virtqueue interrupt handler.  Processes SCSI command
 * completions, then re-enables the interrupt, retrying if completions
 * raced in while interrupts were off.
 */
2222 vtscsi_request_vq_intr(void *xsc)
2224 struct vtscsi_softc *sc;
2225 struct virtqueue *vq;
2228 vq = sc->vtscsi_request_vq;
2233 vtscsi_complete_vq(sc, sc->vtscsi_request_vq);
2235 if (virtqueue_enable_intr(vq) != 0) {
2236 virtqueue_disable_intr(vq);
/* Disable interrupts on all three virtqueues. */
2245 vtscsi_disable_vqs_intr(struct vtscsi_softc *sc)
2248 virtqueue_disable_intr(sc->vtscsi_control_vq);
2249 virtqueue_disable_intr(sc->vtscsi_event_vq);
2250 virtqueue_disable_intr(sc->vtscsi_request_vq);
/* Enable interrupts on all three virtqueues. */
2254 vtscsi_enable_vqs_intr(struct vtscsi_softc *sc)
2257 virtqueue_enable_intr(sc->vtscsi_control_vq);
2258 virtqueue_enable_intr(sc->vtscsi_event_vq);
2259 virtqueue_enable_intr(sc->vtscsi_request_vq);
/*
 * Fetch the debug level tunable: the global hw.vtscsi.debug_level first,
 * then the per-device dev.vtscsi.N.debug_level which overrides it.
 */
2263 vtscsi_get_tunables(struct vtscsi_softc *sc)
2267 TUNABLE_INT_FETCH("hw.vtscsi.debug_level", &sc->vtscsi_debug);
2269 snprintf(tmpstr, sizeof(tmpstr), "dev.vtscsi.%d.debug_level",
2270 device_get_unit(sc->vtscsi_dev));
2271 TUNABLE_INT_FETCH(tmpstr, &sc->vtscsi_debug);
/*
 * Attach the driver's sysctl nodes under the device's tree: a writable
 * debug_level knob and read-only statistics counters.
 */
2275 vtscsi_add_sysctl(struct vtscsi_softc *sc)
2278 struct vtscsi_statistics *stats;
2279 struct sysctl_ctx_list *ctx;
2280 struct sysctl_oid *tree;
2281 struct sysctl_oid_list *child;
2283 dev = sc->vtscsi_dev;
2284 stats = &sc->vtscsi_stats;
2285 ctx = device_get_sysctl_ctx(dev);
2286 tree = device_get_sysctl_tree(dev);
2287 child = SYSCTL_CHILDREN(tree);
2289 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "debug_level",
2290 CTLFLAG_RW, &sc->vtscsi_debug, 0,
2293 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "scsi_cmd_timeouts",
2294 CTLFLAG_RD, &stats->scsi_cmd_timeouts,
2295 "SCSI command timeouts");
2296 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dequeue_no_requests",
2297 CTLFLAG_RD, &stats->dequeue_no_requests,
2298 "No available requests to dequeue");
2302 vtscsi_printf_req(struct vtscsi_request *req, const char *func,
2303 const char *fmt, ...)
2305 struct vtscsi_softc *sc;
2315 sc = req->vsr_softc;
2319 sbuf_new(&sb, str, sizeof(str), 0);
2322 sbuf_printf(&sb, "(noperiph:%s%d:%u): ",
2323 cam_sim_name(sc->vtscsi_sim), cam_sim_unit(sc->vtscsi_sim),
2324 cam_sim_bus(sc->vtscsi_sim));
2326 xpt_path_string(ccb->ccb_h.path, path_str, sizeof(path_str));
2327 sbuf_cat(&sb, path_str);
2328 if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
2329 scsi_command_string(&ccb->csio, &sb);
2330 sbuf_printf(&sb, "length %d ", ccb->csio.dxfer_len);
2334 sbuf_vprintf(&sb, fmt, ap);
2338 printf("%s: %s: %s", device_get_nameunit(sc->vtscsi_dev), func,