2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2009-2012,2016-2017 Microsoft Corp.
5 * Copyright (c) 2012 NetApp Inc.
6 * Copyright (c) 2012 Citrix Inc.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice unmodified, this list of conditions, and the following
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * StorVSC driver for Hyper-V. This driver presents a SCSI HBA interface
33 * to the Common Access Method (CAM) layer. CAM control blocks (CCBs) are
34 * converted into VSCSI protocol messages which are delivered to the parent
35 * partition StorVSP driver over the Hyper-V VMBUS.
37 #include <sys/cdefs.h>
38 __FBSDID("$FreeBSD$");
40 #include <sys/param.h>
42 #include <sys/condvar.h>
44 #include <sys/systm.h>
45 #include <sys/sysctl.h>
46 #include <sys/sockio.h>
48 #include <sys/malloc.h>
49 #include <sys/module.h>
50 #include <sys/kernel.h>
51 #include <sys/queue.h>
54 #include <sys/taskqueue.h>
56 #include <sys/mutex.h>
57 #include <sys/callout.h>
64 #include <sys/sglist.h>
65 #include <sys/eventhandler.h>
66 #include <machine/bus.h>
69 #include <cam/cam_ccb.h>
70 #include <cam/cam_periph.h>
71 #include <cam/cam_sim.h>
72 #include <cam/cam_xpt_sim.h>
73 #include <cam/cam_xpt_internal.h>
74 #include <cam/cam_debug.h>
75 #include <cam/scsi/scsi_all.h>
76 #include <cam/scsi/scsi_message.h>
78 #include <dev/hyperv/include/hyperv.h>
79 #include <dev/hyperv/include/vmbus.h>
80 #include "hv_vstorage.h"
83 #define STORVSC_MAX_LUNS_PER_TARGET (64)
84 #define STORVSC_MAX_IO_REQUESTS (STORVSC_MAX_LUNS_PER_TARGET * 2)
85 #define BLKVSC_MAX_IDE_DISKS_PER_TARGET (1)
86 #define BLKVSC_MAX_IO_REQUESTS STORVSC_MAX_IO_REQUESTS
87 #define STORVSC_MAX_TARGETS (2)
89 #define VSTOR_PKT_SIZE (sizeof(struct vstor_packet) - vmscsi_size_delta)
92 * 33 segments are needed to allow 128KB maxio, in case the data
93 * in the first page is _not_ PAGE_SIZE aligned, e.g.
95 * |<----------- 128KB ----------->|
97 * 0 2K 4K 8K 16K 124K 128K 130K
99 * +--+--+-----+-----+.......+-----+--+--+
100 * | | | | | | | | | DATA
102 * +--+--+-----+-----+.......------+--+--+
104 * | 1| 31 | 1| ...... # of segments
106 #define STORVSC_DATA_SEGCNT_MAX 33
107 #define STORVSC_DATA_SEGSZ_MAX PAGE_SIZE
108 #define STORVSC_DATA_SIZE_MAX \
109 ((STORVSC_DATA_SEGCNT_MAX - 1) * STORVSC_DATA_SEGSZ_MAX)
111 struct storvsc_softc;
114 LIST_ENTRY(hv_sgl_node) link;
115 struct sglist *sgl_data;
118 struct hv_sgl_page_pool{
119 LIST_HEAD(, hv_sgl_node) in_use_sgl_list;
120 LIST_HEAD(, hv_sgl_node) free_sgl_list;
122 } g_hv_sgl_page_pool;
124 enum storvsc_request_type {
130 SYSCTL_NODE(_hw, OID_AUTO, storvsc, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
131 "Hyper-V storage interface");
133 static u_int hv_storvsc_use_win8ext_flags = 1;
134 SYSCTL_UINT(_hw_storvsc, OID_AUTO, use_win8ext_flags, CTLFLAG_RW,
135 &hv_storvsc_use_win8ext_flags, 0,
136 "Use win8 extension flags or not");
138 static u_int hv_storvsc_use_pim_unmapped = 1;
139 SYSCTL_UINT(_hw_storvsc, OID_AUTO, use_pim_unmapped, CTLFLAG_RDTUN,
140 &hv_storvsc_use_pim_unmapped, 0,
141 "Optimize storvsc by using unmapped I/O");
143 static u_int hv_storvsc_ringbuffer_size = (64 * PAGE_SIZE);
144 SYSCTL_UINT(_hw_storvsc, OID_AUTO, ringbuffer_size, CTLFLAG_RDTUN,
145 &hv_storvsc_ringbuffer_size, 0, "Hyper-V storage ringbuffer size");
147 static u_int hv_storvsc_max_io = 512;
148 SYSCTL_UINT(_hw_storvsc, OID_AUTO, max_io, CTLFLAG_RDTUN,
149 &hv_storvsc_max_io, 0, "Hyper-V storage max io limit");
151 static int hv_storvsc_chan_cnt = 0;
152 SYSCTL_INT(_hw_storvsc, OID_AUTO, chan_cnt, CTLFLAG_RDTUN,
153 &hv_storvsc_chan_cnt, 0, "# of channels to use");
155 static int hv_storvsc_srb_status = -1;
156 SYSCTL_INT(_hw_storvsc, OID_AUTO, srb_status, CTLFLAG_RW,
157 &hv_storvsc_srb_status, 0, "srb_status to inject");
158 TUNABLE_INT("hw_storvsc.srb_status", &hv_storvsc_srb_status);
159 #endif /* DIAGNOSTIC */
161 #define STORVSC_MAX_IO \
162 vmbus_chan_prplist_nelem(hv_storvsc_ringbuffer_size, \
163 STORVSC_DATA_SEGCNT_MAX, VSTOR_PKT_SIZE)
165 struct hv_storvsc_sysctl {
167 u_long data_vaddr_cnt;
169 u_long chan_send_cnt[MAXCPU];
172 struct storvsc_gpa_range {
173 struct vmbus_gpa_range gpa_range;
174 uint64_t gpa_page[STORVSC_DATA_SEGCNT_MAX];
177 struct hv_storvsc_request {
178 LIST_ENTRY(hv_storvsc_request) link;
179 struct vstor_packet vstor_packet;
181 struct storvsc_gpa_range prp_list;
183 uint8_t sense_info_len;
186 struct storvsc_softc *softc;
187 struct callout callout;
188 struct sema synch_sema; /*Synchronize the request/response if needed */
189 struct sglist *bounce_sgl;
190 unsigned int bounce_sgl_count;
191 uint64_t not_aligned_seg_bits;
192 bus_dmamap_t data_dmap;
195 struct storvsc_softc {
196 struct vmbus_channel *hs_chan;
197 LIST_HEAD(, hv_storvsc_request) hs_free_list;
199 struct storvsc_driver_props *hs_drv_props;
202 struct cam_sim *hs_sim;
203 struct cam_path *hs_path;
204 uint32_t hs_num_out_reqs;
205 boolean_t hs_destroy;
206 boolean_t hs_drain_notify;
207 struct sema hs_drain_sema;
208 struct hv_storvsc_request hs_init_req;
209 struct hv_storvsc_request hs_reset_req;
211 bus_dma_tag_t storvsc_req_dtag;
212 struct hv_storvsc_sysctl sysctl_data;
214 struct vmbus_channel *hs_sel_chan[MAXCPU];
217 static eventhandler_tag storvsc_handler_tag;
219 * The size of the vmscsi_request has changed in win8. The
220 * additional size is for the newly added elements in the
221 * structure. These elements are valid only when we are talking
223 * Track the correct size we need to apply.
225 static int vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
228 * HyperV storvsc timeout testing cases:
229 * a. IO returned after first timeout;
230 * b. IO returned after second timeout and queue freeze;
231 * c. IO returned while timer handler is running
232 * The first can be tested by "sg_senddiag -vv /dev/daX",
233 * and the second and third can be done by
234 * "sg_wr_mode -v -p 08 -c 0,1a -m 0,ff /dev/daX".
236 #define HVS_TIMEOUT_TEST 0
239 * Bus/adapter reset functionality on the Hyper-V host is
240 * buggy and it will be disabled until
241 * it can be further tested.
243 #define HVS_HOST_RESET 0
245 struct storvsc_driver_props {
248 uint8_t drv_max_luns_per_target;
249 uint32_t drv_max_ios_per_target;
250 uint32_t drv_ringbuffer_size;
253 enum hv_storage_type {
259 #define HS_MAX_ADAPTERS 10
261 #define HV_STORAGE_SUPPORTS_MULTI_CHANNEL 0x1
263 /* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */
264 static const struct hyperv_guid gStorVscDeviceType={
265 .hv_guid = {0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
266 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f}
269 /* {32412632-86cb-44a2-9b5c-50d1417354f5} */
270 static const struct hyperv_guid gBlkVscDeviceType={
271 .hv_guid = {0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
272 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5}
275 static struct storvsc_driver_props g_drv_props_table[] = {
276 {"blkvsc", "Hyper-V IDE",
277 BLKVSC_MAX_IDE_DISKS_PER_TARGET, BLKVSC_MAX_IO_REQUESTS,
279 {"storvsc", "Hyper-V SCSI",
280 STORVSC_MAX_LUNS_PER_TARGET, STORVSC_MAX_IO_REQUESTS,
285 * Sense buffer size changed in win8; have a run-time
286 * variable to track the size we should use.
288 static int sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;
291 * The storage protocol version is determined during the
292 * initial exchange with the host. It will indicate which
293 * storage functionality is available in the host.
295 static int vmstor_proto_version;
297 struct vmstor_proto {
299 int sense_buffer_size;
300 int vmscsi_size_delta;
303 static const struct vmstor_proto vmstor_proto_list[] = {
305 VMSTOR_PROTOCOL_VERSION_WIN10,
306 POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
310 VMSTOR_PROTOCOL_VERSION_WIN8_1,
311 POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
315 VMSTOR_PROTOCOL_VERSION_WIN8,
316 POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
320 VMSTOR_PROTOCOL_VERSION_WIN7,
321 PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
322 sizeof(struct vmscsi_win8_extension),
325 VMSTOR_PROTOCOL_VERSION_WIN6,
326 PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
327 sizeof(struct vmscsi_win8_extension),
331 /* static functions */
332 static int storvsc_probe(device_t dev);
333 static int storvsc_attach(device_t dev);
334 static int storvsc_detach(device_t dev);
335 static void storvsc_poll(struct cam_sim * sim);
336 static void storvsc_action(struct cam_sim * sim, union ccb * ccb);
337 static int create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp);
338 static void storvsc_free_request(struct storvsc_softc *sc, struct hv_storvsc_request *reqp);
339 static enum hv_storage_type storvsc_get_storage_type(device_t dev);
340 static void hv_storvsc_rescan_target(struct storvsc_softc *sc);
341 static void hv_storvsc_on_channel_callback(struct vmbus_channel *chan, void *xsc);
342 static void hv_storvsc_on_iocompletion( struct storvsc_softc *sc,
343 struct vstor_packet *vstor_packet,
344 struct hv_storvsc_request *request);
345 static int hv_storvsc_connect_vsp(struct storvsc_softc *);
346 static void storvsc_io_done(struct hv_storvsc_request *reqp);
347 static void storvsc_copy_sgl_to_bounce_buf(struct sglist *bounce_sgl,
348 bus_dma_segment_t *orig_sgl,
349 unsigned int orig_sgl_count,
351 void storvsc_copy_from_bounce_buf_to_sgl(bus_dma_segment_t *dest_sgl,
352 unsigned int dest_sgl_count,
353 struct sglist* src_sgl,
356 static device_method_t storvsc_methods[] = {
357 /* Device interface */
358 DEVMETHOD(device_probe, storvsc_probe),
359 DEVMETHOD(device_attach, storvsc_attach),
360 DEVMETHOD(device_detach, storvsc_detach),
361 DEVMETHOD(device_shutdown, bus_generic_shutdown),
365 static driver_t storvsc_driver = {
366 "storvsc", storvsc_methods, sizeof(struct storvsc_softc),
369 static devclass_t storvsc_devclass;
370 DRIVER_MODULE(storvsc, vmbus, storvsc_driver, storvsc_devclass, 0, 0);
371 MODULE_VERSION(storvsc, 1);
372 MODULE_DEPEND(storvsc, vmbus, 1, 1, 1);
/*
 * Open a VMBUS sub-channel for this storvsc instance, using the same ring
 * buffer sizes and completion callback as the primary channel.
 *
 * NOTE(review): this excerpt is missing interior lines (locals, braces,
 * error handling after vmbus_chan_open); comments describe only what is
 * visible here.
 */
375 storvsc_subchan_attach(struct storvsc_softc *sc,
376     struct vmbus_channel *new_channel)
378 	struct vmstor_chan_props props;
381 	memset(&props, 0, sizeof(props));
/* Round-robin the new sub-channel onto a CPU before opening it. */
383 	vmbus_chan_cpu_rr(new_channel);
384 	ret = vmbus_chan_open(new_channel,
385 	    sc->hs_drv_props->drv_ringbuffer_size,
386 	    sc->hs_drv_props->drv_ringbuffer_size,
388 	    sizeof(struct vmstor_chan_props),
389 	    hv_storvsc_on_channel_callback, sc);
393  * @brief Send multi-channel creation request to host
395  * @param sc the storvsc softc
396  * @param max_subch the max number of sub-channels supported by the host
399 storvsc_send_multichannel_request(struct storvsc_softc *sc, int max_subch)
401 	struct vmbus_channel **subchan;
402 	struct hv_storvsc_request *request;
403 	struct vstor_packet *vstor_packet;
/* Number of sub-channels to request: never more than (ncpus - 1). */
407 	/* get the sub-channel count that needs to be created */
408 	request_subch = MIN(max_subch, mp_ncpus - 1);
/* The init request slot is reused for this synchronous exchange. */
410 	request = &sc->hs_init_req;
412 	/* request the host to create multi-channel */
413 	memset(request, 0, sizeof(struct hv_storvsc_request));
415 	sema_init(&request->synch_sema, 0, ("stor_synch_sema"));
417 	vstor_packet = &request->vstor_packet;
419 	vstor_packet->operation = VSTOR_OPERATION_CREATE_MULTI_CHANNELS;
420 	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
421 	vstor_packet->u.multi_channels_cnt = request_subch;
423 	ret = vmbus_chan_send(sc->hs_chan,
424 	    VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC,
425 	    vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request);
/* Block until the completion is posted by the channel callback. */
427 	sema_wait(&request->synch_sema);
429 	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
430 	    vstor_packet->status != 0) {
431 		printf("Storvsc_error: create multi-channel invalid operation "
432 		    "(%d) or statue (%u)\n",
433 		    vstor_packet->operation, vstor_packet->status);
437 	/* Update channel count */
438 	sc->hs_nchan = request_subch + 1;
440 	/* Wait for sub-channels setup to complete. */
441 	subchan = vmbus_subchan_get(sc->hs_chan, request_subch);
443 	/* Attach the sub-channels. */
444 	for (i = 0; i < request_subch; ++i)
445 		storvsc_subchan_attach(sc, subchan[i]);
447 	/* Release the sub-channels. */
448 	vmbus_subchan_rel(subchan, request_subch);
451 		printf("Storvsc create multi-channel success!\n");
455  * @brief initialize channel connection to parent partition
457  * @param sc the storvsc softc (uses its primary channel sc->hs_chan)
458  * @returns 0 on success, non-zero error on failure
461 hv_storvsc_channel_init(struct storvsc_softc *sc)
464 	struct hv_storvsc_request *request;
465 	struct vstor_packet *vstor_packet;
467 	boolean_t support_multichannel;
471 	support_multichannel = FALSE;
473 	request = &sc->hs_init_req;
474 	memset(request, 0, sizeof(struct hv_storvsc_request));
475 	vstor_packet = &request->vstor_packet;
479 	 * Initiate the vsc/vsp initialization protocol on the open channel
481 	sema_init(&request->synch_sema, 0, ("stor_synch_sema"));
/* Step 1: BEGIN_INITIALIZATION handshake. */
483 	vstor_packet->operation = VSTOR_OPERATION_BEGININITIALIZATION;
484 	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
487 	ret = vmbus_chan_send(sc->hs_chan,
488 	    VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC,
489 	    vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request);
494 	sema_wait(&request->synch_sema);
496 	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
497 	    vstor_packet->status != 0) {
/*
 * Step 2: negotiate the protocol version, trying each entry of
 * vmstor_proto_list (newest first) until the host accepts one.
 */
501 	for (i = 0; i < nitems(vmstor_proto_list); i++) {
502 		/* reuse the packet for version range supported */
504 		memset(vstor_packet, 0, sizeof(struct vstor_packet));
505 		vstor_packet->operation = VSTOR_OPERATION_QUERYPROTOCOLVERSION;
506 		vstor_packet->flags = REQUEST_COMPLETION_FLAG;
508 		vstor_packet->u.version.major_minor =
509 		    vmstor_proto_list[i].proto_version;
511 		/* revision is only significant for Windows guests */
512 		vstor_packet->u.version.revision = 0;
514 		ret = vmbus_chan_send(sc->hs_chan,
515 		    VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC,
516 		    vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request);
521 		sema_wait(&request->synch_sema);
523 		if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO) {
/* Host accepted this version: latch the matching size parameters. */
527 		if (vstor_packet->status == 0) {
528 			vmstor_proto_version =
529 			    vmstor_proto_list[i].proto_version;
531 			    vmstor_proto_list[i].sense_buffer_size;
533 			    vmstor_proto_list[i].vmscsi_size_delta;
/* No version accepted: the last status is non-zero here. */
538 	if (vstor_packet->status != 0) {
543 	 * Query channel properties
545 	memset(vstor_packet, 0, sizeof(struct vstor_packet));
546 	vstor_packet->operation = VSTOR_OPERATION_QUERYPROPERTIES;
547 	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
549 	ret = vmbus_chan_send(sc->hs_chan,
550 	    VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC,
551 	    vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request);
556 	sema_wait(&request->synch_sema);
558 	/* TODO: Check returned version */
559 	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
560 	    vstor_packet->status != 0) {
/* hw.storvsc.chan_cnt tunable may cap the number of channels used. */
564 	max_subch = vstor_packet->u.chan_props.max_channel_cnt;
565 	if (hv_storvsc_chan_cnt > 0 && hv_storvsc_chan_cnt < (max_subch + 1))
566 		max_subch = hv_storvsc_chan_cnt - 1;
568 	/* multi-channels feature is supported by WIN8 and above version */
569 	version = VMBUS_GET_VERSION(device_get_parent(sc->hs_dev), sc->hs_dev);
570 	if (version != VMBUS_VERSION_WIN7 && version != VMBUS_VERSION_WS2008 &&
571 	    (vstor_packet->u.chan_props.flags &
572 	     HV_STORAGE_SUPPORTS_MULTI_CHANNEL)) {
573 		support_multichannel = TRUE;
576 	device_printf(sc->hs_dev, "max chans %d%s\n", max_subch + 1,
577 	    support_multichannel ? ", multi-chan capable" : "");
/* Step 3: END_INITIALIZATION completes the handshake. */
580 	memset(vstor_packet, 0, sizeof(struct vstor_packet));
581 	vstor_packet->operation = VSTOR_OPERATION_ENDINITIALIZATION;
582 	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
584 	ret = vmbus_chan_send(sc->hs_chan,
585 	    VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC,
586 	    vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request);
592 	sema_wait(&request->synch_sema);
594 	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
595 	    vstor_packet->status != 0)
599 	 * If multi-channel is supported, send multichannel create
602 	if (support_multichannel && max_subch > 0)
603 		storvsc_send_multichannel_request(sc, max_subch);
605 	sema_destroy(&request->synch_sema);
610  * @brief Open channel connection to parent partition StorVSP driver
612  * Open and initialize channel connection to parent partition StorVSP driver.
614  * @param sc the storvsc softc
615  * @returns 0 on success, non-zero error on failure
618 hv_storvsc_connect_vsp(struct storvsc_softc *sc)
621 	struct vmstor_chan_props props;
623 	memset(&props, 0, sizeof(struct vmstor_chan_props));
/* Place the primary channel on a CPU and open it. */
628 	vmbus_chan_cpu_rr(sc->hs_chan);
629 	ret = vmbus_chan_open(
631 	    sc->hs_drv_props->drv_ringbuffer_size,
632 	    sc->hs_drv_props->drv_ringbuffer_size,
634 	    sizeof(struct vmstor_chan_props),
635 	    hv_storvsc_on_channel_callback, sc);
/* Run the vsc/vsp initialization protocol over the open channel. */
641 	ret = hv_storvsc_channel_init(sc);
/*
 * Issue a bus (adapter) reset request to the host and wait for completion.
 * Compiled only when HVS_HOST_RESET is enabled; disabled by default because
 * host-side reset is reported buggy (see comment near the define above).
 */
647 hv_storvsc_host_reset(struct storvsc_softc *sc)
651 	struct hv_storvsc_request *request;
652 	struct vstor_packet *vstor_packet;
654 	request = &sc->hs_reset_req;
656 	vstor_packet = &request->vstor_packet;
658 	sema_init(&request->synch_sema, 0, "stor synch sema");
660 	vstor_packet->operation = VSTOR_OPERATION_RESETBUS;
661 	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
663 	ret = vmbus_chan_send(dev->channel,
664 	    VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC,
665 	    vstor_packet, VSTOR_PKT_SIZE,
666 	    (uint64_t)(uintptr_t)&sc->hs_reset_req);
/* Wait for the host to acknowledge the reset. */
672 	sema_wait(&request->synch_sema);
675 	 * At this point, all outstanding requests in the adapter
676 	 * should have been flushed out and return to us
680 	sema_destroy(&request->synch_sema);
683 #endif /* HVS_HOST_RESET */
686  * @brief Function to initiate an I/O request
688  * @param sc the storvsc softc
689  * @param request pointer to a request structure
690  * @returns 0 on success, non-zero error on failure
693 hv_storvsc_io_request(struct storvsc_softc *sc,
694     struct hv_storvsc_request *request)
696 	struct vstor_packet *vstor_packet = &request->vstor_packet;
697 	struct vmbus_channel* outgoing_channel = NULL;
700 	vstor_packet->flags |= REQUEST_COMPLETION_FLAG;
702 	vstor_packet->u.vm_srb.length =
703 	    sizeof(struct vmscsi_req) - vmscsi_size_delta;
/* sense_buffer_size was negotiated in hv_storvsc_channel_init(). */
705 	vstor_packet->u.vm_srb.sense_info_len = sense_buffer_size;
707 	vstor_packet->u.vm_srb.transfer_len =
708 	    request->prp_list.gpa_range.gpa_len;
710 	vstor_packet->operation = VSTOR_OPERATION_EXECUTESRB;
/* Spread I/O across channels by LUN and current CPU. */
712 	ch_sel = (vstor_packet->u.vm_srb.lun + curcpu) % sc->hs_nchan;
714 	 * If we are panic'ing, then we are dumping core. Since storvsc_poll
715 	 * always uses sc->hs_chan, then we must send to that channel or a poll
716 	 * timeout will occur.
719 		outgoing_channel = sc->hs_chan;
721 		outgoing_channel = sc->hs_sel_chan[ch_sel];
/* Drop the softc lock around the (potentially blocking) channel send. */
724 	mtx_unlock(&request->softc->hs_lock);
725 	if (request->prp_list.gpa_range.gpa_len) {
/* Data transfer: send the packet together with its PRP (page) list. */
726 		ret = vmbus_chan_send_prplist(outgoing_channel,
727 		    &request->prp_list.gpa_range, request->prp_cnt,
728 		    vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request);
/* No data: plain inband packet. */
730 		ret = vmbus_chan_send(outgoing_channel,
731 		    VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC,
732 		    vstor_packet, VSTOR_PKT_SIZE, (uint64_t)(uintptr_t)request);
734 	/* statistic for successful request sending on each channel */
736 		sc->sysctl_data.chan_send_cnt[ch_sel]++;
738 	mtx_lock(&request->softc->hs_lock);
741 		printf("Unable to send packet %p ret %d", vstor_packet, ret);
743 		atomic_add_int(&sc->hs_num_out_reqs, 1);
751  * Process IO_COMPLETION_OPERATION and ready
752  * the result to be completed for upper layer
753  * processing by the CAM layer.
756 hv_storvsc_on_iocompletion(struct storvsc_softc *sc,
757     struct vstor_packet *vstor_packet,
758     struct hv_storvsc_request *request)
760 	struct vmscsi_req *vm_srb;
762 	vm_srb = &vstor_packet->u.vm_srb;
765 	 * Copy some fields of the host's response into the request structure,
766 	 * because the fields will be used later in storvsc_io_done().
768 	request->vstor_packet.u.vm_srb.scsi_status = vm_srb->scsi_status;
769 	request->vstor_packet.u.vm_srb.srb_status = vm_srb->srb_status;
770 	request->vstor_packet.u.vm_srb.transfer_len = vm_srb->transfer_len;
772 	if (((vm_srb->scsi_status & 0xFF) == SCSI_STATUS_CHECK_COND) &&
773 	    (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID)) {
774 		/* Autosense data available */
776 		KASSERT(vm_srb->sense_info_len <= request->sense_info_len,
777 		    ("vm_srb->sense_info_len <= "
778 		     "request->sense_info_len"));
/* Copy host-provided sense data for CAM to report upstream. */
780 		memcpy(request->sense_data, vm_srb->u.sense_data,
781 		    vm_srb->sense_info_len);
783 		request->sense_info_len = vm_srb->sense_info_len;
786 	/* Complete request by passing to the CAM layer */
787 	storvsc_io_done(request);
788 	atomic_subtract_int(&sc->hs_num_out_reqs, 1);
/* Wake storvsc_detach() once the last outstanding request drains. */
789 	if (sc->hs_drain_notify && (sc->hs_num_out_reqs == 0)) {
790 		sema_post(&sc->hs_drain_sema);
/*
 * Schedule a CAM rescan of the entire bus (wildcard target/LUN) in
 * response to a host-initiated ENUMERATE_BUS notification.
 */
795 hv_storvsc_rescan_target(struct storvsc_softc *sc)
798 	target_id_t targetid;
801 	pathid = cam_sim_path(sc->hs_sim);
802 	targetid = CAM_TARGET_WILDCARD;
805 	 * Allocate a CCB and schedule a rescan.
/* Non-sleeping allocation: this may run from the channel callback. */
807 	ccb = xpt_alloc_ccb_nowait();
809 		printf("unable to alloc CCB for rescan\n");
813 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
814 	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
815 		printf("unable to create path for rescan, pathid: %u,"
816 		    "targetid: %u\n", pathid, targetid);
/* Wildcard target means a whole-bus scan; otherwise scan one target. */
821 	if (targetid == CAM_TARGET_WILDCARD)
822 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
824 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
/*
 * VMBUS channel callback: drain all pending vstor packets from the channel
 * and dispatch each by its transaction id / operation code.
 */
830 hv_storvsc_on_channel_callback(struct vmbus_channel *channel, void *xsc)
833 	struct storvsc_softc *sc = xsc;
834 	uint32_t bytes_recvd;
/* Receive buffer must be 8-byte aligned/rounded for vmbus_chan_recv(). */
836 	uint8_t packet[roundup2(sizeof(struct vstor_packet), 8)];
837 	struct hv_storvsc_request *request;
838 	struct vstor_packet *vstor_packet;
840 	bytes_recvd = roundup2(VSTOR_PKT_SIZE, 8);
841 	ret = vmbus_chan_recv(channel, packet, &bytes_recvd, &request_id);
842 	KASSERT(ret != ENOBUFS, ("storvsc recvbuf is not large enough"));
843 	/* XXX check bytes_recvd to make sure that it contains enough data */
845 	while ((ret == 0) && (bytes_recvd > 0)) {
/* The transaction id round-trips the request pointer we sent. */
846 		request = (struct hv_storvsc_request *)(uintptr_t)request_id;
848 		if ((request == &sc->hs_init_req) ||
849 		    (request == &sc->hs_reset_req)) {
/* Synchronous init/reset exchanges: copy reply and wake the waiter. */
850 			memcpy(&request->vstor_packet, packet,
851 			    sizeof(struct vstor_packet));
852 			sema_post(&request->synch_sema);
854 			vstor_packet = (struct vstor_packet *)packet;
855 			switch(vstor_packet->operation) {
856 			case VSTOR_OPERATION_COMPLETEIO:
858 					panic("VMBUS: storvsc received a "
859 					    "packet with NULL request id in "
860 					    "COMPLETEIO operation.");
862 				hv_storvsc_on_iocompletion(sc,
863 				    vstor_packet, request);
865 			case VSTOR_OPERATION_REMOVEDEVICE:
866 				printf("VMBUS: storvsc operation %d not "
867 				    "implemented.\n", vstor_packet->operation);
868 				/* TODO: implement */
870 			case VSTOR_OPERATION_ENUMERATE_BUS:
871 				hv_storvsc_rescan_target(sc);
/* Fetch the next pending packet, if any. */
878 		bytes_recvd = roundup2(VSTOR_PKT_SIZE, 8),
879 		ret = vmbus_chan_recv(channel, packet, &bytes_recvd,
881 		KASSERT(ret != ENOBUFS,
882 		    ("storvsc recvbuf is not large enough"));
884 		 * XXX check bytes_recvd to make sure that it contains
891  * @brief StorVSC probe function
893  * Device probe function. Returns 0 if the input device is a StorVSC
894  * device. Otherwise, a ENXIO is returned. If the input device is
895  * for BlkVSC (paravirtual IDE) device and this support is disabled in
896  * favor of the emulated ATA/IDE device, return ENXIO.
899  * @returns 0 on success, ENXIO if not a matching StorVSC device
902 storvsc_probe(device_t dev)
/* Classify by the device's VMBUS GUID: BlkVSC (IDE) or StorVSC (SCSI). */
906 	switch (storvsc_get_storage_type(dev)) {
910 			    "Enlightened ATA/IDE detected\n");
911 			device_set_desc(dev, g_drv_props_table[DRIVER_BLKVSC].drv_desc);
912 			ret = BUS_PROBE_DEFAULT;
916 		device_printf(dev, "Enlightened SCSI device detected\n");
917 		device_set_desc(dev, g_drv_props_table[DRIVER_STORVSC].drv_desc);
918 		ret = BUS_PROBE_DEFAULT;
/*
 * Build the channel-selection table hs_sel_chan[]: slot 0 is the primary
 * channel and slots 1..n are the sub-channels, so hv_storvsc_io_request()
 * can index a channel directly.
 */
927 storvsc_create_chan_sel(struct storvsc_softc *sc)
929 	struct vmbus_channel **subch;
932 	sc->hs_sel_chan[0] = sc->hs_chan;
933 	nsubch = sc->hs_nchan - 1;
937 	subch = vmbus_subchan_get(sc->hs_chan, nsubch);
938 	for (i = 0; i < nsubch; i++)
939 		sc->hs_sel_chan[i + 1] = subch[i];
940 	vmbus_subchan_rel(subch, nsubch);
/*
 * Create the per-device DMA tag and pre-allocate the free-request pool
 * (one hv_storvsc_request plus DMA map per supported outstanding I/O).
 * On failure, the partially built pool is torn down before returning.
 */
944 storvsc_init_requests(device_t dev)
946 	struct storvsc_softc *sc = device_get_softc(dev);
947 	struct hv_storvsc_request *reqp;
950 	LIST_INIT(&sc->hs_free_list);
952 	error = bus_dma_tag_create(
953 	    bus_get_dma_tag(dev),	/* parent */
955 	    PAGE_SIZE,			/* boundary */
956 	    BUS_SPACE_MAXADDR,		/* lowaddr */
957 	    BUS_SPACE_MAXADDR,		/* highaddr */
958 	    NULL, NULL,			/* filter, filterarg */
959 	    STORVSC_DATA_SIZE_MAX,	/* maxsize */
960 	    STORVSC_DATA_SEGCNT_MAX,	/* nsegments */
961 	    STORVSC_DATA_SEGSZ_MAX,	/* maxsegsize */
964 	    NULL,			/* lockfuncarg */
965 	    &sc->storvsc_req_dtag);
967 		device_printf(dev, "failed to create storvsc dma tag\n");
/* Pre-allocate one request + DMA map per max outstanding I/O. */
971 	for (i = 0; i < sc->hs_drv_props->drv_max_ios_per_target; ++i) {
972 		reqp = malloc(sizeof(struct hv_storvsc_request),
973 		    M_DEVBUF, M_WAITOK|M_ZERO);
975 		error = bus_dmamap_create(sc->storvsc_req_dtag, 0,
978 			device_printf(dev, "failed to allocate storvsc "
982 		LIST_INSERT_HEAD(&sc->hs_free_list, reqp, link);
/* Error path: free everything added to the list so far. */
987 	while ((reqp = LIST_FIRST(&sc->hs_free_list)) != NULL) {
988 		LIST_REMOVE(reqp, link);
989 		bus_dmamap_destroy(sc->storvsc_req_dtag, reqp->data_dmap);
990 		free(reqp, M_DEVBUF);
/*
 * Publish per-device sysctl statistics: data block type counters, plus a
 * per-channel node with a send-request counter
 * (dev.storvsc.UNIT.channel.CHID.send_req).
 */
996 storvsc_sysctl(device_t dev)
998 	struct sysctl_oid_list *child;
999 	struct sysctl_ctx_list *ctx;
1000 	struct sysctl_oid *ch_tree, *chid_tree;
1001 	struct storvsc_softc *sc;
1005 	sc = device_get_softc(dev);
1006 	ctx = device_get_sysctl_ctx(dev);
1007 	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
1009 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "data_bio_cnt", CTLFLAG_RW,
1010 	    &sc->sysctl_data.data_bio_cnt, "# of bio data block");
1011 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "data_vaddr_cnt", CTLFLAG_RW,
1012 	    &sc->sysctl_data.data_vaddr_cnt, "# of vaddr data block");
1013 	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "data_sg_cnt", CTLFLAG_RW,
1014 	    &sc->sysctl_data.data_sg_cnt, "# of sg data block");
1016 	/* dev.storvsc.UNIT.channel */
1017 	ch_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "channel",
1018 	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
1019 	if (ch_tree == NULL)
1022 	for (i = 0; i < sc->hs_nchan; i++) {
1025 		ch_id = vmbus_chan_id(sc->hs_sel_chan[i]);
1026 		snprintf(name, sizeof(name), "%d", ch_id);
1027 		/* dev.storvsc.UNIT.channel.CHID */
1028 		chid_tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(ch_tree),
1029 		    OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
1030 		if (chid_tree == NULL)
1032 		/* dev.storvsc.UNIT.channel.CHID.send_req */
1033 		SYSCTL_ADD_ULONG(ctx, SYSCTL_CHILDREN(chid_tree), OID_AUTO,
1034 		    "send_req", CTLFLAG_RD, &sc->sysctl_data.chan_send_cnt[i],
1035 		    "# of request sending from this channel");
1040  * @brief StorVSC attach function
1042  * Function responsible for allocating per-device structures,
1043  * setting up CAM interfaces and scanning for available LUNs to
1044  * be used for SCSI device peripherals.
1047  * @returns 0 on success or an error on failure
1050 storvsc_attach(device_t dev)
1052 	enum hv_storage_type stor_type;
1053 	struct storvsc_softc *sc;
1054 	struct cam_devq *devq;
1056 	struct hv_storvsc_request *reqp;
1057 	struct root_hold_token *root_mount_token = NULL;
1058 	struct hv_sgl_node *sgl_node = NULL;
1059 	void *tmp_buff = NULL;
1062 	 * We need to serialize storvsc attach calls.
1064 	root_mount_token = root_mount_hold("storvsc");
1066 	sc = device_get_softc(dev);
1068 	sc->hs_chan = vmbus_get_channel(dev);
1070 	stor_type = storvsc_get_storage_type(dev);
1072 	if (stor_type == DRIVER_UNKNOWN) {
1077 	/* fill in driver specific properties */
1078 	sc->hs_drv_props = &g_drv_props_table[stor_type];
1079 	sc->hs_drv_props->drv_ringbuffer_size = hv_storvsc_ringbuffer_size;
1080 	sc->hs_drv_props->drv_max_ios_per_target =
1081 	    MIN(STORVSC_MAX_IO, hv_storvsc_max_io);
1083 		printf("storvsc ringbuffer size: %d, max_io: %d\n",
1084 		    sc->hs_drv_props->drv_ringbuffer_size,
1085 		    sc->hs_drv_props->drv_max_ios_per_target);
1087 	/* fill in device specific properties */
1088 	sc->hs_unit = device_get_unit(dev);
1091 	mtx_init(&sc->hs_lock, "hvslck", NULL, MTX_DEF);
/* Allocate the DMA tag and the free-request pool. */
1093 	ret = storvsc_init_requests(dev);
1097 	/* create sg-list page pool */
/* The SG page pool is global and shared; initialize it only once. */
1098 	if (FALSE == g_hv_sgl_page_pool.is_init) {
1099 		g_hv_sgl_page_pool.is_init = TRUE;
1100 		LIST_INIT(&g_hv_sgl_page_pool.in_use_sgl_list);
1101 		LIST_INIT(&g_hv_sgl_page_pool.free_sgl_list);
1104 		 * Pre-create SG list, each SG list with
1105 		 * STORVSC_DATA_SEGCNT_MAX segments, each
1106 		 * segment has one page buffer
1108 		for (i = 0; i < sc->hs_drv_props->drv_max_ios_per_target; i++) {
1109 			sgl_node = malloc(sizeof(struct hv_sgl_node),
1110 			    M_DEVBUF, M_WAITOK|M_ZERO);
1112 			sgl_node->sgl_data =
1113 			    sglist_alloc(STORVSC_DATA_SEGCNT_MAX,
1116 			for (j = 0; j < STORVSC_DATA_SEGCNT_MAX; j++) {
1117 				tmp_buff = malloc(PAGE_SIZE,
1118 				    M_DEVBUF, M_WAITOK|M_ZERO);
/*
 * NOTE(review): a kernel virtual address is stored in the ss_paddr
 * (physical address) field here; the matching free() casts it back.
 * Looks intentional (pool-private use of the field) — confirm.
 */
1120 				sgl_node->sgl_data->sg_segs[j].ss_paddr =
1121 				    (vm_paddr_t)tmp_buff;
1124 			LIST_INSERT_HEAD(&g_hv_sgl_page_pool.free_sgl_list,
1129 	sc->hs_destroy = FALSE;
1130 	sc->hs_drain_notify = FALSE;
1131 	sema_init(&sc->hs_drain_sema, 0, "Store Drain Sema");
/* Open the channel and run the vsc/vsp handshake. */
1133 	ret = hv_storvsc_connect_vsp(sc);
1138 	/* Construct cpu to channel mapping */
1139 	storvsc_create_chan_sel(sc);
1142 	 * Create the device queue.
1143 	 * Hyper-V maps each target to one SCSI HBA
1145 	devq = cam_simq_alloc(sc->hs_drv_props->drv_max_ios_per_target);
1147 		device_printf(dev, "Failed to alloc device queue\n");
1152 	sc->hs_sim = cam_sim_alloc(storvsc_action,
1154 	    sc->hs_drv_props->drv_name,
1158 	    sc->hs_drv_props->drv_max_ios_per_target,
1161 	if (sc->hs_sim == NULL) {
1162 		device_printf(dev, "Failed to alloc sim\n");
1163 		cam_simq_free(devq);
1168 	mtx_lock(&sc->hs_lock);
1169 	/* bus_id is set to 0, need to get it from VMBUS channel query? */
1170 	if (xpt_bus_register(sc->hs_sim, dev, 0) != CAM_SUCCESS) {
1171 		cam_sim_free(sc->hs_sim, /*free_devq*/TRUE);
1172 		mtx_unlock(&sc->hs_lock);
1173 		device_printf(dev, "Unable to register SCSI bus\n");
1178 	if (xpt_create_path(&sc->hs_path, /*periph*/NULL,
1179 	    cam_sim_path(sc->hs_sim),
1180 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1181 		xpt_bus_deregister(cam_sim_path(sc->hs_sim));
1182 		cam_sim_free(sc->hs_sim, /*free_devq*/TRUE);
1183 		mtx_unlock(&sc->hs_lock);
1184 		device_printf(dev, "Unable to create path\n");
1189 	mtx_unlock(&sc->hs_lock);
1191 	storvsc_sysctl(dev);
1193 	root_mount_rel(root_mount_token);
/* Common error path: release the root-mount hold and free pools. */
1198 	root_mount_rel(root_mount_token);
1199 	while (!LIST_EMPTY(&sc->hs_free_list)) {
1200 		reqp = LIST_FIRST(&sc->hs_free_list);
1201 		LIST_REMOVE(reqp, link);
1202 		bus_dmamap_destroy(sc->storvsc_req_dtag, reqp->data_dmap);
1203 		free(reqp, M_DEVBUF);
1206 	while (!LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) {
1207 		sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list);
1208 		LIST_REMOVE(sgl_node, link);
1209 		for (j = 0; j < STORVSC_DATA_SEGCNT_MAX; j++) {
1211 			    (void*)sgl_node->sgl_data->sg_segs[j].ss_paddr) {
1212 				free((void*)sgl_node->sgl_data->sg_segs[j].ss_paddr, M_DEVBUF);
1215 		sglist_free(sgl_node->sgl_data);
1216 		free(sgl_node, M_DEVBUF);
1223  * @brief StorVSC device detach function
1225  * This function is responsible for safely detaching a
1226  * StorVSC device. This includes waiting for inbound responses
1227  * to complete and freeing associated per-device structures.
1229  * @param dev a device
1230  * returns 0 on success
1233 storvsc_detach(device_t dev)
1235 	struct storvsc_softc *sc = device_get_softc(dev);
1236 	struct hv_storvsc_request *reqp = NULL;
1237 	struct hv_sgl_node *sgl_node = NULL;
1240 	sc->hs_destroy = TRUE;
1243 	 * At this point, all outbound traffic should be disabled. We
1244 	 * only allow inbound traffic (responses) to proceed so that
1245 	 * outstanding requests can be completed.
/* Block until hv_storvsc_on_iocompletion() posts the drain semaphore. */
1248 	sc->hs_drain_notify = TRUE;
1249 	sema_wait(&sc->hs_drain_sema);
1250 	sc->hs_drain_notify = FALSE;
1253 	 * Since we have already drained, we don't need to busy wait.
1254 	 * The call to close the channel will reset the callback
1255 	 * under the protection of the incoming channel lock.
1258 	vmbus_chan_close(sc->hs_chan);
/* Free the per-device request pool and DMA maps. */
1260 	mtx_lock(&sc->hs_lock);
1261 	while (!LIST_EMPTY(&sc->hs_free_list)) {
1262 		reqp = LIST_FIRST(&sc->hs_free_list);
1263 		LIST_REMOVE(reqp, link);
1264 		bus_dmamap_destroy(sc->storvsc_req_dtag, reqp->data_dmap);
1265 		free(reqp, M_DEVBUF);
1267 	mtx_unlock(&sc->hs_lock);
/*
 * Free the global SG page pool. NOTE(review): only the free list is
 * drained here — presumably all requests have been returned by the
 * drain above; confirm in-use list is empty at this point.
 */
1269 	while (!LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) {
1270 		sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list);
1271 		LIST_REMOVE(sgl_node, link);
1272 		for (j = 0; j < STORVSC_DATA_SEGCNT_MAX; j++){
1274 			    (void*)sgl_node->sgl_data->sg_segs[j].ss_paddr) {
1275 				free((void*)sgl_node->sgl_data->sg_segs[j].ss_paddr, M_DEVBUF);
1278 		sglist_free(sgl_node->sgl_data);
1279 		free(sgl_node, M_DEVBUF);
1285 #if HVS_TIMEOUT_TEST
1287 * @brief unit test for timed out operations
1289 * This function provides unit testing capability to simulate
1290 * timed out operations. Recompilation with HV_TIMEOUT_TEST=1
/*
 * NOTE(review): the guard macro is HVS_TIMEOUT_TEST but the comment above
 * says HV_TIMEOUT_TEST — confirm which spelling is intended.
 */
1293 * @param reqp pointer to a request structure
1294 * @param opcode SCSI operation being performed
1295 * @param wait if 1, wait for I/O to complete
1298 storvsc_timeout_test(struct hv_storvsc_request *reqp,
1299 uint8_t opcode, int wait)
1302 union ccb *ccb = reqp->ccb;
1303 struct storvsc_softc *sc = reqp->softc;
/* Only exercise the request whose CDB opcode matches the one under test. */
1305 if (reqp->vstor_packet.vm_srb.cdb[0] != opcode) {
/* Re-issue the request while holding the per-request event mutex. */
1310 mtx_lock(&reqp->event.mtx);
1312 ret = hv_storvsc_io_request(sc, reqp);
1315 mtx_unlock(&reqp->event.mtx);
1317 printf("%s: io_request failed with %d.\n",
1319 ccb->ccb_h.status = CAM_PROVIDE_FAIL;
1320 mtx_lock(&sc->hs_lock);
1321 storvsc_free_request(sc, reqp);
1323 mtx_unlock(&sc->hs_lock);
/* Optionally wait (up to 60 s) for the I/O completion to signal the cv. */
1328 xpt_print(ccb->ccb_h.path,
1329 "%u: %s: waiting for IO return.\n",
1331 ret = cv_timedwait(&reqp->event.cv, &reqp->event.mtx, 60*hz);
1332 mtx_unlock(&reqp->event.mtx);
1333 xpt_print(ccb->ccb_h.path, "%u: %s: %s.\n",
1334 ticks, __func__, (ret == 0)?
1335 "IO return detected" :
1336 "IO return not detected");
1338 * Now both the timer handler and io done are running
1339 * simultaneously. We want to confirm the io done always
1340 * finishes after the timer handler exits. So reqp used by
1341 * timer handler is not freed or stale. Do busy loop for
1342 * another 1/10 second to make sure io done does
1343 * wait for the timer handler to complete.
1346 mtx_lock(&sc->hs_lock);
1347 xpt_print(ccb->ccb_h.path,
1348 "%u: %s: finishing, queue frozen %d, "
1349 "ccb status 0x%x scsi_status 0x%x.\n",
1350 ticks, __func__, sc->hs_frozen,
1352 ccb->csio.scsi_status);
1353 mtx_unlock(&sc->hs_lock);
1356 #endif /* HVS_TIMEOUT_TEST */
1360 * @brief timeout handler for requests
1362 * This function is called as a result of a callout expiring.
1364 * @param arg pointer to a request
1367 storvsc_timeout(void *arg)
1369 struct hv_storvsc_request *reqp = arg;
1370 struct storvsc_softc *sc = reqp->softc;
1371 union ccb *ccb = reqp->ccb;
/*
 * First expiry: log, dump CCB state, and re-arm the callout for another
 * full timeout period before taking any recovery action.
 */
1373 if (reqp->retries == 0) {
1374 mtx_lock(&sc->hs_lock);
1375 xpt_print(ccb->ccb_h.path,
1376 "%u: IO timed out (req=0x%p), wait for another %u secs.\n",
1377 ticks, reqp, ccb->ccb_h.timeout / 1000);
1378 cam_error_print(ccb, CAM_ESF_ALL, CAM_EPF_ALL);
1379 mtx_unlock(&sc->hs_lock);
/* ccb_h.timeout is in milliseconds, hence SBT_1MS scaling. */
1382 callout_reset_sbt(&reqp->callout, SBT_1MS * ccb->ccb_h.timeout,
1383 0, storvsc_timeout, reqp, 0);
1384 #if HVS_TIMEOUT_TEST
1385 storvsc_timeout_test(reqp, SEND_DIAGNOSTIC, 0);
/* Second (or later) expiry: freeze the SIM queue if not already frozen. */
1390 mtx_lock(&sc->hs_lock);
1391 xpt_print(ccb->ccb_h.path,
1392 "%u: IO (reqp = 0x%p) did not return for %u seconds, %s.\n",
1393 ticks, reqp, ccb->ccb_h.timeout * (reqp->retries+1) / 1000,
1394 (sc->hs_frozen == 0)?
1395 "freezing the queue" : "the queue is already frozen");
1396 if (sc->hs_frozen == 0) {
1398 xpt_freeze_simq(xpt_path_sim(ccb->ccb_h.path), 1);
1400 mtx_unlock(&sc->hs_lock);
1402 #if HVS_TIMEOUT_TEST
1403 storvsc_timeout_test(reqp, MODE_SELECT_10, 1);
1409 * @brief StorVSC device poll function
1411 * This function is responsible for servicing requests when
1412 * interrupts are disabled (i.e when we are dumping core.)
1414 * @param sim a pointer to a CAM SCSI interface module
1417 storvsc_poll(struct cam_sim *sim)
1419 struct storvsc_softc *sc = cam_sim_softc(sim);
/*
 * Drop hs_lock around the channel callback — presumably because the
 * completion path re-acquires hs_lock (see storvsc_io_done); confirm.
 */
1421 mtx_assert(&sc->hs_lock, MA_OWNED);
1422 mtx_unlock(&sc->hs_lock);
1423 hv_storvsc_on_channel_callback(sc->hs_chan, sc);
1424 mtx_lock(&sc->hs_lock);
1428 * @brief StorVSC device action function
1430 * This function is responsible for handling SCSI operations which
1431 * are passed from the CAM layer. The requests are in the form of
1432 * CAM control blocks which indicate the action being performed.
1433 * Not all actions require converting the request to a VSCSI protocol
1434 * message - these actions can be responded to by this driver.
1435 * Requests which are destined for a backend storage device are converted
1436 * to a VSCSI protocol message and sent on the channel connection associated
1439 * @param sim pointer to a CAM SCSI interface module
1440 * @param ccb pointer to a CAM control block
1443 storvsc_action(struct cam_sim *sim, union ccb *ccb)
1445 struct storvsc_softc *sc = cam_sim_softc(sim);
1448 mtx_assert(&sc->hs_lock, MA_OWNED);
1449 switch (ccb->ccb_h.func_code) {
/* Path inquiry: report HBA capabilities/limits straight from the softc. */
1450 case XPT_PATH_INQ: {
1451 struct ccb_pathinq *cpi = &ccb->cpi;
1453 cpi->version_num = 1;
1454 cpi->hba_inquiry = PI_TAG_ABLE|PI_SDTR_ABLE;
1455 cpi->target_sprt = 0;
1456 cpi->hba_misc = PIM_NOBUSRESET;
1457 if (hv_storvsc_use_pim_unmapped)
1458 cpi->hba_misc |= PIM_UNMAPPED;
1459 cpi->maxio = STORVSC_DATA_SIZE_MAX;
1460 cpi->hba_eng_cnt = 0;
1461 cpi->max_target = STORVSC_MAX_TARGETS;
1462 cpi->max_lun = sc->hs_drv_props->drv_max_luns_per_target;
1463 cpi->initiator_id = cpi->max_target;
1464 cpi->bus_id = cam_sim_bus(sim);
1465 cpi->base_transfer_speed = 300000;
1466 cpi->transport = XPORT_SAS;
1467 cpi->transport_version = 0;
1468 cpi->protocol = PROTO_SCSI;
1469 cpi->protocol_version = SCSI_REV_SPC2;
1470 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1471 strlcpy(cpi->hba_vid, sc->hs_drv_props->drv_name, HBA_IDLEN);
1472 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1473 cpi->unit_number = cam_sim_unit(sim);
1475 ccb->ccb_h.status = CAM_REQ_CMP;
/* Report fixed transport settings: tagged queuing + disconnect enabled. */
1479 case XPT_GET_TRAN_SETTINGS: {
1480 struct ccb_trans_settings *cts = &ccb->cts;
1482 cts->transport = XPORT_SAS;
1483 cts->transport_version = 0;
1484 cts->protocol = PROTO_SCSI;
1485 cts->protocol_version = SCSI_REV_SPC2;
1487 /* enable tag queuing and disconnected mode */
1488 cts->proto_specific.valid = CTS_SCSI_VALID_TQ;
1489 cts->proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
1490 cts->proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
1491 cts->xport_specific.valid = CTS_SPI_VALID_DISC;
1492 cts->xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;
1494 ccb->ccb_h.status = CAM_REQ_CMP;
/* Settings are fixed; accept SET as a no-op. */
1498 case XPT_SET_TRAN_SETTINGS: {
1499 ccb->ccb_h.status = CAM_REQ_CMP;
1503 case XPT_CALC_GEOMETRY:{
1504 cam_calc_geometry(&ccb->ccg, 1);
/* Device reset maps onto a full host (adapter) reset. */
1509 case XPT_RESET_DEV:{
1511 if ((res = hv_storvsc_host_reset(sc)) != 0) {
1512 xpt_print(ccb->ccb_h.path,
1513 "hv_storvsc_host_reset failed with %d\n", res);
1514 ccb->ccb_h.status = CAM_PROVIDE_FAIL;
1518 ccb->ccb_h.status = CAM_REQ_CMP;
1522 xpt_print(ccb->ccb_h.path,
1523 "%s reset not supported.\n",
1524 (ccb->ccb_h.func_code == XPT_RESET_BUS)?
1526 ccb->ccb_h.status = CAM_REQ_INVALID;
1529 #endif /* HVS_HOST_RESET */
/* SCSI I/O: allocate a request from the free list and submit over VMBUS. */
1532 case XPT_IMMED_NOTIFY: {
1533 struct hv_storvsc_request *reqp = NULL;
1534 bus_dmamap_t dmap_saved;
/* NOTE(review): panic text says "cdl_len" — likely meant "cdb_len". */
1536 if (ccb->csio.cdb_len == 0) {
1537 panic("cdl_len is 0\n");
/* No free request: ask CAM to requeue and freeze the SIM queue once. */
1540 if (LIST_EMPTY(&sc->hs_free_list)) {
1541 ccb->ccb_h.status = CAM_REQUEUE_REQ;
1542 if (sc->hs_frozen == 0) {
1544 xpt_freeze_simq(sim, /* count*/1);
1550 reqp = LIST_FIRST(&sc->hs_free_list);
1551 LIST_REMOVE(reqp, link);
1553 /* Save the data_dmap before reset request */
1554 dmap_saved = reqp->data_dmap;
1556 /* XXX this is ugly */
1557 bzero(reqp, sizeof(struct hv_storvsc_request));
1559 /* Restore necessary bits */
1560 reqp->data_dmap = dmap_saved;
1563 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1564 if ((res = create_storvsc_request(ccb, reqp)) != 0) {
1565 ccb->ccb_h.status = CAM_REQ_INVALID;
/* Arm a per-request timeout unless the caller asked for infinity. */
1571 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1572 callout_init(&reqp->callout, 1);
1573 callout_reset_sbt(&reqp->callout,
1574 SBT_1MS * ccb->ccb_h.timeout, 0,
1575 storvsc_timeout, reqp, 0);
1576 #if HVS_TIMEOUT_TEST
1577 cv_init(&reqp->event.cv, "storvsc timeout cv");
1578 mtx_init(&reqp->event.mtx, "storvsc timeout mutex",
1580 switch (reqp->vstor_packet.vm_srb.cdb[0]) {
1581 case MODE_SELECT_10:
1582 case SEND_DIAGNOSTIC:
1583 /* To have timer send the request. */
1588 #endif /* HVS_TIMEOUT_TEST */
/* Hand the request to the host; on failure return it to the free list. */
1592 if ((res = hv_storvsc_io_request(sc, reqp)) != 0) {
1593 xpt_print(ccb->ccb_h.path,
1594 "hv_storvsc_io_request failed with %d\n", res);
1595 ccb->ccb_h.status = CAM_PROVIDE_FAIL;
1596 storvsc_free_request(sc, reqp);
1604 ccb->ccb_h.status = CAM_REQ_INVALID;
1611 * @brief destroy bounce buffer
1613 * This function is responsible for destroying a Scatter/Gather list
1614 * that was created by storvsc_create_bounce_buffer().
1616 * @param sgl- the Scatter/Gather list to return to the free pool
/*
 * NOTE(review): the original doc also listed an "sg_count" parameter, but
 * the visible signature takes only "sgl" — the stale @param was removed.
 */
1621 storvsc_destroy_bounce_buffer(struct sglist *sgl)
1623 struct hv_sgl_node *sgl_node = NULL;
1624 if (LIST_EMPTY(&g_hv_sgl_page_pool.in_use_sgl_list)) {
1625 printf("storvsc error: not enough in use sgl\n");
/* Move one pool node from the in-use list back to the free list. */
1628 sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.in_use_sgl_list);
1629 LIST_REMOVE(sgl_node, link);
1630 sgl_node->sgl_data = sgl;
1631 LIST_INSERT_HEAD(&g_hv_sgl_page_pool.free_sgl_list, sgl_node, link);
1635 * @brief create bounce buffer
1637 * This function is responsible for creating a Scatter/Gather list
1638 * whose pages are aligned with the page size.
1640 * @param seg_count- SG-list segments count
1641 * @param write - if WRITE_TYPE, set SG list page used size to 0,
1642 * otherwise set used size to page size.
1644 * @return the sglist, or NULL if creation failed
1646 static struct sglist *
1647 storvsc_create_bounce_buffer(uint16_t seg_count, int write)
1650 struct sglist *bounce_sgl = NULL;
1651 unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE);
1652 struct hv_sgl_node *sgl_node = NULL;
1654 /* get struct sglist from free_sgl_list */
1655 if (LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) {
1656 printf("storvsc error: not enough free sgl\n");
/* Move the pool node from the free list to the in-use list. */
1659 sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list);
1660 LIST_REMOVE(sgl_node, link);
1661 bounce_sgl = sgl_node->sgl_data;
1662 LIST_INSERT_HEAD(&g_hv_sgl_page_pool.in_use_sgl_list, sgl_node, link);
1664 bounce_sgl->sg_maxseg = seg_count;
/* Writes start empty (filled later); reads pre-claim full pages. */
1666 if (write == WRITE_TYPE)
1667 bounce_sgl->sg_nseg = 0;
1669 bounce_sgl->sg_nseg = seg_count;
1671 for (i = 0; i < seg_count; i++)
1672 bounce_sgl->sg_segs[i].ss_len = buf_len;
1678 * @brief copy data from SG list to bounce buffer
1680 * This function is responsible for copying data from one SG list's segments
1681 * to another SG list which is used as a bounce buffer.
1683 * @param bounce_sgl - the destination SG list
1684 * @param orig_sgl - the segments of the source SG list.
1685 * @param orig_sgl_count - the count of segments.
1686 * @param seg_bits - bitmask of segments that need the bounce buffer,
1691 storvsc_copy_sgl_to_bounce_buf(struct sglist *bounce_sgl,
1692 bus_dma_segment_t *orig_sgl,
1693 unsigned int orig_sgl_count,
1696 int src_sgl_idx = 0;
/* Only segments flagged in seg_bits are staged into the bounce pages. */
1698 for (src_sgl_idx = 0; src_sgl_idx < orig_sgl_count; src_sgl_idx++) {
1699 if (seg_bits & (1 << src_sgl_idx)) {
1700 memcpy((void*)bounce_sgl->sg_segs[src_sgl_idx].ss_paddr,
1701 (void*)orig_sgl[src_sgl_idx].ds_addr,
1702 orig_sgl[src_sgl_idx].ds_len);
1704 bounce_sgl->sg_segs[src_sgl_idx].ss_len =
1705 orig_sgl[src_sgl_idx].ds_len;
1711 * @brief copy data from SG list which was used as bounce to another SG list
1713 * This function is responsible for copying data from one SG list with bounce
1714 * buffer to another SG list's segments.
1716 * @param dest_sgl - the destination SG list's segments
1717 * @param dest_sgl_count - the count of destination SG list's segments.
1718 * @param src_sgl - the source SG list.
1719 * @param seg_bits - indicate which segment used bounce buffer of src SG-list.
1723 storvsc_copy_from_bounce_buf_to_sgl(bus_dma_segment_t *dest_sgl,
1724 unsigned int dest_sgl_count,
1725 struct sglist* src_sgl,
/* Only segments flagged in seg_bits were bounced; copy those back. */
1730 for (sgl_idx = 0; sgl_idx < dest_sgl_count; sgl_idx++) {
1731 if (seg_bits & (1 << sgl_idx)) {
1732 memcpy((void*)(dest_sgl[sgl_idx].ds_addr),
1733 (void*)(src_sgl->sg_segs[sgl_idx].ss_paddr),
1734 src_sgl->sg_segs[sgl_idx].ss_len);
1740 * @brief check SG list with bounce buffer or not
1742 * This function is responsible for checking whether the SG list needs a
1742.5 * bounce buffer.
1744 * @param sgl - the SG list's segments
1745 * @param sg_count - the count of SG list's segments.
1746 * @param bits - out-param bitmask of segments that need a bounce buffer
1748 * @return -1 if the SG list does not need a bounce buffer
1751 storvsc_check_bounce_buffer_sgl(bus_dma_segment_t *sgl,
1752 unsigned int sg_count,
1757 uint64_t phys_addr = 0;
1758 uint64_t tmp_bits = 0;
1759 boolean_t found_hole = FALSE;
1760 boolean_t pre_aligned = TRUE;
/* Check whether the first segment starts page-aligned. */
1768 phys_addr = vtophys(sgl[0].ds_addr);
1769 offset = phys_addr - trunc_page(phys_addr);
1772 pre_aligned = FALSE;
/*
 * Walk the remaining segments: a "hole" exists when a segment is not
 * physically contiguous/page-aligned with its predecessor.
 */
1776 for (i = 1; i < sg_count; i++) {
1777 phys_addr = vtophys(sgl[i].ds_addr);
1778 offset = phys_addr - trunc_page(phys_addr);
1781 if (FALSE == pre_aligned){
1783 * This segment is aligned, if the previous
1784 * one is not aligned, find a hole
/* Record this segment as needing the bounce buffer. */
1790 tmp_bits |= 1ULL << i;
1792 if (phys_addr != vtophys(sgl[i-1].ds_addr +
1795 * Check whether connect to previous
1796 * segment, if not, find the hole
1803 pre_aligned = FALSE;
1816 * Copy bus_dma segments to multiple page buffer, which requires
1817 * the pages are compact composed except for the 1st and last pages.
/*
 * bus_dmamap_load_ccb() callback: translate the loaded segments into the
 * GPA (guest physical address) range list sent to the host.  Asserts that
 * only the first/last pages may be partial; interior pages must be full.
 */
1820 storvsc_xferbuf_prepare(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1822 struct hv_storvsc_request *reqp = arg;
1823 union ccb *ccb = reqp->ccb;
1824 struct ccb_scsiio *csio = &ccb->csio;
1825 struct storvsc_gpa_range *prplist;
1828 prplist = &reqp->prp_list;
1829 prplist->gpa_range.gpa_len = csio->dxfer_len;
/* Offset of the transfer within the first page. */
1830 prplist->gpa_range.gpa_ofs = segs[0].ds_addr & PAGE_MASK;
1832 for (i = 0; i < nsegs; i++) {
1836 KASSERT((segs[i].ds_addr & PAGE_MASK) +
1837 segs[i].ds_len == PAGE_SIZE,
1838 ("invalid 1st page, ofs 0x%jx, len %zu",
1839 (uintmax_t)segs[i].ds_addr,
1841 } else if (i == nsegs - 1) {
1842 KASSERT((segs[i].ds_addr & PAGE_MASK) == 0,
1843 ("invalid last page, ofs 0x%jx",
1844 (uintmax_t)segs[i].ds_addr));
1846 KASSERT((segs[i].ds_addr & PAGE_MASK) == 0 &&
1847 segs[i].ds_len == PAGE_SIZE,
1848 ("not a full page, ofs 0x%jx, len %zu",
1849 (uintmax_t)segs[i].ds_addr,
/* atop() converts the physical address to a page frame number. */
1854 prplist->gpa_page[i] = atop(segs[i].ds_addr);
1856 reqp->prp_cnt = nsegs;
1860 * @brief Fill in a request structure based on a CAM control block
1862 * Fills in a request structure based on the contents of a CAM control
1863 * block. The request structure holds the payload information for
1864 * VSCSI protocol request.
1866 * @param ccb pointer to a CAM control block
1867 * @param reqp pointer to a request structure
1870 create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp)
1872 struct ccb_scsiio *csio = &ccb->csio;
1875 uint64_t not_aligned_seg_bits = 0;
1878 /* refer to struct vmscsi_req for meanings of these two fields */
1879 reqp->vstor_packet.u.vm_srb.port =
1880 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path));
1881 reqp->vstor_packet.u.vm_srb.path_id =
1882 cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
1884 reqp->vstor_packet.u.vm_srb.target_id = ccb->ccb_h.target_id;
1885 reqp->vstor_packet.u.vm_srb.lun = ccb->ccb_h.target_lun;
/* Copy the CDB; CAM_CDB_POINTER selects pointer vs. inline CDB storage. */
1887 reqp->vstor_packet.u.vm_srb.cdb_len = csio->cdb_len;
1888 if(ccb->ccb_h.flags & CAM_CDB_POINTER) {
1889 memcpy(&reqp->vstor_packet.u.vm_srb.u.cdb, csio->cdb_io.cdb_ptr,
1892 memcpy(&reqp->vstor_packet.u.vm_srb.u.cdb, csio->cdb_io.cdb_bytes,
1896 if (hv_storvsc_use_win8ext_flags) {
1897 reqp->vstor_packet.u.vm_srb.win8_extension.time_out_value = 60;
1898 reqp->vstor_packet.u.vm_srb.win8_extension.srb_flags |=
1899 SRB_FLAGS_DISABLE_SYNCH_TRANSFER;
/* Translate the CAM data direction into the VSCSI data_in field. */
1901 switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
1903 reqp->vstor_packet.u.vm_srb.data_in = WRITE_TYPE;
1904 if (hv_storvsc_use_win8ext_flags) {
1905 reqp->vstor_packet.u.vm_srb.win8_extension.srb_flags |=
1910 reqp->vstor_packet.u.vm_srb.data_in = READ_TYPE;
1911 if (hv_storvsc_use_win8ext_flags) {
1912 reqp->vstor_packet.u.vm_srb.win8_extension.srb_flags |=
1917 reqp->vstor_packet.u.vm_srb.data_in = UNKNOWN_TYPE;
1918 if (hv_storvsc_use_win8ext_flags) {
1919 reqp->vstor_packet.u.vm_srb.win8_extension.srb_flags |=
1920 SRB_FLAGS_NO_DATA_TRANSFER;
1924 printf("Error: unexpected data direction: 0x%x\n",
1925 ccb->ccb_h.flags & CAM_DIR_MASK);
1929 reqp->sense_data = &csio->sense_data;
1930 reqp->sense_info_len = csio->sense_len;
1933 ccb->ccb_h.spriv_ptr0 = reqp;
/* Zero-length transfers carry no data payload — nothing more to map. */
1935 if (0 == csio->dxfer_len) {
1939 switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
1941 case CAM_DATA_VADDR:
1942 error = bus_dmamap_load_ccb(reqp->softc->storvsc_req_dtag,
1943 reqp->data_dmap, ccb, storvsc_xferbuf_prepare, reqp,
1946 xpt_print(ccb->ccb_h.path,
1947 "bus_dmamap_load_ccb failed: %d\n", error);
1950 if ((ccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_BIO)
1951 reqp->softc->sysctl_data.data_bio_cnt++;
1953 reqp->softc->sysctl_data.data_vaddr_cnt++;
1958 struct storvsc_gpa_range *prplist;
1963 bus_dma_segment_t *storvsc_sglist =
1964 (bus_dma_segment_t *)ccb->csio.data_ptr;
1965 u_int16_t storvsc_sg_count = ccb->csio.sglist_cnt;
1967 prplist = &reqp->prp_list;
1968 prplist->gpa_range.gpa_len = csio->dxfer_len;
1970 printf("Storvsc: get SG I/O operation, %d\n",
1971 reqp->vstor_packet.u.vm_srb.data_in);
1973 if (storvsc_sg_count > STORVSC_DATA_SEGCNT_MAX){
1974 printf("Storvsc: %d segments is too much, "
1975 "only support %d segments\n",
1976 storvsc_sg_count, STORVSC_DATA_SEGCNT_MAX);
1981 * We create our own bounce buffer function currently. Ideally
1982 * we should use BUS_DMA(9) framework. But with current BUS_DMA
1983 * code there is no callback API to check the page alignment of
1984 * middle segments before busdma can decide if a bounce buffer
1985 * is needed for particular segment. There is callback,
1986 * "bus_dma_filter_t *filter", but the parameters are not
1987 * sufficient for storvsc driver.
1989 * Add page alignment check in BUS_DMA(9) callback. Once
1990 * this is complete, switch the following code to use
1991 * BUS_DMA(9) for storvsc bounce buffer support.
1993 /* check if we need to create bounce buffer */
/*
 * FIX(review): the listing had the HTML-entity mojibake
 * "¬_aligned_seg_bits" here; restored "&not_aligned_seg_bits"
 * (address-of the out-param bitmask).
 */
1994 ret = storvsc_check_bounce_buffer_sgl(storvsc_sglist,
1995 storvsc_sg_count, &not_aligned_seg_bits);
1998 storvsc_create_bounce_buffer(storvsc_sg_count,
1999 reqp->vstor_packet.u.vm_srb.data_in);
2000 if (NULL == reqp->bounce_sgl) {
2001 printf("Storvsc_error: "
2002 "create bounce buffer failed.\n");
2006 reqp->bounce_sgl_count = storvsc_sg_count;
2007 reqp->not_aligned_seg_bits = not_aligned_seg_bits;
2010 * if it is write, we need copy the original data
2013 if (WRITE_TYPE == reqp->vstor_packet.u.vm_srb.data_in) {
2014 storvsc_copy_sgl_to_bounce_buf(
2018 reqp->not_aligned_seg_bits);
2021 /* transfer virtual address to physical frame number */
2022 if (reqp->not_aligned_seg_bits & 0x1){
2024 vtophys(reqp->bounce_sgl->sg_segs[0].ss_paddr);
2027 vtophys(storvsc_sglist[0].ds_addr);
2029 prplist->gpa_range.gpa_ofs = phys_addr & PAGE_MASK;
2031 pfn = phys_addr >> PAGE_SHIFT;
2032 prplist->gpa_page[0] = pfn;
/* Per segment: use the bounce page where flagged, original page otherwise. */
2034 for (i = 1; i < storvsc_sg_count; i++) {
2035 if (reqp->not_aligned_seg_bits & (1 << i)) {
2037 vtophys(reqp->bounce_sgl->sg_segs[i].ss_paddr);
2040 vtophys(storvsc_sglist[i].ds_addr);
2043 pfn = phys_addr >> PAGE_SHIFT;
2044 prplist->gpa_page[i] = pfn;
/* No bounce buffer needed: translate the original segments directly. */
2048 phys_addr = vtophys(storvsc_sglist[0].ds_addr);
2050 prplist->gpa_range.gpa_ofs = phys_addr & PAGE_MASK;
2052 for (i = 0; i < storvsc_sg_count; i++) {
2053 phys_addr = vtophys(storvsc_sglist[i].ds_addr);
2054 pfn = phys_addr >> PAGE_SHIFT;
2055 prplist->gpa_page[i] = pfn;
2059 /* check the last segment cross boundary or not */
2060 offset = phys_addr & PAGE_MASK;
2062 /* Add one more PRP entry */
2064 vtophys(storvsc_sglist[i-1].ds_addr +
2065 PAGE_SIZE - offset);
2066 pfn = phys_addr >> PAGE_SHIFT;
2067 prplist->gpa_page[i] = pfn;
2071 reqp->bounce_sgl_count = 0;
2073 reqp->softc->sysctl_data.data_sg_cnt++;
2077 printf("Unknow flags: %d\n", ccb->ccb_h.flags);
/*
 * Validate an INQUIRY response: a peripheral type of T_NODEVICE or a
 * qualifier of SID_QUAL_BAD_LU means no usable device at this LUN.
 * NOTE(review): the return statements were sampled out of this listing.
 */
2085 is_scsi_valid(const struct scsi_inquiry_data *inq_data)
2089 type = SID_TYPE(inq_data);
2090 if (type == T_NODEVICE)
2092 if (SID_QUAL(inq_data) == SID_QUAL_BAD_LU)
2098 * @brief completion function before returning to CAM
2100 * I/O process has been completed and the result needs
2101 * to be passed to the CAM layer.
2102 * Free resources related to this request.
2104 * @param reqp pointer to a request structure
2107 storvsc_io_done(struct hv_storvsc_request *reqp)
2109 union ccb *ccb = reqp->ccb;
2110 struct ccb_scsiio *csio = &ccb->csio;
2111 struct storvsc_softc *sc = reqp->softc;
2112 struct vmscsi_req *vm_srb = &reqp->vstor_packet.u.vm_srb;
2113 bus_dma_segment_t *ori_sglist = NULL;
2114 int ori_sg_count = 0;
2115 const struct scsi_generic *cmd;
2117 /* destroy bounce buffer if it is used */
2118 if (reqp->bounce_sgl_count) {
2119 ori_sglist = (bus_dma_segment_t *)ccb->csio.data_ptr;
2120 ori_sg_count = ccb->csio.sglist_cnt;
2123 * If it is READ operation, we should copy back the data
2124 * to original SG list.
2126 if (READ_TYPE == reqp->vstor_packet.u.vm_srb.data_in) {
2127 storvsc_copy_from_bounce_buf_to_sgl(ori_sglist,
2130 reqp->not_aligned_seg_bits);
2133 storvsc_destroy_bounce_buffer(reqp->bounce_sgl);
2134 reqp->bounce_sgl_count = 0;
/* I/O completed after the timeout handler had already fired. */
2137 if (reqp->retries > 0) {
2138 mtx_lock(&sc->hs_lock);
2139 #if HVS_TIMEOUT_TEST
2140 xpt_print(ccb->ccb_h.path,
2141 "%u: IO returned after timeout, "
2142 "waking up timer handler if any.\n", ticks);
2143 mtx_lock(&reqp->event.mtx);
2144 cv_signal(&reqp->event.cv);
2145 mtx_unlock(&reqp->event.mtx);
2148 xpt_print(ccb->ccb_h.path,
2149 "%u: IO returned after timeout, "
2150 "stopping timer if any.\n", ticks);
2151 mtx_unlock(&sc->hs_lock);
2156 * callout_drain() will wait for the timer handler to finish
2157 * if it is running. So we don't need any lock to synchronize
2158 * between this routine and the timer handler.
2159 * Note that we need to make sure reqp is not freed when timer
2160 * handler is using or will use it.
2162 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2163 callout_drain(&reqp->callout);
/* Recover the CDB so the opcode can be reported below. */
2166 cmd = (const struct scsi_generic *)
2167 ((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
2168 csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes);
2170 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2171 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2172 int srb_status = SRB_STATUS(vm_srb->srb_status);
/* DIAGNOSTIC-only: sysctl-injected SRB status for fault injection. */
2174 if (hv_storvsc_srb_status != -1) {
2175 srb_status = SRB_STATUS(hv_storvsc_srb_status & 0x3f);
2176 hv_storvsc_srb_status = -1;
2178 #endif /* DIAGNOSTIC */
/* SCSI status OK: map any non-success SRB status onto a CAM status. */
2179 if (vm_srb->scsi_status == SCSI_STATUS_OK) {
2180 if (srb_status != SRB_STATUS_SUCCESS) {
2181 bool log_error = true;
2182 switch (srb_status) {
2183 case SRB_STATUS_PENDING:
2184 /* We should never get this */
2185 panic("storvsc_io_done: SRB_STATUS_PENDING");
2187 case SRB_STATUS_ABORTED:
2189 * storvsc doesn't support aborts yet
2190 * but if we ever get this status
2191 * the I/O is complete - treat it as a
2194 ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
2196 case SRB_STATUS_ABORT_FAILED:
2197 /* We should never get this */
2198 panic("storvsc_io_done: SRB_STATUS_ABORT_FAILED");
2200 case SRB_STATUS_ERROR:
2202 * We should never get this.
2203 * Treat it as a CAM_UNREC_HBA_ERROR.
2204 * It will be retried
2206 ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
2208 case SRB_STATUS_BUSY:
2209 /* Host is busy. Delay and retry */
2210 ccb->ccb_h.status |= CAM_BUSY;
2212 case SRB_STATUS_INVALID_REQUEST:
2213 case SRB_STATUS_INVALID_PATH_ID:
2214 case SRB_STATUS_NO_DEVICE:
2215 case SRB_STATUS_INVALID_TARGET_ID:
2217 * These indicate an invalid address
2218 * and really should never be seen.
2219 * A CAM_PATH_INVALID could be
2220 * used here but I want to run
2221 * down retries. Do a CAM_BUSY
2222 * since the host might be having issues.
2224 ccb->ccb_h.status |= CAM_BUSY;
2226 case SRB_STATUS_TIMEOUT:
2227 case SRB_STATUS_COMMAND_TIMEOUT:
2228 /* The backend has timed this out */
2229 ccb->ccb_h.status |= CAM_BUSY;
2231 /* Some old pSCSI errors below */
2232 case SRB_STATUS_SELECTION_TIMEOUT:
2233 case SRB_STATUS_MESSAGE_REJECTED:
2234 case SRB_STATUS_PARITY_ERROR:
2235 case SRB_STATUS_NO_HBA:
2236 case SRB_STATUS_DATA_OVERRUN:
2237 case SRB_STATUS_UNEXPECTED_BUS_FREE:
2238 case SRB_STATUS_PHASE_SEQUENCE_FAILURE:
2240 * Old pSCSI responses, should never get.
2241 * If we do treat as a CAM_UNREC_HBA_ERROR
2242 * which will be retried
2244 ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
2246 case SRB_STATUS_BUS_RESET:
2247 ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
2249 case SRB_STATUS_BAD_SRB_BLOCK_LENGTH:
2251 * The request block is malformed and
2252 * I doubt it is from the guest. Just retry.
2254 ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
2256 /* Not used statuses just retry */
2257 case SRB_STATUS_REQUEST_FLUSHED:
2258 case SRB_STATUS_BAD_FUNCTION:
2259 case SRB_STATUS_NOT_POWERED:
2260 ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
2262 case SRB_STATUS_INVALID_LUN:
2264 * Don't log an EMS for this response since
2265 * there is no device at this LUN. This is a
2266 * normal and expected response when a device
2269 ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
2272 case SRB_STATUS_ERROR_RECOVERY:
2273 case SRB_STATUS_LINK_DOWN:
2275 * I don't ever expect these from
2276 * the host but if we ever get
2277 * retry after a delay
2279 ccb->ccb_h.status |= CAM_BUSY;
2283 * An undefined response assert on
2284 * on debug builds else retry
2286 ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
2287 KASSERT(srb_status <= SRB_STATUS_LINK_DOWN,
2288 ("storvsc: %s, unexpected srb_status of 0x%x",
2289 __func__, srb_status));
2293 xpt_print(ccb->ccb_h.path, "The hypervisor's I/O adapter "
2294 "driver received an unexpected response code 0x%x "
2295 "for operation: %s. If this continues to occur, "
2296 "report the condition to your hypervisor vendor so "
2297 "they can rectify the issue.\n", srb_status,
2298 scsi_op_desc(cmd->opcode, NULL));
2301 ccb->ccb_h.status |= CAM_REQ_CMP;
/* Successful INQUIRY: work around several known host response quirks. */
2304 if (cmd->opcode == INQUIRY &&
2305 srb_status == SRB_STATUS_SUCCESS) {
2306 int resp_xfer_len, resp_buf_len, data_len;
2307 uint8_t *resp_buf = (uint8_t *)csio->data_ptr;
2308 struct scsi_inquiry_data *inq_data =
2309 (struct scsi_inquiry_data *)csio->data_ptr;
2311 /* Get the buffer length reported by host */
2312 resp_xfer_len = vm_srb->transfer_len;
2314 /* Get the available buffer length */
2315 resp_buf_len = resp_xfer_len >= 5 ? resp_buf[4] + 5 : 0;
2316 data_len = (resp_buf_len < resp_xfer_len) ?
2317 resp_buf_len : resp_xfer_len;
2318 if (bootverbose && data_len >= 5) {
2319 xpt_print(ccb->ccb_h.path, "storvsc inquiry "
2320 "(%d) [%x %x %x %x %x ... ]\n", data_len,
2321 resp_buf[0], resp_buf[1], resp_buf[2],
2322 resp_buf[3], resp_buf[4]);
2325 * XXX: Hyper-V (since win2012r2) responses inquiry with
2326 * unknown version (0) for GEN-2 DVD device.
2327 * Manually set the version number to SPC3 in order to
2328 * ask CAM to continue probing with "PROBE_REPORT_LUNS".
2329 * see probedone() in scsi_xpt.c
2331 if (SID_TYPE(inq_data) == T_CDROM &&
2332 inq_data->version == 0 &&
2333 (vmstor_proto_version >= VMSTOR_PROTOCOL_VERSION_WIN8)) {
2334 inq_data->version = SCSI_REV_SPC3;
2336 xpt_print(ccb->ccb_h.path,
2337 "set version from 0 to %d\n",
2342 * XXX: Manually fix the wrong response returned from WS2012
2344 if (!is_scsi_valid(inq_data) &&
2345 (vmstor_proto_version == VMSTOR_PROTOCOL_VERSION_WIN8_1 ||
2346 vmstor_proto_version == VMSTOR_PROTOCOL_VERSION_WIN8 ||
2347 vmstor_proto_version == VMSTOR_PROTOCOL_VERSION_WIN7)) {
2348 if (data_len >= 4 &&
2349 (resp_buf[2] == 0 || resp_buf[3] == 0)) {
2350 resp_buf[2] = SCSI_REV_SPC3;
2351 resp_buf[3] = 2; // resp fmt must be 2
2353 xpt_print(ccb->ccb_h.path,
2354 "fix version and resp fmt for 0x%x\n",
2355 vmstor_proto_version);
2357 } else if (data_len >= SHORT_INQUIRY_LENGTH) {
2360 cam_strvis(vendor, inq_data->vendor,
2361 sizeof(inq_data->vendor), sizeof(vendor));
2363 * XXX: Upgrade SPC2 to SPC3 if host is WIN8 or
2364 * WIN2012 R2 in order to support UNMAP feature.
2366 if (!strncmp(vendor, "Msft", 4) &&
2367 SID_ANSI_REV(inq_data) == SCSI_REV_SPC2 &&
2368 (vmstor_proto_version ==
2369 VMSTOR_PROTOCOL_VERSION_WIN8_1 ||
2370 vmstor_proto_version ==
2371 VMSTOR_PROTOCOL_VERSION_WIN8)) {
2372 inq_data->version = SCSI_REV_SPC3;
2374 xpt_print(ccb->ccb_h.path,
/* Non-OK SCSI status path. */
2383 * On Some Windows hosts TEST_UNIT_READY command can return
2384 * SRB_STATUS_ERROR and sense data, for example, asc=0x3a,1
2385 * "(Medium not present - tray closed)". This error can be
2386 * ignored since it will be sent to host periodically.
2388 boolean_t unit_not_ready = \
2389 vm_srb->scsi_status == SCSI_STATUS_CHECK_COND &&
2390 cmd->opcode == TEST_UNIT_READY &&
2391 srb_status == SRB_STATUS_ERROR;
2392 if (!unit_not_ready && bootverbose) {
2393 mtx_lock(&sc->hs_lock);
2394 xpt_print(ccb->ccb_h.path,
2395 "storvsc scsi_status = %d, srb_status = %d\n",
2396 vm_srb->scsi_status, srb_status);
2397 mtx_unlock(&sc->hs_lock);
2399 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
/* Propagate SCSI status, residual, and autosense info to the CCB. */
2402 ccb->csio.scsi_status = (vm_srb->scsi_status & 0xFF);
2403 if (srb_status == SRB_STATUS_SUCCESS ||
2404 srb_status == SRB_STATUS_DATA_OVERRUN)
2405 ccb->csio.resid = ccb->csio.dxfer_len - vm_srb->transfer_len;
2407 ccb->csio.resid = ccb->csio.dxfer_len;
2409 if (reqp->sense_info_len != 0) {
2410 csio->sense_resid = csio->sense_len - reqp->sense_info_len;
2411 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
/* Release the SIM queue if we froze it, recycle the request, complete. */
2414 mtx_lock(&sc->hs_lock);
2415 if (reqp->softc->hs_frozen == 1) {
2416 xpt_print(ccb->ccb_h.path,
2417 "%u: storvsc unfreezing softc 0x%p.\n",
2418 ticks, reqp->softc);
2419 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2420 reqp->softc->hs_frozen = 0;
2422 storvsc_free_request(sc, reqp);
2423 mtx_unlock(&sc->hs_lock);
2425 xpt_done_direct(ccb);
2429 * @brief Free a request structure
2431 * Free a request structure by returning it to the free list
2433 * @param sc pointer to a softc
2434 * @param reqp pointer to a request structure
/* Caller is expected to hold hs_lock (all visible call sites do). */
2437 storvsc_free_request(struct storvsc_softc *sc, struct hv_storvsc_request *reqp)
2440 LIST_INSERT_HEAD(&sc->hs_free_list, reqp, link);
2444 * @brief Determine type of storage device from GUID
2446 * Using the type GUID, determine if this is a StorVSC (paravirtual
2447 * SCSI) or BlkVSC (paravirtual IDE) device.
2449 * @param dev a device
/* @return DRIVER_BLKVSC, DRIVER_STORVSC, or DRIVER_UNKNOWN. */
2452 static enum hv_storage_type
2453 storvsc_get_storage_type(device_t dev)
2455 device_t parent = device_get_parent(dev);
2457 if (VMBUS_PROBE_GUID(parent, dev, &gBlkVscDeviceType) == 0)
2458 return DRIVER_BLKVSC;
2459 if (VMBUS_PROBE_GUID(parent, dev, &gStorVscDeviceType) == 0)
2460 return DRIVER_STORVSC;
2461 return DRIVER_UNKNOWN;
/* PCI IDs of Hyper-V's emulated Intel PIIX4 IDE controller. */
2464 #define PCI_VENDOR_INTEL 0x8086
2465 #define PCI_PRODUCT_PIIX4 0x7111
/*
 * ada_probe_veto event handler: prevents ATA disk attachment on the
 * emulated PIIX4 so the same disks attach via this paravirtual driver.
 */
2468 storvsc_ada_probe_veto(void *arg __unused, struct cam_path *path,
2469 struct ata_params *ident_buf __unused, int *veto)
2473 * The ATA disks are shared with the controllers managed
2474 * by this driver, so veto the ATA disks' attachment; the
2475 * ATA disks will be attached as SCSI disks once this driver
2478 if (path->device->protocol == PROTO_ATA) {
2479 struct ccb_pathinq cpi;
2481 xpt_path_inq(&cpi, path);
2482 if (cpi.ccb_h.status == CAM_REQ_CMP &&
2483 cpi.hba_vendor == PCI_VENDOR_INTEL &&
2484 cpi.hba_device == PCI_PRODUCT_PIIX4) {
/* NOTE(review): format prints device before vendor — confirm intended. */
2488 "Disable ATA disks on "
2489 "simulated ATA controller (0x%04x%04x)\n",
2490 cpi.hba_device, cpi.hba_vendor);
/* Register the ada probe veto handler, but only when running on Hyper-V. */
2497 storvsc_sysinit(void *arg __unused)
2499 if (vm_guest == VM_GUEST_HV) {
2500 storvsc_handler_tag = EVENTHANDLER_REGISTER(ada_probe_veto,
2501 storvsc_ada_probe_veto, NULL, EVENTHANDLER_PRI_ANY);
2504 SYSINIT(storvsc_sys_init, SI_SUB_DRIVERS, SI_ORDER_SECOND, storvsc_sysinit,
/* Deregister the veto handler on unload; tag is NULL if never registered. */
2508 storvsc_sysuninit(void *arg __unused)
2510 if (storvsc_handler_tag != NULL)
2511 EVENTHANDLER_DEREGISTER(ada_probe_veto, storvsc_handler_tag);
2513 SYSUNINIT(storvsc_sys_uninit, SI_SUB_DRIVERS, SI_ORDER_SECOND,
2514 storvsc_sysuninit, NULL);