2 * Copyright (c) 2009-2012 Microsoft Corp.
3 * Copyright (c) 2012 NetApp Inc.
4 * Copyright (c) 2012 Citrix Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 * StorVSC driver for Hyper-V. This driver presents a SCSI HBA interface
31 * to the Common Access Method (CAM) layer. CAM control blocks (CCBs) are
32 * converted into VSCSI protocol messages which are delivered to the parent
33 * partition StorVSP driver over the Hyper-V VMBUS.
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include <sys/param.h>
40 #include <sys/condvar.h>
42 #include <sys/systm.h>
43 #include <sys/sockio.h>
45 #include <sys/malloc.h>
46 #include <sys/module.h>
47 #include <sys/kernel.h>
48 #include <sys/queue.h>
51 #include <sys/taskqueue.h>
53 #include <sys/mutex.h>
54 #include <sys/callout.h>
60 #include <sys/sglist.h>
61 #include <machine/bus.h>
62 #include <sys/bus_dma.h>
65 #include <cam/cam_ccb.h>
66 #include <cam/cam_periph.h>
67 #include <cam/cam_sim.h>
68 #include <cam/cam_xpt_sim.h>
69 #include <cam/cam_xpt_internal.h>
70 #include <cam/cam_debug.h>
71 #include <cam/scsi/scsi_all.h>
72 #include <cam/scsi/scsi_message.h>
74 #include <dev/hyperv/include/hyperv.h>
75 #include "hv_vstorage.h"
77 #define STORVSC_RINGBUFFER_SIZE (20*PAGE_SIZE)
78 #define STORVSC_MAX_LUNS_PER_TARGET (64)
79 #define STORVSC_MAX_IO_REQUESTS (STORVSC_MAX_LUNS_PER_TARGET * 2)
80 #define BLKVSC_MAX_IDE_DISKS_PER_TARGET (1)
81 #define BLKVSC_MAX_IO_REQUESTS STORVSC_MAX_IO_REQUESTS
82 #define STORVSC_MAX_TARGETS (2)
84 #define VSTOR_PKT_SIZE (sizeof(struct vstor_packet) - vmscsi_size_delta)
86 #define HV_ALIGN(x, a) roundup2(x, a)
91 LIST_ENTRY(hv_sgl_node) link;
92 struct sglist *sgl_data;
95 struct hv_sgl_page_pool{
96 LIST_HEAD(, hv_sgl_node) in_use_sgl_list;
97 LIST_HEAD(, hv_sgl_node) free_sgl_list;
101 #define STORVSC_MAX_SG_PAGE_CNT STORVSC_MAX_IO_REQUESTS * HV_MAX_MULTIPAGE_BUFFER_COUNT
103 enum storvsc_request_type {
109 struct hv_storvsc_request {
110 LIST_ENTRY(hv_storvsc_request) link;
111 struct vstor_packet vstor_packet;
112 hv_vmbus_multipage_buffer data_buf;
114 uint8_t sense_info_len;
117 struct storvsc_softc *softc;
118 struct callout callout;
119 struct sema synch_sema; /*Synchronize the request/response if needed */
120 struct sglist *bounce_sgl;
121 unsigned int bounce_sgl_count;
122 uint64_t not_aligned_seg_bits;
125 struct storvsc_softc {
126 struct hv_device *hs_dev;
127 LIST_HEAD(, hv_storvsc_request) hs_free_list;
129 struct storvsc_driver_props *hs_drv_props;
132 struct cam_sim *hs_sim;
133 struct cam_path *hs_path;
134 uint32_t hs_num_out_reqs;
135 boolean_t hs_destroy;
136 boolean_t hs_drain_notify;
137 boolean_t hs_open_multi_channel;
138 struct sema hs_drain_sema;
139 struct hv_storvsc_request hs_init_req;
140 struct hv_storvsc_request hs_reset_req;
145 * HyperV storvsc timeout testing cases:
146 * a. IO returned after first timeout;
147 * b. IO returned after second timeout and queue freeze;
148 * c. IO returned while timer handler is running
149 * The first can be tested by "sg_senddiag -vv /dev/daX",
150 * and the second and third can be done by
151 * "sg_wr_mode -v -p 08 -c 0,1a -m 0,ff /dev/daX".
153 #define HVS_TIMEOUT_TEST 0
156 * Bus/adapter reset functionality on the Hyper-V host is
157 * buggy and it will be disabled until
158 * it can be further tested.
160 #define HVS_HOST_RESET 0
162 struct storvsc_driver_props {
165 uint8_t drv_max_luns_per_target;
166 uint8_t drv_max_ios_per_target;
167 uint32_t drv_ringbuffer_size;
170 enum hv_storage_type {
176 #define HS_MAX_ADAPTERS 10
178 #define HV_STORAGE_SUPPORTS_MULTI_CHANNEL 0x1
180 /* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */
181 static const hv_guid gStorVscDeviceType={
182 .data = {0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
183 0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f}
186 /* {32412632-86cb-44a2-9b5c-50d1417354f5} */
187 static const hv_guid gBlkVscDeviceType={
188 .data = {0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
189 0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5}
192 static struct storvsc_driver_props g_drv_props_table[] = {
193 {"blkvsc", "Hyper-V IDE Storage Interface",
194 BLKVSC_MAX_IDE_DISKS_PER_TARGET, BLKVSC_MAX_IO_REQUESTS,
195 STORVSC_RINGBUFFER_SIZE},
196 {"storvsc", "Hyper-V SCSI Storage Interface",
197 STORVSC_MAX_LUNS_PER_TARGET, STORVSC_MAX_IO_REQUESTS,
198 STORVSC_RINGBUFFER_SIZE}
202 * Sense buffer size changed in win8; have a run-time
203 * variable to track the size we should use.
205 static int sense_buffer_size = PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE;
208 * The size of the vmscsi_request has changed in win8. The
209 * additional size is for the newly added elements in the
210 * structure. These elements are valid only when we are talking
212 * Track the correct size we need to apply.
214 static int vmscsi_size_delta;
216 * The storage protocol version is determined during the
217 * initial exchange with the host. It will indicate which
218 * storage functionality is available in the host.
220 static int vmstor_proto_version;
222 struct vmstor_proto {
224 int sense_buffer_size;
225 int vmscsi_size_delta;
228 static const struct vmstor_proto vmstor_proto_list[] = {
230 VMSTOR_PROTOCOL_VERSION_WIN10,
231 POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
235 VMSTOR_PROTOCOL_VERSION_WIN8_1,
236 POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
240 VMSTOR_PROTOCOL_VERSION_WIN8,
241 POST_WIN7_STORVSC_SENSE_BUFFER_SIZE,
245 VMSTOR_PROTOCOL_VERSION_WIN7,
246 PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
247 sizeof(struct vmscsi_win8_extension),
250 VMSTOR_PROTOCOL_VERSION_WIN6,
251 PRE_WIN8_STORVSC_SENSE_BUFFER_SIZE,
252 sizeof(struct vmscsi_win8_extension),
256 /* static functions */
257 static int storvsc_probe(device_t dev);
258 static int storvsc_attach(device_t dev);
259 static int storvsc_detach(device_t dev);
260 static void storvsc_poll(struct cam_sim * sim);
261 static void storvsc_action(struct cam_sim * sim, union ccb * ccb);
262 static int create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp);
263 static void storvsc_free_request(struct storvsc_softc *sc, struct hv_storvsc_request *reqp);
264 static enum hv_storage_type storvsc_get_storage_type(device_t dev);
265 static void hv_storvsc_rescan_target(struct storvsc_softc *sc);
266 static void hv_storvsc_on_channel_callback(void *context);
267 static void hv_storvsc_on_iocompletion( struct storvsc_softc *sc,
268 struct vstor_packet *vstor_packet,
269 struct hv_storvsc_request *request);
270 static int hv_storvsc_connect_vsp(struct hv_device *device);
271 static void storvsc_io_done(struct hv_storvsc_request *reqp);
272 static void storvsc_copy_sgl_to_bounce_buf(struct sglist *bounce_sgl,
273 bus_dma_segment_t *orig_sgl,
274 unsigned int orig_sgl_count,
276 void storvsc_copy_from_bounce_buf_to_sgl(bus_dma_segment_t *dest_sgl,
277 unsigned int dest_sgl_count,
278 struct sglist* src_sgl,
281 static device_method_t storvsc_methods[] = {
282 /* Device interface */
283 DEVMETHOD(device_probe, storvsc_probe),
284 DEVMETHOD(device_attach, storvsc_attach),
285 DEVMETHOD(device_detach, storvsc_detach),
286 DEVMETHOD(device_shutdown, bus_generic_shutdown),
290 static driver_t storvsc_driver = {
291 "storvsc", storvsc_methods, sizeof(struct storvsc_softc),
294 static devclass_t storvsc_devclass;
295 DRIVER_MODULE(storvsc, vmbus, storvsc_driver, storvsc_devclass, 0, 0);
296 MODULE_VERSION(storvsc, 1);
297 MODULE_DEPEND(storvsc, vmbus, 1, 1, 1);
301 * The host is capable of sending messages to us that are
302 * completely unsolicited. So, we need to address the race
303 * condition where we may be in the process of unloading the
304 * driver when the host may send us an unsolicited message.
305 * We address this issue by implementing a sequentially
306 * consistent protocol:
308 * 1. Channel callback is invoked while holding the channel lock
309 * and an unloading driver will reset the channel callback under
310 * the protection of this channel lock.
312 * 2. To ensure bounded wait time for unloading a driver, we don't
313 * permit outgoing traffic once the device is marked as being
316 * 3. Once the device is marked as being destroyed, we only
317 * permit incoming traffic to properly account for
318 * packets already sent out.
/*
 * get_stor_device() - map a hv_device to its storvsc softc while honoring
 * the teardown protocol described above: outbound callers are refused once
 * hs_destroy is set; inbound callers are refused only after all outstanding
 * requests (hs_num_out_reqs) have drained.
 * NOTE(review): this extract has gaps (missing original lines); the
 * outbound/inbound selector parameter and the return paths are presumed
 * from the visible conditionals -- confirm against the complete source.
 */
320 static inline struct storvsc_softc *
321 get_stor_device(struct hv_device *device,
324 	struct storvsc_softc *sc;
326 	sc = device_get_softc(device->device);
333 	 * Here we permit outgoing I/O only
334 	 * if the device is not being destroyed.
337 	if (sc->hs_destroy) {
342 	 * inbound case; if being destroyed
343 	 * only permit to account for
344 	 * messages already sent out.
346 	if (sc->hs_destroy && (sc->hs_num_out_reqs == 0)) {
354 * @brief Callback handler, invoked when a multi-channel offer is received
356 * @param context new multi-channel
/*
 * storvsc_handle_sc_creation() - VMBus sub-channel creation callback.
 * Opens the newly offered sub-channel with the same ring-buffer size and
 * channel callback as the primary channel, but only after the primary
 * multi-channel handshake has completed (hs_open_multi_channel).
 * NOTE(review): lines are missing from this extract; the remaining
 * hv_vmbus_channel_open() arguments and error handling are not visible.
 */
359 storvsc_handle_sc_creation(void *context)
361 	hv_vmbus_channel *new_channel;
362 	struct hv_device *device;
363 	struct storvsc_softc *sc;
364 	struct vmstor_chan_props props;
367 	new_channel = (hv_vmbus_channel *)context;
368 	device = new_channel->device;
369 	sc = get_stor_device(device, TRUE);
	/* Bail out until the host has acknowledged multi-channel support. */
373 	if (FALSE == sc->hs_open_multi_channel)
376 	memset(&props, 0, sizeof(props));
378 	ret = hv_vmbus_channel_open(new_channel,
379 	    sc->hs_drv_props->drv_ringbuffer_size,
380 	    sc->hs_drv_props->drv_ringbuffer_size,
382 	    sizeof(struct vmstor_chan_props),
383 	    hv_storvsc_on_channel_callback,
390 * @brief Send multi-channel creation request to host
392 * @param device a Hyper-V device pointer
393 * @param max_chans the max channels supported by vmbus
/*
 * storvsc_send_multichannel_request() - ask the host to create sub-channels.
 * Requests MIN(max_chans, mp_ncpus) channels via a CREATE_MULTI_CHANNELS
 * vstor packet, waits (up to 5 s) for the completion, and marks the softc
 * as multi-channel capable on success.
 * NOTE(review): this extract has gaps; some error-return paths and
 * send_packet arguments are not visible.
 */
396 storvsc_send_multichannel_request(struct hv_device *dev, int max_chans)
398 	struct storvsc_softc *sc;
399 	struct hv_storvsc_request *request;
400 	struct vstor_packet *vstor_packet;
401 	int request_channels_cnt = 0;
404 	/* get multichannels count that need to create */
405 	request_channels_cnt = MIN(max_chans, mp_ncpus);
407 	sc = get_stor_device(dev, TRUE);
409 		printf("Storvsc_error: get sc failed while send mutilchannel "
414 	request = &sc->hs_init_req;
416 	/* Establish a handler for multi-channel */
417 	dev->channel->sc_creation_callback = storvsc_handle_sc_creation;
419 	/* request the host to create multi-channel */
420 	memset(request, 0, sizeof(struct hv_storvsc_request));
421 	/* Completion is signalled from the channel callback via this sema. */
422 	sema_init(&request->synch_sema, 0, ("stor_synch_sema"));
424 	vstor_packet = &request->vstor_packet;
426 	vstor_packet->operation = VSTOR_OPERATION_CREATE_MULTI_CHANNELS;
427 	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
428 	vstor_packet->u.multi_channels_cnt = request_channels_cnt;
430 	ret = hv_vmbus_channel_send_packet(
434 	    (uint64_t)(uintptr_t)request,
435 	    HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
436 	    HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
438 	/* wait for 5 seconds */
439 	ret = sema_timedwait(&request->synch_sema, 5 * hz);
441 		printf("Storvsc_error: create multi-channel timeout, %d\n",
446 	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
447 	    vstor_packet->status != 0) {
448 		printf("Storvsc_error: create multi-channel invalid operation "
449 		    "(%d) or statue (%u)\n",
450 		    vstor_packet->operation, vstor_packet->status);
454 	sc->hs_open_multi_channel = TRUE;
457 	printf("Storvsc create multi-channel success!\n");
461 * @brief initialize channel connection to parent partition
463 * @param dev a Hyper-V device pointer
464 * @returns 0 on success, non-zero error on failure
/*
 * hv_storvsc_channel_init() - run the VSC/VSP initialization handshake:
 * BEGIN_INITIALIZATION, protocol-version negotiation (newest first from
 * vmstor_proto_list), QUERY_PROPERTIES (channel properties, including the
 * multi-channel capability flag), then END_INITIALIZATION.  Each step sends
 * a vstor packet and waits up to 5 s on the request's semaphore, which the
 * channel callback posts when the host reply arrives.
 * NOTE(review): this extract has gaps; intermediate error-return paths and
 * some send_packet arguments are not visible.
 */
467 hv_storvsc_channel_init(struct hv_device *dev)
470 	struct hv_storvsc_request *request;
471 	struct vstor_packet *vstor_packet;
472 	struct storvsc_softc *sc;
473 	uint16_t max_chans = 0;
474 	boolean_t support_multichannel = FALSE;
477 	support_multichannel = FALSE;
479 	sc = get_stor_device(dev, TRUE);
483 	request = &sc->hs_init_req;
484 	memset(request, 0, sizeof(struct hv_storvsc_request));
485 	vstor_packet = &request->vstor_packet;
489 	 * Initiate the vsc/vsp initialization protocol on the open channel
491 	sema_init(&request->synch_sema, 0, ("stor_synch_sema"));
	/* Step 1: BEGIN_INITIALIZATION. */
493 	vstor_packet->operation = VSTOR_OPERATION_BEGININITIALIZATION;
494 	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
497 	ret = hv_vmbus_channel_send_packet(
501 	    (uint64_t)(uintptr_t)request,
502 	    HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
503 	    HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
509 	ret = sema_timedwait(&request->synch_sema, 5 * hz);
513 	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
514 	    vstor_packet->status != 0) {
	/*
	 * Step 2: negotiate the storage protocol version, trying each entry
	 * of vmstor_proto_list (newest first) until the host accepts one.
	 */
518 	for (i = 0; i < nitems(vmstor_proto_list); i++) {
519 	/* reuse the packet for version range supported */
521 		memset(vstor_packet, 0, sizeof(struct vstor_packet));
522 		vstor_packet->operation = VSTOR_OPERATION_QUERYPROTOCOLVERSION;
523 		vstor_packet->flags = REQUEST_COMPLETION_FLAG;
525 		vstor_packet->u.version.major_minor =
526 		    vmstor_proto_list[i].proto_version;
528 		/* revision is only significant for Windows guests */
529 		vstor_packet->u.version.revision = 0;
531 		ret = hv_vmbus_channel_send_packet(
535 			(uint64_t)(uintptr_t)request,
536 			HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
537 			HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
543 		ret = sema_timedwait(&request->synch_sema, 5 * hz);
548 		if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO) {
552 		if (vstor_packet->status == 0) {
553 			vmstor_proto_version =
554 			    vmstor_proto_list[i].proto_version;
556 			    vmstor_proto_list[i].sense_buffer_size;
558 			    vmstor_proto_list[i].vmscsi_size_delta;
563 	if (vstor_packet->status != 0) {
568 	 * Query channel properties
570 	memset(vstor_packet, 0, sizeof(struct vstor_packet));
571 	vstor_packet->operation = VSTOR_OPERATION_QUERYPROPERTIES;
572 	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
574 	ret = hv_vmbus_channel_send_packet(
578 	    (uint64_t)(uintptr_t)request,
579 	    HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
580 	    HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
586 	ret = sema_timedwait(&request->synch_sema, 5 * hz);
591 	/* TODO: Check returned version */
592 	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
593 	    vstor_packet->status != 0) {
597 	/* multi-channels feature is supported by WIN8 and above version */
598 	max_chans = vstor_packet->u.chan_props.max_channel_cnt;
599 	if ((hv_vmbus_protocal_version != HV_VMBUS_VERSION_WIN7) &&
600 	    (hv_vmbus_protocal_version != HV_VMBUS_VERSION_WS2008) &&
601 	    (vstor_packet->u.chan_props.flags &
602 	     HV_STORAGE_SUPPORTS_MULTI_CHANNEL)) {
603 		support_multichannel = TRUE;
	/* Step 4: END_INITIALIZATION closes the handshake. */
606 	memset(vstor_packet, 0, sizeof(struct vstor_packet));
607 	vstor_packet->operation = VSTOR_OPERATION_ENDINITIALIZATION;
608 	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
610 	ret = hv_vmbus_channel_send_packet(
614 	    (uint64_t)(uintptr_t)request,
615 	    HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
616 	    HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
623 	ret = sema_timedwait(&request->synch_sema, 5 * hz);
628 	if (vstor_packet->operation != VSTOR_OPERATION_COMPLETEIO ||
629 	    vstor_packet->status != 0)
633 	 * If multi-channel is supported, send multichannel create
636 	if (support_multichannel)
637 		storvsc_send_multichannel_request(dev, max_chans);
640 	sema_destroy(&request->synch_sema);
645 * @brief Open channel connection to parent partition StorVSP driver
647 * Open and initialize channel connection to parent partition StorVSP driver.
649 * @param pointer to a Hyper-V device
650 * @returns 0 on success, non-zero error on failure
/*
 * hv_storvsc_connect_vsp() - open the primary VMBus channel to the host's
 * StorVSP and then run the initialization handshake
 * (hv_storvsc_channel_init).
 * NOTE(review): lines are missing from this extract; the remaining
 * hv_vmbus_channel_open() arguments and error handling are not visible.
 */
653 hv_storvsc_connect_vsp(struct hv_device *dev)
656 	struct vmstor_chan_props props;
657 	struct storvsc_softc *sc;
659 	sc = device_get_softc(dev->device);
661 	memset(&props, 0, sizeof(struct vmstor_chan_props));
667 	ret = hv_vmbus_channel_open(
669 	    sc->hs_drv_props->drv_ringbuffer_size,
670 	    sc->hs_drv_props->drv_ringbuffer_size,
672 	    sizeof(struct vmstor_chan_props),
673 	    hv_storvsc_on_channel_callback,
680 	ret = hv_storvsc_channel_init(dev);
/*
 * hv_storvsc_host_reset() - issue a RESETBUS request to the host and wait
 * up to 5 s for completion.  Compiled only under HVS_HOST_RESET, which is
 * currently 0 because host-side bus reset is known buggy (see comment at
 * the HVS_HOST_RESET definition).
 * NOTE(review): this extract has gaps; error-return paths are not visible.
 */
687 hv_storvsc_host_reset(struct hv_device *dev)
690 	struct storvsc_softc *sc;
692 	struct hv_storvsc_request *request;
693 	struct vstor_packet *vstor_packet;
695 	sc = get_stor_device(dev, TRUE);
700 	request = &sc->hs_reset_req;
702 	vstor_packet = &request->vstor_packet;
704 	sema_init(&request->synch_sema, 0, "stor synch sema");
706 	vstor_packet->operation = VSTOR_OPERATION_RESETBUS;
707 	vstor_packet->flags = REQUEST_COMPLETION_FLAG;
709 	ret = hv_vmbus_channel_send_packet(dev->channel,
712 	    (uint64_t)(uintptr_t)&sc->hs_reset_req,
713 	    HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
714 	    HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
720 	ret = sema_timedwait(&request->synch_sema, 5 * hz); /* KYS 5 seconds */
728 	 * At this point, all outstanding requests in the adapter
729 	 * should have been flushed out and return to us
733 	sema_destroy(&request->synch_sema);
736 #endif /* HVS_HOST_RESET */
739 * @brief Function to initiate an I/O request
741 * @param device Hyper-V device pointer
742 * @param request pointer to a request structure
743 * @returns 0 on success, non-zero error on failure
/*
 * hv_storvsc_io_request() - fill in the EXECUTE_SRB vstor packet for a
 * request and send it on a channel chosen by
 * vmbus_select_outgoing_channel().  Requests with a data buffer go out as
 * a multi-page-buffer packet; others as a plain in-band packet.  On
 * success the outstanding-request counter is bumped so detach can drain.
 * NOTE(review): this extract has gaps; some send_packet arguments and the
 * error-return path after the send failure printf are not visible.
 */
746 hv_storvsc_io_request(struct hv_device *device,
747 		struct hv_storvsc_request *request)
749 	struct storvsc_softc *sc;
750 	struct vstor_packet *vstor_packet = &request->vstor_packet;
751 	struct hv_vmbus_channel* outgoing_channel = NULL;
754 	sc = get_stor_device(device, TRUE);
760 	vstor_packet->flags |= REQUEST_COMPLETION_FLAG;
762 	vstor_packet->u.vm_srb.length = VSTOR_PKT_SIZE;
764 	vstor_packet->u.vm_srb.sense_info_len = sense_buffer_size;
766 	vstor_packet->u.vm_srb.transfer_len = request->data_buf.length;
768 	vstor_packet->operation = VSTOR_OPERATION_EXECUTESRB;
770 	outgoing_channel = vmbus_select_outgoing_channel(device->channel);
	/* Drop hs_lock around the (potentially blocking) VMBus send. */
772 	mtx_unlock(&request->softc->hs_lock);
773 	if (request->data_buf.length) {
774 		ret = hv_vmbus_channel_send_packet_multipagebuffer(
779 		    (uint64_t)(uintptr_t)request);
782 		ret = hv_vmbus_channel_send_packet(
786 		    (uint64_t)(uintptr_t)request,
787 		    HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
788 		    HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
790 	mtx_lock(&request->softc->hs_lock);
793 		printf("Unable to send packet %p ret %d", vstor_packet, ret);
795 		atomic_add_int(&sc->hs_num_out_reqs, 1);
803 * Process IO_COMPLETION_OPERATION and ready
804 * the result to be completed for upper layer
805 * processing by the CAM layer.
/*
 * hv_storvsc_on_iocompletion() - handle a COMPLETEIO reply from the host:
 * copy autosense data (if the SRB carried valid sense for a CHECK
 * CONDITION), complete the request up to CAM via storvsc_io_done(), and
 * decrement the outstanding-request counter, waking a draining detach
 * when the last request completes.
 */
808 hv_storvsc_on_iocompletion(struct storvsc_softc *sc,
809 			   struct vstor_packet *vstor_packet,
810 			   struct hv_storvsc_request *request)
812 	struct vmscsi_req *vm_srb;
814 	vm_srb = &vstor_packet->u.vm_srb;
816 	if (((vm_srb->scsi_status & 0xFF) == SCSI_STATUS_CHECK_COND) &&
817 	    (vm_srb->srb_status & SRB_STATUS_AUTOSENSE_VALID)) {
818 		/* Autosense data available */
820 		KASSERT(vm_srb->sense_info_len <= request->sense_info_len,
821 		    ("vm_srb->sense_info_len <= "
822 		     "request->sense_info_len"));
824 		memcpy(request->sense_data, vm_srb->u.sense_data,
825 		    vm_srb->sense_info_len);
827 		request->sense_info_len = vm_srb->sense_info_len;
830 	/* Complete request by passing to the CAM layer */
831 	storvsc_io_done(request);
832 	atomic_subtract_int(&sc->hs_num_out_reqs, 1);
833 	if (sc->hs_drain_notify && (sc->hs_num_out_reqs == 0)) {
834 		sema_post(&sc->hs_drain_sema);
/*
 * hv_storvsc_rescan_target() - schedule a CAM wildcard rescan of this SIM's
 * bus in response to a host ENUMERATE_BUS notification (e.g. hot-add or
 * hot-remove of a disk).  Allocates a CCB without sleeping since this runs
 * from the channel callback.
 * NOTE(review): this extract has gaps; the xpt_rescan()/cleanup tail of
 * the function is not visible.
 */
839 hv_storvsc_rescan_target(struct storvsc_softc *sc)
842 	target_id_t targetid;
845 	pathid = cam_sim_path(sc->hs_sim);
846 	targetid = CAM_TARGET_WILDCARD;
849 	 * Allocate a CCB and schedule a rescan.
851 	ccb = xpt_alloc_ccb_nowait();
853 		printf("unable to alloc CCB for rescan\n");
857 	if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, targetid,
858 	    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
859 		printf("unable to create path for rescan, pathid: %u,"
860 		    "targetid: %u\n", pathid, targetid);
865 	if (targetid == CAM_TARGET_WILDCARD)
866 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
868 		ccb->ccb_h.func_code = XPT_SCAN_TGT;
/*
 * hv_storvsc_on_channel_callback() - VMBus receive handler for a storvsc
 * channel.  Drains the ring buffer: packets whose transaction id matches
 * the init/reset request simply wake the waiter (the handshake paths
 * above); all other packets are dispatched on vstor_packet->operation
 * (COMPLETEIO -> I/O completion, ENUMERATE_BUS -> rescan, REMOVEDEVICE ->
 * unimplemented).
 * NOTE(review): this extract has gaps; some recv_packet arguments, the
 * request_id declaration, and loop/brace structure are not visible.
 */
874 hv_storvsc_on_channel_callback(void *context)
877 	hv_vmbus_channel *channel = (hv_vmbus_channel *)context;
878 	struct hv_device *device = NULL;
879 	struct storvsc_softc *sc;
880 	uint32_t bytes_recvd;
882 	uint8_t packet[roundup2(sizeof(struct vstor_packet), 8)];
883 	struct hv_storvsc_request *request;
884 	struct vstor_packet *vstor_packet;
886 	device = channel->device;
887 	KASSERT(device, ("device is NULL"));
889 	sc = get_stor_device(device, FALSE);
891 		printf("Storvsc_error: get stor device failed.\n");
895 	ret = hv_vmbus_channel_recv_packet(
898 	    roundup2(VSTOR_PKT_SIZE, 8),
902 	while ((ret == 0) && (bytes_recvd > 0)) {
		/* The transaction id round-trips our request pointer. */
903 		request = (struct hv_storvsc_request *)(uintptr_t)request_id;
905 		if ((request == &sc->hs_init_req) ||
906 		    (request == &sc->hs_reset_req)) {
907 			memcpy(&request->vstor_packet, packet,
908 			    sizeof(struct vstor_packet));
909 			sema_post(&request->synch_sema);
911 			vstor_packet = (struct vstor_packet *)packet;
912 			switch(vstor_packet->operation) {
913 			case VSTOR_OPERATION_COMPLETEIO:
915 				panic("VMBUS: storvsc received a "
916 				    "packet with NULL request id in "
917 				    "COMPLETEIO operation.");
919 				hv_storvsc_on_iocompletion(sc,
920 				    vstor_packet, request);
922 			case VSTOR_OPERATION_REMOVEDEVICE:
923 				printf("VMBUS: storvsc operation %d not "
924 				    "implemented.\n", vstor_packet->operation);
925 				/* TODO: implement */
927 			case VSTOR_OPERATION_ENUMERATE_BUS:
928 				hv_storvsc_rescan_target(sc);
934 		ret = hv_vmbus_channel_recv_packet(
937 		    roundup2(VSTOR_PKT_SIZE, 8),
944 * @brief StorVSC probe function
946 * Device probe function. Returns 0 if the input device is a StorVSC
947 * device. Otherwise, a ENXIO is returned. If the input device is
948 * for BlkVSC (paravirtual IDE) device and this support is disabled in
949 * favor of the emulated ATA/IDE device, return ENXIO.
952 * @returns 0 on success, ENXIO if not a matching StorVSC device
/*
 * storvsc_probe() - newbus probe.  Claims StorVSC (SCSI) devices always;
 * claims BlkVSC (paravirtual IDE) devices only when the hw.ata.disk_enable
 * tunable is NOT set, so the emulated ATA/IDE device can be preferred.
 * NOTE(review): this extract has gaps; switch case labels and the final
 * return are not visible.
 */
955 storvsc_probe(device_t dev)
957 	int ata_disk_enable = 0;
960 	switch (storvsc_get_storage_type(dev)) {
963 			device_printf(dev, "DRIVER_BLKVSC-Emulated ATA/IDE probe\n");
964 		if (!getenv_int("hw.ata.disk_enable", &ata_disk_enable)) {
967 				    "Enlightened ATA/IDE detected\n");
968 			ret = BUS_PROBE_DEFAULT;
969 		} else if(bootverbose)
970 			device_printf(dev, "Emulated ATA/IDE set (hw.ata.disk_enable set)\n");
974 			device_printf(dev, "Enlightened SCSI device detected\n");
975 		ret = BUS_PROBE_DEFAULT;
984 * @brief StorVSC attach function
986 * Function responsible for allocating per-device structures,
987 * setting up CAM interfaces and scanning for available LUNs to
988 * be used for SCSI device peripherals.
991 * @returns 0 on success or an error on failure
/*
 * storvsc_attach() - newbus attach.  Serialized by a root-mount hold;
 * allocates the per-device request free list, builds the global SG-list
 * page pool on first attach, connects to the host VSP, then registers a
 * CAM SIM/bus/path.  The tail (after the second root_mount_rel) is the
 * failure-cleanup path that frees the request list and SG pool.
 * NOTE(review): this extract has gaps; several error checks, goto labels,
 * and closing braces are not visible.
 */
994 storvsc_attach(device_t dev)
996 	struct hv_device *hv_dev = vmbus_get_devctx(dev);
997 	enum hv_storage_type stor_type;
998 	struct storvsc_softc *sc;
999 	struct cam_devq *devq;
1001 	struct hv_storvsc_request *reqp;
1002 	struct root_hold_token *root_mount_token = NULL;
1003 	struct hv_sgl_node *sgl_node = NULL;
1004 	void *tmp_buff = NULL;
1007 	 * We need to serialize storvsc attach calls.
1009 	root_mount_token = root_mount_hold("storvsc");
1011 	sc = device_get_softc(dev);
1017 	stor_type = storvsc_get_storage_type(dev);
1019 	if (stor_type == DRIVER_UNKNOWN) {
1024 	bzero(sc, sizeof(struct storvsc_softc));
1026 	/* fill in driver specific properties */
1027 	sc->hs_drv_props = &g_drv_props_table[stor_type];
1029 	/* fill in device specific properties */
1030 	sc->hs_unit = device_get_unit(dev);
1031 	sc->hs_dev = hv_dev;
1032 	device_set_desc(dev, g_drv_props_table[stor_type].drv_desc);
1034 	LIST_INIT(&sc->hs_free_list);
1035 	mtx_init(&sc->hs_lock, "hvslck", NULL, MTX_DEF);
	/* Pre-allocate the per-device request pool (never fails: M_WAITOK). */
1037 	for (i = 0; i < sc->hs_drv_props->drv_max_ios_per_target; ++i) {
1038 		reqp = malloc(sizeof(struct hv_storvsc_request),
1039 		    M_DEVBUF, M_WAITOK|M_ZERO);
1042 		LIST_INSERT_HEAD(&sc->hs_free_list, reqp, link);
1045 	/* create sg-list page pool */
1046 	if (FALSE == g_hv_sgl_page_pool.is_init) {
1047 		g_hv_sgl_page_pool.is_init = TRUE;
1048 		LIST_INIT(&g_hv_sgl_page_pool.in_use_sgl_list);
1049 		LIST_INIT(&g_hv_sgl_page_pool.free_sgl_list);
1052 		 * Pre-create SG list, each SG list with
1053 		 * HV_MAX_MULTIPAGE_BUFFER_COUNT segments, each
1054 		 * segment has one page buffer
1056 		for (i = 0; i < STORVSC_MAX_IO_REQUESTS; i++) {
1057 			sgl_node = malloc(sizeof(struct hv_sgl_node),
1058 			    M_DEVBUF, M_WAITOK|M_ZERO);
1060 			sgl_node->sgl_data =
1061 			    sglist_alloc(HV_MAX_MULTIPAGE_BUFFER_COUNT,
1064 			for (j = 0; j < HV_MAX_MULTIPAGE_BUFFER_COUNT; j++) {
1065 				tmp_buff = malloc(PAGE_SIZE,
1066 				    M_DEVBUF, M_WAITOK|M_ZERO);
				/*
				 * NOTE(review): ss_paddr here stores a kernel
				 * virtual address cast to vm_paddr_t; the
				 * matching free() in the cleanup paths casts
				 * it back.  Confirm against full source.
				 */
1068 				sgl_node->sgl_data->sg_segs[j].ss_paddr =
1069 				    (vm_paddr_t)tmp_buff;
1072 			LIST_INSERT_HEAD(&g_hv_sgl_page_pool.free_sgl_list,
1077 	sc->hs_destroy = FALSE;
1078 	sc->hs_drain_notify = FALSE;
1079 	sc->hs_open_multi_channel = FALSE;
1080 	sema_init(&sc->hs_drain_sema, 0, "Store Drain Sema");
1082 	ret = hv_storvsc_connect_vsp(hv_dev);
1088 	 * Create the device queue.
1089 	 * Hyper-V maps each target to one SCSI HBA
1091 	devq = cam_simq_alloc(sc->hs_drv_props->drv_max_ios_per_target);
1093 		device_printf(dev, "Failed to alloc device queue\n");
1098 	sc->hs_sim = cam_sim_alloc(storvsc_action,
1100 	    sc->hs_drv_props->drv_name,
1104 	    sc->hs_drv_props->drv_max_ios_per_target,
1107 	if (sc->hs_sim == NULL) {
1108 		device_printf(dev, "Failed to alloc sim\n");
1109 		cam_simq_free(devq);
1114 	mtx_lock(&sc->hs_lock);
1115 	/* bus_id is set to 0, need to get it from VMBUS channel query? */
1116 	if (xpt_bus_register(sc->hs_sim, dev, 0) != CAM_SUCCESS) {
1117 		cam_sim_free(sc->hs_sim, /*free_devq*/TRUE);
1118 		mtx_unlock(&sc->hs_lock);
1119 		device_printf(dev, "Unable to register SCSI bus\n");
1124 	if (xpt_create_path(&sc->hs_path, /*periph*/NULL,
1125 	    cam_sim_path(sc->hs_sim),
1126 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1127 		xpt_bus_deregister(cam_sim_path(sc->hs_sim));
1128 		cam_sim_free(sc->hs_sim, /*free_devq*/TRUE);
1129 		mtx_unlock(&sc->hs_lock);
1130 		device_printf(dev, "Unable to create path\n");
1135 	mtx_unlock(&sc->hs_lock);
1137 	root_mount_rel(root_mount_token);
	/* Failure-cleanup path: release pools allocated above. */
1142 	root_mount_rel(root_mount_token);
1143 	while (!LIST_EMPTY(&sc->hs_free_list)) {
1144 		reqp = LIST_FIRST(&sc->hs_free_list);
1145 		LIST_REMOVE(reqp, link);
1146 		free(reqp, M_DEVBUF);
1149 	while (!LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) {
1150 		sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list);
1151 		LIST_REMOVE(sgl_node, link);
1152 		for (j = 0; j < HV_MAX_MULTIPAGE_BUFFER_COUNT; j++) {
1154 			    (void*)sgl_node->sgl_data->sg_segs[j].ss_paddr) {
1155 				free((void*)sgl_node->sgl_data->sg_segs[j].ss_paddr, M_DEVBUF);
1158 		sglist_free(sgl_node->sgl_data);
1159 		free(sgl_node, M_DEVBUF);
1166 * @brief StorVSC device detach function
1168 * This function is responsible for safely detaching a
1169 * StorVSC device. This includes waiting for inbound responses
1170 * to complete and freeing associated per-device structures.
1172 * @param dev a device
1173 * returns 0 on success
/*
 * storvsc_detach() - tear down a StorVSC device: mark hs_destroy (which
 * blocks new outbound I/O via get_stor_device), wait on hs_drain_sema
 * until the channel callback reports all outstanding requests complete,
 * close the VMBus channel, then free the request list and the global
 * SG-list page pool.
 * NOTE(review): hs_drain_sema is waited on unconditionally here; whether a
 * zero-outstanding fast path exists is not visible in this extract.
 */
1176 storvsc_detach(device_t dev)
1178 	struct storvsc_softc *sc = device_get_softc(dev);
1179 	struct hv_storvsc_request *reqp = NULL;
1180 	struct hv_device *hv_device = vmbus_get_devctx(dev);
1181 	struct hv_sgl_node *sgl_node = NULL;
1184 	sc->hs_destroy = TRUE;
1187 	 * At this point, all outbound traffic should be disabled. We
1188 	 * only allow inbound traffic (responses) to proceed so that
1189 	 * outstanding requests can be completed.
1192 	sc->hs_drain_notify = TRUE;
1193 	sema_wait(&sc->hs_drain_sema);
1194 	sc->hs_drain_notify = FALSE;
1197 	 * Since we have already drained, we don't need to busy wait.
1198 	 * The call to close the channel will reset the callback
1199 	 * under the protection of the incoming channel lock.
1202 	hv_vmbus_channel_close(hv_device->channel);
1204 	mtx_lock(&sc->hs_lock);
1205 	while (!LIST_EMPTY(&sc->hs_free_list)) {
1206 		reqp = LIST_FIRST(&sc->hs_free_list);
1207 		LIST_REMOVE(reqp, link);
1209 		free(reqp, M_DEVBUF);
1211 	mtx_unlock(&sc->hs_lock);
1213 	while (!LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) {
1214 		sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list);
1215 		LIST_REMOVE(sgl_node, link);
1216 		for (j = 0; j < HV_MAX_MULTIPAGE_BUFFER_COUNT; j++){
1218 			    (void*)sgl_node->sgl_data->sg_segs[j].ss_paddr) {
1219 				free((void*)sgl_node->sgl_data->sg_segs[j].ss_paddr, M_DEVBUF);
1222 		sglist_free(sgl_node->sgl_data);
1223 		free(sgl_node, M_DEVBUF);
1229 #if HVS_TIMEOUT_TEST
1231 * @brief unit test for timed out operations
1233 * This function provides unit testing capability to simulate
1234 * timed out operations. Recompilation with HV_TIMEOUT_TEST=1
1237 * @param reqp pointer to a request structure
1238 * @param opcode SCSI operation being performed
1239 * @param wait if 1, wait for I/O to complete
/*
 * storvsc_timeout_test() - unit-test helper (compiled only under
 * HVS_TIMEOUT_TEST) that re-issues a request from inside the timeout
 * handler and optionally waits for its completion to verify the
 * timer-handler/io-done ordering described in the inline comment below.
 * NOTE(review): this extract has gaps; some braces and early returns are
 * not visible.
 */
1242 storvsc_timeout_test(struct hv_storvsc_request *reqp,
1243 		uint8_t opcode, int wait)
1246 	union ccb *ccb = reqp->ccb;
1247 	struct storvsc_softc *sc = reqp->softc;
	/* Only act on the opcode this test case targets. */
1249 	if (reqp->vstor_packet.vm_srb.cdb[0] != opcode) {
1254 	mtx_lock(&reqp->event.mtx);
1256 	ret = hv_storvsc_io_request(sc->hs_dev, reqp);
1259 		mtx_unlock(&reqp->event.mtx);
1261 		printf("%s: io_request failed with %d.\n",
1263 		ccb->ccb_h.status = CAM_PROVIDE_FAIL;
1264 		mtx_lock(&sc->hs_lock);
1265 		storvsc_free_request(sc, reqp);
1267 		mtx_unlock(&sc->hs_lock);
1272 	xpt_print(ccb->ccb_h.path,
1273 	    "%u: %s: waiting for IO return.\n",
1275 	ret = cv_timedwait(&reqp->event.cv, &reqp->event.mtx, 60*hz);
1276 	mtx_unlock(&reqp->event.mtx);
1277 	xpt_print(ccb->ccb_h.path, "%u: %s: %s.\n",
1278 	    ticks, __func__, (ret == 0)?
1279 	    "IO return detected" :
1280 	    "IO return not detected");
1282 	 * Now both the timer handler and io done are running
1283 	 * simultaneously. We want to confirm the io done always
1284 	 * finishes after the timer handler exits. So reqp used by
1285 	 * timer handler is not freed or stale. Do busy loop for
1286 	 * another 1/10 second to make sure io done does
1287 	 * wait for the timer handler to complete.
1290 	mtx_lock(&sc->hs_lock);
1291 	xpt_print(ccb->ccb_h.path,
1292 	    "%u: %s: finishing, queue frozen %d, "
1293 	    "ccb status 0x%x scsi_status 0x%x.\n",
1294 	    ticks, __func__, sc->hs_frozen,
1296 	    ccb->csio.scsi_status);
1297 	mtx_unlock(&sc->hs_lock);
1300 #endif /* HVS_TIMEOUT_TEST */
1304 * @brief timeout handler for requests
1306 * This function is called as a result of a callout expiring.
1308 * @param arg pointer to a request
/*
 * storvsc_timeout() - callout handler for a timed-out I/O.  First expiry
 * (retries == 0): log, re-arm the callout for one more timeout period.
 * Second expiry: log again and freeze the SIM queue (once) so CAM stops
 * issuing new commands while we wait for the host to respond.
 * NOTE(review): this extract has gaps; the retries increment and early
 * return between the two phases are not visible.
 */
1311 storvsc_timeout(void *arg)
1313 	struct hv_storvsc_request *reqp = arg;
1314 	struct storvsc_softc *sc = reqp->softc;
1315 	union ccb *ccb = reqp->ccb;
1317 	if (reqp->retries == 0) {
1318 		mtx_lock(&sc->hs_lock);
1319 		xpt_print(ccb->ccb_h.path,
1320 		    "%u: IO timed out (req=0x%p), wait for another %u secs.\n",
1321 		    ticks, reqp, ccb->ccb_h.timeout / 1000);
1322 		cam_error_print(ccb, CAM_ESF_ALL, CAM_EPF_ALL);
1323 		mtx_unlock(&sc->hs_lock);
1326 		callout_reset_sbt(&reqp->callout, SBT_1MS * ccb->ccb_h.timeout,
1327 		    0, storvsc_timeout, reqp, 0);
1328 #if HVS_TIMEOUT_TEST
1329 		storvsc_timeout_test(reqp, SEND_DIAGNOSTIC, 0);
1334 	mtx_lock(&sc->hs_lock);
1335 	xpt_print(ccb->ccb_h.path,
1336 	    "%u: IO (reqp = 0x%p) did not return for %u seconds, %s.\n",
1337 	    ticks, reqp, ccb->ccb_h.timeout * (reqp->retries+1) / 1000,
1338 	    (sc->hs_frozen == 0)?
1339 	    "freezing the queue" : "the queue is already frozen");
1340 	if (sc->hs_frozen == 0) {
1342 		xpt_freeze_simq(xpt_path_sim(ccb->ccb_h.path), 1);
1344 	mtx_unlock(&sc->hs_lock);
1346 #if HVS_TIMEOUT_TEST
1347 	storvsc_timeout_test(reqp, MODE_SELECT_10, 1);
1353 * @brief StorVSC device poll function
1355 * This function is responsible for servicing requests when
1356 * interrupts are disabled (i.e when we are dumping core.)
1358 * @param sim a pointer to a CAM SCSI interface module
/*
 * CAM poll entry point: service completions when interrupts are disabled
 * (e.g. while dumping core) by invoking the VMBUS channel callback directly.
 */
1361 storvsc_poll(struct cam_sim *sim)
1363 struct storvsc_softc *sc = cam_sim_softc(sim);
/*
 * CAM calls us with hs_lock held; drop it around the channel callback,
 * which completes I/O and presumably takes hs_lock itself (see the
 * locking in storvsc_io_done()), then reacquire before returning.
 */
1365 mtx_assert(&sc->hs_lock, MA_OWNED);
1366 mtx_unlock(&sc->hs_lock);
1367 hv_storvsc_on_channel_callback(sc->hs_dev->channel);
1368 mtx_lock(&sc->hs_lock);
1372 * @brief StorVSC device action function
1374 * This function is responsible for handling SCSI operations which
1375 * are passed from the CAM layer. The requests are in the form of
1376 * CAM control blocks which indicate the action being performed.
1377 * Not all actions require converting the request to a VSCSI protocol
1378 * message - these actions can be responded to by this driver.
1379 * Requests which are destined for a backend storage device are converted
1380 * to a VSCSI protocol message and sent on the channel connection associated
1383 * @param sim pointer to a CAM SCSI interface module
1384 * @param ccb pointer to a CAM control block
/*
 * CAM action entry point: dispatch on the CCB function code.  Query-type
 * CCBs are answered in place; SCSI I/O CCBs are converted to VSCSI
 * requests and sent to the host over VMBUS.
 * NOTE(review): this excerpt is missing some original lines (break
 * statements, braces, and case labels such as the XPT_SCSI_IO label that
 * presumably precedes XPT_IMMED_NOTIFY) — confirm against full source.
 */
1387 storvsc_action(struct cam_sim *sim, union ccb *ccb)
1389 struct storvsc_softc *sc = cam_sim_softc(sim);
1392 mtx_assert(&sc->hs_lock, MA_OWNED);
1393 switch (ccb->ccb_h.func_code) {
/* Report HBA capabilities and limits to CAM. */
1394 case XPT_PATH_INQ: {
1395 struct ccb_pathinq *cpi = &ccb->cpi;
1397 cpi->version_num = 1;
1398 cpi->hba_inquiry = PI_TAG_ABLE|PI_SDTR_ABLE;
1399 cpi->target_sprt = 0;
1400 cpi->hba_misc = PIM_NOBUSRESET;
1401 cpi->hba_eng_cnt = 0;
1402 cpi->max_target = STORVSC_MAX_TARGETS;
1403 cpi->max_lun = sc->hs_drv_props->drv_max_luns_per_target;
/* Use one past the highest target id as our own (initiator) id. */
1404 cpi->initiator_id = cpi->max_target;
1405 cpi->bus_id = cam_sim_bus(sim);
1406 cpi->base_transfer_speed = 300000;
/* Present the paravirtual bus as SAS/SPC-2 to CAM. */
1407 cpi->transport = XPORT_SAS;
1408 cpi->transport_version = 0;
1409 cpi->protocol = PROTO_SCSI;
1410 cpi->protocol_version = SCSI_REV_SPC2;
1411 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1412 strncpy(cpi->hba_vid, sc->hs_drv_props->drv_name, HBA_IDLEN);
1413 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1414 cpi->unit_number = cam_sim_unit(sim);
1416 ccb->ccb_h.status = CAM_REQ_CMP;
/* Report transfer settings; these are fixed for the virtual bus. */
1420 case XPT_GET_TRAN_SETTINGS: {
1421 struct ccb_trans_settings *cts = &ccb->cts;
1423 cts->transport = XPORT_SAS;
1424 cts->transport_version = 0;
1425 cts->protocol = PROTO_SCSI;
1426 cts->protocol_version = SCSI_REV_SPC2;
1428 /* enable tag queuing and disconnected mode */
1429 cts->proto_specific.valid = CTS_SCSI_VALID_TQ;
1430 cts->proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
1431 cts->proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
1432 cts->xport_specific.valid = CTS_SPI_VALID_DISC;
1433 cts->xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;
1435 ccb->ccb_h.status = CAM_REQ_CMP;
/* Settings are fixed; accept the request without applying anything. */
1439 case XPT_SET_TRAN_SETTINGS: {
1440 ccb->ccb_h.status = CAM_REQ_CMP;
/* Let CAM compute a generic extended-translation geometry. */
1444 case XPT_CALC_GEOMETRY:{
1445 cam_calc_geometry(&ccb->ccg, 1);
/* Device reset: forwarded to the host as a host reset (HVS_HOST_RESET). */
1450 case XPT_RESET_DEV:{
1452 if ((res = hv_storvsc_host_reset(sc->hs_dev)) != 0) {
1453 xpt_print(ccb->ccb_h.path,
1454 "hv_storvsc_host_reset failed with %d\n", res);
1455 ccb->ccb_h.status = CAM_PROVIDE_FAIL;
1459 ccb->ccb_h.status = CAM_REQ_CMP;
/* Without HVS_HOST_RESET, bus/device resets are rejected as invalid. */
1463 xpt_print(ccb->ccb_h.path,
1464 "%s reset not supported.\n",
1465 (ccb->ccb_h.func_code == XPT_RESET_BUS)?
1467 ccb->ccb_h.status = CAM_REQ_INVALID;
1470 #endif /* HVS_HOST_RESET */
/* SCSI I/O path: build a VSCSI request and ship it over VMBUS. */
1473 case XPT_IMMED_NOTIFY: {
1474 struct hv_storvsc_request *reqp = NULL;
1476 if (ccb->csio.cdb_len == 0) {
1477 panic("cdl_len is 0\n");
/* No free request structures: ask CAM to requeue and freeze the SIMQ. */
1480 if (LIST_EMPTY(&sc->hs_free_list)) {
1481 ccb->ccb_h.status = CAM_REQUEUE_REQ;
1482 if (sc->hs_frozen == 0) {
1484 xpt_freeze_simq(sim, /* count*/1);
/* Take a request structure off the per-softc free list and zero it. */
1490 reqp = LIST_FIRST(&sc->hs_free_list);
1491 LIST_REMOVE(reqp, link);
1493 bzero(reqp, sizeof(struct hv_storvsc_request));
1496 ccb->ccb_h.status |= CAM_SIM_QUEUED;
1497 if ((res = create_storvsc_request(ccb, reqp)) != 0) {
1498 ccb->ccb_h.status = CAM_REQ_INVALID;
/* Arm a per-request timeout unless the caller asked for none. */
1504 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1505 callout_init(&reqp->callout, CALLOUT_MPSAFE);
1506 callout_reset_sbt(&reqp->callout,
1507 SBT_1MS * ccb->ccb_h.timeout, 0,
1508 storvsc_timeout, reqp, 0);
1509 #if HVS_TIMEOUT_TEST
1510 cv_init(&reqp->event.cv, "storvsc timeout cv");
1511 mtx_init(&reqp->event.mtx, "storvsc timeout mutex",
1513 switch (reqp->vstor_packet.vm_srb.cdb[0]) {
1514 case MODE_SELECT_10:
1515 case SEND_DIAGNOSTIC:
1516 /* To have timer send the request. */
1521 #endif /* HVS_TIMEOUT_TEST */
/* Hand the request to the host; on failure return it to the free list. */
1525 if ((res = hv_storvsc_io_request(sc->hs_dev, reqp)) != 0) {
1526 xpt_print(ccb->ccb_h.path,
1527 "hv_storvsc_io_request failed with %d\n", res);
1528 ccb->ccb_h.status = CAM_PROVIDE_FAIL;
1529 storvsc_free_request(sc, reqp);
/* Unsupported function codes fall through to here. */
1537 ccb->ccb_h.status = CAM_REQ_INVALID;
1544 * @brief destroy bounce buffer
1546 * This function destroys a Scatter/Gather list that was created by
1547 * storvsc_create_bounce_buffer().
1549 * @param sgl - the Scatter/Gather list to be destroyed
1550 * @param sg_count - page count of the SG list.
/*
 * Return a bounce-buffer sglist to the global pool: move the first node
 * from the in-use list back to the free list, re-attaching @sgl to it.
 */
1554 storvsc_destroy_bounce_buffer(struct sglist *sgl)
1556 struct hv_sgl_node *sgl_node = NULL;
/* An empty in-use list means a create/destroy imbalance — log and bail
 * (the return is in an elided line). */
1557 if (LIST_EMPTY(&g_hv_sgl_page_pool.in_use_sgl_list)) {
1558 printf("storvsc error: not enough in use sgl\n");
1561 sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.in_use_sgl_list);
1562 LIST_REMOVE(sgl_node, link);
1563 sgl_node->sgl_data = sgl;
1564 LIST_INSERT_HEAD(&g_hv_sgl_page_pool.free_sgl_list, sgl_node, link);
1568 * @brief create bounce buffer
1570 * This function creates a Scatter/Gather list, which holds several
1571 * pages that can be aligned with the page size.
1573 * @param seg_count - SG-list segment count
1574 * @param write - if WRITE_TYPE, set SG list page used size to 0,
1575 * otherwise set used size to page size.
1577 * returns NULL if creation failed
/*
 * Allocate a bounce-buffer sglist from the global free pool and size it
 * for @seg_count segments.  For writes the used length starts at 0 (it is
 * filled in later by storvsc_copy_sgl_to_bounce_buf()); for reads each
 * segment is marked as a full page.  Returns the sglist, or NULL on pool
 * exhaustion (the NULL return is in an elided line).
 */
1579 static struct sglist *
1580 storvsc_create_bounce_buffer(uint16_t seg_count, int write)
1583 struct sglist *bounce_sgl = NULL;
1584 unsigned int buf_len = ((write == WRITE_TYPE) ? 0 : PAGE_SIZE);
1585 struct hv_sgl_node *sgl_node = NULL;
1587 /* get struct sglist from free_sgl_list */
1588 if (LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) {
1589 printf("storvsc error: not enough free sgl\n");
1592 sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list);
1593 LIST_REMOVE(sgl_node, link);
1594 bounce_sgl = sgl_node->sgl_data;
/* Track the node as in-use so destroy can give it back to the pool. */
1595 LIST_INSERT_HEAD(&g_hv_sgl_page_pool.in_use_sgl_list, sgl_node, link);
1597 bounce_sgl->sg_maxseg = seg_count;
1599 if (write == WRITE_TYPE)
1600 bounce_sgl->sg_nseg = 0;
1602 bounce_sgl->sg_nseg = seg_count;
1604 for (i = 0; i < seg_count; i++)
1605 bounce_sgl->sg_segs[i].ss_len = buf_len;
1611 * @brief copy data from SG list to bounce buffer
1613 * This function copies data from one SG list's segments to another SG
1614 * list which is used as a bounce buffer.
1616 * @param bounce_sgl - the destination SG list
1617 * @param orig_sgl - the segments of the source SG list.
1618 * @param orig_sgl_count - the count of segments.
1619 * @param seg_bits - indicates which segments need the bounce buffer,
/*
 * Copy data for a WRITE from the original DMA segments into the bounce
 * buffer.  Only segments whose bit is set in seg_bits are copied; their
 * bounce-segment lengths are updated to the original segment lengths.
 */
1624 storvsc_copy_sgl_to_bounce_buf(struct sglist *bounce_sgl,
1625 bus_dma_segment_t *orig_sgl,
1626 unsigned int orig_sgl_count,
1629 int src_sgl_idx = 0;
1631 for (src_sgl_idx = 0; src_sgl_idx < orig_sgl_count; src_sgl_idx++) {
1632 if (seg_bits & (1 << src_sgl_idx)) {
/* ss_paddr and ds_addr are treated as kernel virtual addresses here —
 * this matches the vtophys() calls elsewhere in this file. */
1633 memcpy((void*)bounce_sgl->sg_segs[src_sgl_idx].ss_paddr,
1634 (void*)orig_sgl[src_sgl_idx].ds_addr,
1635 orig_sgl[src_sgl_idx].ds_len);
1637 bounce_sgl->sg_segs[src_sgl_idx].ss_len =
1638 orig_sgl[src_sgl_idx].ds_len;
1644 * @brief copy data from an SG list used as a bounce buffer to another SG list
1646 * This function copies data from one SG list with a bounce
1647 * buffer into another SG list's segments.
1649 * @param dest_sgl - the destination SG list's segments
1650 * @param dest_sgl_count - the count of destination SG list segments.
1651 * @param src_sgl - the source SG list.
1652 * @param seg_bits - indicates which segments of the src SG list used the bounce buffer.
/*
 * Copy data for a READ back from the bounce buffer into the original DMA
 * segments.  Only segments whose bit is set in seg_bits were bounced and
 * therefore need copying.
 */
1656 storvsc_copy_from_bounce_buf_to_sgl(bus_dma_segment_t *dest_sgl,
1657 unsigned int dest_sgl_count,
1658 struct sglist* src_sgl,
1663 for (sgl_idx = 0; sgl_idx < dest_sgl_count; sgl_idx++) {
1664 if (seg_bits & (1 << sgl_idx)) {
1665 memcpy((void*)(dest_sgl[sgl_idx].ds_addr),
1666 (void*)(src_sgl->sg_segs[sgl_idx].ss_paddr),
1667 src_sgl->sg_segs[sgl_idx].ss_len);
1673 * @brief check whether the SG list needs a bounce buffer
1675 * This function checks whether a bounce buffer is needed for the SG list.
1677 * @param sgl - the SG list's segments
1678 * @param sg_count - the count of SG list segments.
1679 * @param bits - segment numbers that need a bounce buffer
1681 * returns -1 if the SG list does not need a bounce buffer
/*
 * Scan a DMA segment list for physical-contiguity "holes" that the host's
 * multipage buffer format cannot represent, recording which segments
 * would need bouncing in a bitmask.  A hole exists when an unaligned
 * segment is followed by more data, or when consecutive segments are not
 * physically adjacent.
 * NOTE(review): the bits out-parameter, the bitmask updates, and the
 * return statements are in elided lines of this excerpt.
 */
1684 storvsc_check_bounce_buffer_sgl(bus_dma_segment_t *sgl,
1685 unsigned int sg_count,
1690 uint64_t phys_addr = 0;
1691 uint64_t tmp_bits = 0;
1692 boolean_t found_hole = FALSE;
1693 boolean_t pre_aligned = TRUE;
/* Seed the scan with the alignment of the first segment. */
1701 phys_addr = vtophys(sgl[0].ds_addr);
1702 offset = phys_addr - trunc_page(phys_addr);
1705 pre_aligned = FALSE;
1709 for (i = 1; i < sg_count; i++) {
1710 phys_addr = vtophys(sgl[i].ds_addr);
1711 offset = phys_addr - trunc_page(phys_addr);
1714 if (FALSE == pre_aligned){
1716 * This segment is aligned, if the previous
1717 * one is not aligned, find a hole
1725 if (phys_addr != vtophys(sgl[i-1].ds_addr +
1728 * Check whether connect to previous
1729 * segment,if not, find the hole
1736 pre_aligned = FALSE;
1749 * @brief Fill in a request structure based on a CAM control block
1751 * Fills in a request structure based on the contents of a CAM control
1752 * block. The request structure holds the payload information for a
1753 * VSCSI protocol request.
1755 * @param ccb pointer to a CAM control block
1756 * @param reqp pointer to a request structure
/*
 * Translate a CAM SCSI I/O CCB into a VSCSI request: fill in addressing
 * (port/path/target/lun), copy the CDB, record the transfer direction,
 * and build the data buffer's physical page frame (PFN) array, bouncing
 * misaligned scatter/gather segments when necessary.
 * NOTE(review): several lines (returns, case labels, else branches) are
 * elided from this excerpt.
 */
1759 create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp)
1761 struct ccb_scsiio *csio = &ccb->csio;
1763 uint32_t bytes_to_copy = 0;
1764 uint32_t pfn_num = 0;
1766 uint64_t not_aligned_seg_bits = 0;
1768 /* refer to struct vmscsi_req for meanings of these two fields */
1769 reqp->vstor_packet.u.vm_srb.port =
1770 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path));
1771 reqp->vstor_packet.u.vm_srb.path_id =
1772 cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
1774 reqp->vstor_packet.u.vm_srb.target_id = ccb->ccb_h.target_id;
1775 reqp->vstor_packet.u.vm_srb.lun = ccb->ccb_h.target_lun;
/* Copy the CDB from wherever CAM put it (pointer or inline bytes). */
1777 reqp->vstor_packet.u.vm_srb.cdb_len = csio->cdb_len;
1778 if(ccb->ccb_h.flags & CAM_CDB_POINTER) {
1779 memcpy(&reqp->vstor_packet.u.vm_srb.u.cdb, csio->cdb_io.cdb_ptr,
1782 memcpy(&reqp->vstor_packet.u.vm_srb.u.cdb, csio->cdb_io.cdb_bytes,
/* Map CAM direction flags to the VSCSI data_in field. */
1786 switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
1788 reqp->vstor_packet.u.vm_srb.data_in = WRITE_TYPE;
1791 reqp->vstor_packet.u.vm_srb.data_in = READ_TYPE;
1794 reqp->vstor_packet.u.vm_srb.data_in = UNKNOWN_TYPE;
1797 reqp->vstor_packet.u.vm_srb.data_in = UNKNOWN_TYPE;
/* Point at CAM's autosense buffer for the completion path. */
1801 reqp->sense_data = &csio->sense_data;
1802 reqp->sense_info_len = csio->sense_len;
/* No data phase: nothing more to set up. */
1806 if (0 == csio->dxfer_len) {
1810 reqp->data_buf.length = csio->dxfer_len;
1812 switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
/* Flat virtual buffer: walk it page by page, recording each PFN. */
1813 case CAM_DATA_VADDR:
1815 bytes_to_copy = csio->dxfer_len;
1816 phys_addr = vtophys(csio->data_ptr);
1817 reqp->data_buf.offset = phys_addr & PAGE_MASK;
1819 while (bytes_to_copy != 0) {
1820 int bytes, page_offset;
1822 vtophys(&csio->data_ptr[reqp->data_buf.length -
1824 pfn = phys_addr >> PAGE_SHIFT;
1825 reqp->data_buf.pfn_array[pfn_num] = pfn;
1826 page_offset = phys_addr & PAGE_MASK;
/* Consume at most the remainder of the current page. */
1828 bytes = min(PAGE_SIZE - page_offset, bytes_to_copy);
1830 bytes_to_copy -= bytes;
/* Scatter/gather buffer (CAM_DATA_SG — label in an elided line). */
1842 bus_dma_segment_t *storvsc_sglist =
1843 (bus_dma_segment_t *)ccb->csio.data_ptr;
1844 u_int16_t storvsc_sg_count = ccb->csio.sglist_cnt;
1846 printf("Storvsc: get SG I/O operation, %d\n",
1847 reqp->vstor_packet.u.vm_srb.data_in);
/* Host multipage buffers can only describe a bounded segment count. */
1849 if (storvsc_sg_count > HV_MAX_MULTIPAGE_BUFFER_COUNT){
1850 printf("Storvsc: %d segments is too much, "
1851 "only support %d segments\n",
1852 storvsc_sg_count, HV_MAX_MULTIPAGE_BUFFER_COUNT);
1857 * We create our own bounce buffer function currently. Ideally
1858 * we should use BUS_DMA(9) framework. But with current BUS_DMA
1859 * code there is no callback API to check the page alignment of
1860 * middle segments before busdma can decide if a bounce buffer
1861 * is needed for particular segment. There is callback,
1862 * "bus_dma_filter_t *filter", but the parameters are not
1863 * sufficient for storvsc driver.
1865 * Add page alignment check in BUS_DMA(9) callback. Once
1866 * this is complete, switch the following code to use
1867 * BUS_DMA(9) for storvsc bounce buffer support.
1869 /* check if we need to create bounce buffer */
1870 ret = storvsc_check_bounce_buffer_sgl(storvsc_sglist,
/* NOTE(review): "¬_aligned_seg_bits" below is extraction mojibake —
 * an HTML-entity corruption of "&not_aligned_seg_bits"; restore the
 * address-of expression when reconciling with the full source. */
1871 storvsc_sg_count, ¬_aligned_seg_bits);
1874 storvsc_create_bounce_buffer(storvsc_sg_count,
1875 reqp->vstor_packet.u.vm_srb.data_in);
1876 if (NULL == reqp->bounce_sgl) {
1877 printf("Storvsc_error: "
1878 "create bounce buffer failed.\n");
1882 reqp->bounce_sgl_count = storvsc_sg_count;
1883 reqp->not_aligned_seg_bits = not_aligned_seg_bits;
1886 * if it is write, we need copy the original data
/* Pre-load the bounce buffer for writes; reads copy back on completion. */
1889 if (WRITE_TYPE == reqp->vstor_packet.u.vm_srb.data_in) {
1890 storvsc_copy_sgl_to_bounce_buf(
1894 reqp->not_aligned_seg_bits);
1897 /* transfer virtual address to physical frame number */
/* For each segment, take the PFN from the bounce copy if it was
 * bounced (bit set), otherwise from the original segment. */
1898 if (reqp->not_aligned_seg_bits & 0x1){
1900 vtophys(reqp->bounce_sgl->sg_segs[0].ss_paddr);
1903 vtophys(storvsc_sglist[0].ds_addr);
1905 reqp->data_buf.offset = phys_addr & PAGE_MASK;
1907 pfn = phys_addr >> PAGE_SHIFT;
1908 reqp->data_buf.pfn_array[0] = pfn;
1910 for (i = 1; i < storvsc_sg_count; i++) {
1911 if (reqp->not_aligned_seg_bits & (1 << i)) {
1913 vtophys(reqp->bounce_sgl->sg_segs[i].ss_paddr);
1916 vtophys(storvsc_sglist[i].ds_addr);
1919 pfn = phys_addr >> PAGE_SHIFT;
1920 reqp->data_buf.pfn_array[i] = pfn;
/* No bounce needed: PFNs come straight from the original segments. */
1923 phys_addr = vtophys(storvsc_sglist[0].ds_addr);
1925 reqp->data_buf.offset = phys_addr & PAGE_MASK;
1927 for (i = 0; i < storvsc_sg_count; i++) {
1928 phys_addr = vtophys(storvsc_sglist[i].ds_addr);
1929 pfn = phys_addr >> PAGE_SHIFT;
1930 reqp->data_buf.pfn_array[i] = pfn;
1933 /* check the last segment cross boundary or not */
1934 offset = phys_addr & PAGE_MASK;
/* Record one extra PFN when the final segment spills into the next page. */
1937 vtophys(storvsc_sglist[i-1].ds_addr +
1938 PAGE_SIZE - offset);
1939 pfn = phys_addr >> PAGE_SHIFT;
1940 reqp->data_buf.pfn_array[i] = pfn;
1943 reqp->bounce_sgl_count = 0;
/* Unsupported CAM data-transfer mode. */
1948 printf("Unknow flags: %d\n", ccb->ccb_h.flags);
1956 * Modified from scsi_print_inquiry, which prints the detailed
1957 * information contained in scsi_inquiry_data.
1959 * Return 1 if it is valid, 0 otherwise.
/*
 * Validate INQUIRY response data: reject responses with an unexpected
 * qualifier or with empty vendor/product/revision strings (Windows 10 /
 * 2016 hosts return all-zero inquiry data for unpopulated slots — see
 * the caller in storvsc_io_done()).  Returns 1 if valid, 0 otherwise.
 * NOTE(review): the device-type switch (original lines 1975-1998) is
 * elided from this excerpt.
 */
1962 is_inquiry_valid(const struct scsi_inquiry_data *inq_data)
1965 char vendor[16], product[48], revision[16];
1968 * Check device type and qualifier
1970 if (!(SID_QUAL_IS_VENDOR_UNIQUE(inq_data) ||
1971 SID_QUAL(inq_data) == SID_QUAL_LU_CONNECTED))
1974 type = SID_TYPE(inq_data);
1999 * Check vendor, product, and revision
/* cam_strvis() sanitizes the fixed-width fields into NUL-terminated
 * strings so strlen() below is safe. */
2001 cam_strvis(vendor, inq_data->vendor, sizeof(inq_data->vendor),
2003 cam_strvis(product, inq_data->product, sizeof(inq_data->product),
2005 cam_strvis(revision, inq_data->revision, sizeof(inq_data->revision),
2007 if (strlen(vendor) == 0 ||
2008 strlen(product) == 0 ||
2009 strlen(revision) == 0)
2016 * @brief completion function before returning to CAM
2018 * I/O process has been completed and the result needs
2019 * to be passed to the CAM layer.
2020 * Free resources related to this request.
2022 * @param reqp pointer to a request structure
/*
 * I/O completion: undo bounce buffering, stop the timeout callout,
 * translate the VSCSI status into CAM status, filter bogus all-zero
 * INQUIRY data from newer Windows hosts, unfreeze the SIMQ if we froze
 * it, and return the request to the free list.
 * NOTE(review): some original lines (braces, xpt_done() call, else
 * branches) are elided from this excerpt.
 */
2025 storvsc_io_done(struct hv_storvsc_request *reqp)
2027 union ccb *ccb = reqp->ccb;
2028 struct ccb_scsiio *csio = &ccb->csio;
2029 struct storvsc_softc *sc = reqp->softc;
2030 struct vmscsi_req *vm_srb = &reqp->vstor_packet.u.vm_srb;
2031 bus_dma_segment_t *ori_sglist = NULL;
2032 int ori_sg_count = 0;
2034 /* destroy bounce buffer if it is used */
2035 if (reqp->bounce_sgl_count) {
2036 ori_sglist = (bus_dma_segment_t *)ccb->csio.data_ptr;
2037 ori_sg_count = ccb->csio.sglist_cnt;
2040 * If it is READ operation, we should copy back the data
2041 * to original SG list.
2043 if (READ_TYPE == reqp->vstor_packet.u.vm_srb.data_in) {
2044 storvsc_copy_from_bounce_buf_to_sgl(ori_sglist,
2047 reqp->not_aligned_seg_bits);
2050 storvsc_destroy_bounce_buffer(reqp->bounce_sgl);
2051 reqp->bounce_sgl_count = 0;
/* retries > 0 means storvsc_timeout() fired for this request. */
2054 if (reqp->retries > 0) {
2055 mtx_lock(&sc->hs_lock);
2056 #if HVS_TIMEOUT_TEST
2057 xpt_print(ccb->ccb_h.path,
2058 "%u: IO returned after timeout, "
2059 "waking up timer handler if any.\n", ticks);
2060 mtx_lock(&reqp->event.mtx);
2061 cv_signal(&reqp->event.cv);
2062 mtx_unlock(&reqp->event.mtx);
2065 xpt_print(ccb->ccb_h.path,
2066 "%u: IO returned after timeout, "
2067 "stopping timer if any.\n", ticks);
2068 mtx_unlock(&sc->hs_lock);
2073 * callout_drain() will wait for the timer handler to finish
2074 * if it is running. So we don't need any lock to synchronize
2075 * between this routine and the timer handler.
2076 * Note that we need to make sure reqp is not freed when timer
2077 * handler is using or will use it.
2079 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2080 callout_drain(&reqp->callout);
/* Translate VSCSI completion status into CAM status bits. */
2084 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2085 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2086 if (vm_srb->scsi_status == SCSI_STATUS_OK) {
2087 const struct scsi_generic *cmd;
2090 * Check whether the data for INQUIRY cmd is valid or
2091 * not. Windows 10 and Windows 2016 send all zero
2092 * inquiry data to VM even for unpopulated slots.
2094 cmd = (const struct scsi_generic *)
2095 ((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
2096 csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes);
2097 if (cmd->opcode == INQUIRY &&
2099 * XXX: Temporary work around disk hot plugin on win2k12r2,
2100 * only filtering the invalid disk on win10 or 2016 server.
2101 * So, the hot plugin on win10 and 2016 server needs
2104 vmstor_proto_version == VMSTOR_PROTOCOL_VERSION_WIN10 &&
2106 (const struct scsi_inquiry_data *)csio->data_ptr) == 0) {
/* Bogus all-zero inquiry: report the slot as empty to CAM. */
2107 ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
2109 mtx_lock(&sc->hs_lock);
2110 xpt_print(ccb->ccb_h.path,
2111 "storvsc uninstalled device\n");
2112 mtx_unlock(&sc->hs_lock);
2115 ccb->ccb_h.status |= CAM_REQ_CMP;
/* Non-OK SCSI status: report to CAM for sense-based recovery. */
2118 mtx_lock(&sc->hs_lock);
2119 xpt_print(ccb->ccb_h.path,
2120 "storvsc scsi_status = %d\n",
2121 vm_srb->scsi_status);
2122 mtx_unlock(&sc->hs_lock);
2123 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2126 ccb->csio.scsi_status = (vm_srb->scsi_status & 0xFF);
2127 ccb->csio.resid = ccb->csio.dxfer_len - vm_srb->transfer_len;
2129 if (reqp->sense_info_len != 0) {
2130 csio->sense_resid = csio->sense_len - reqp->sense_info_len;
2131 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
/* If we froze the SIMQ (free-list exhaustion or timeout), release it now. */
2134 mtx_lock(&sc->hs_lock);
2135 if (reqp->softc->hs_frozen == 1) {
2136 xpt_print(ccb->ccb_h.path,
2137 "%u: storvsc unfreezing softc 0x%p.\n",
2138 ticks, reqp->softc);
2139 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2140 reqp->softc->hs_frozen = 0;
2142 storvsc_free_request(sc, reqp);
2144 mtx_unlock(&sc->hs_lock);
2148 * @brief Free a request structure
2150 * Free a request structure by returning it to the free list
2152 * @param sc pointer to a softc
2153 * @param reqp pointer to a request structure
/* Return a request structure to the softc's free list for reuse. */
2156 storvsc_free_request(struct storvsc_softc *sc, struct hv_storvsc_request *reqp)
2159 LIST_INSERT_HEAD(&sc->hs_free_list, reqp, link);
2163 * @brief Determine type of storage device from GUID
2165 * Using the type GUID, determine if this is a StorVSC (paravirtual
2166 * SCSI) or BlkVSC (paravirtual IDE) device.
2168 * @param dev a device
/*
 * Classify a VMBUS device by comparing its type GUID against the known
 * BlkVSC (paravirtual IDE) and StorVSC (paravirtual SCSI) GUIDs.
 * Returns DRIVER_UNKNOWN when neither GUID matches.
 */
2171 static enum hv_storage_type
2172 storvsc_get_storage_type(device_t dev)
2174 const char *p = vmbus_get_type(dev);
2176 if (!memcmp(p, &gBlkVscDeviceType, sizeof(hv_guid))) {
2177 return DRIVER_BLKVSC;
2178 } else if (!memcmp(p, &gStorVscDeviceType, sizeof(hv_guid))) {
2179 return DRIVER_STORVSC;
2181 return (DRIVER_UNKNOWN);