2 * Copyright (c) 2009-2012 Microsoft Corp.
3 * Copyright (c) 2010-2012 Citrix Inc.
4 * Copyright (c) 2012 NetApp Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * HyperV vmbus network VSC (virtual services client) module
37 #include <sys/param.h>
38 #include <sys/kernel.h>
39 #include <sys/socket.h>
42 #include <net/if_var.h>
43 #include <net/if_arp.h>
44 #include <machine/bus.h>
45 #include <machine/atomic.h>
47 #include <dev/hyperv/include/hyperv.h>
48 #include "hv_net_vsc.h"
50 #include "hv_rndis_filter.h"
/* malloc(9) type under which all of this driver's allocations are tagged. */
52 MALLOC_DEFINE(M_NETVSC, "netvsc", "Hyper-V netvsc driver");
55  * Forward declarations
/* Prototypes for the static helpers defined later in this file. */
57 static void hv_nv_on_channel_callback(void *context);
58 static int hv_nv_init_send_buffer_with_net_vsp(struct hv_device *device);
59 static int hv_nv_init_rx_buffer_with_net_vsp(struct hv_device *device);
60 static int hv_nv_destroy_send_buffer(netvsc_dev *net_dev);
61 static int hv_nv_destroy_rx_buffer(netvsc_dev *net_dev);
62 static int hv_nv_connect_to_vsp(struct hv_device *device);
63 static void hv_nv_on_send_completion(netvsc_dev *net_dev,
64 struct hv_device *device, hv_vm_packet_descriptor *pkt);
65 static void hv_nv_on_receive(netvsc_dev *net_dev,
66 struct hv_device *device, hv_vm_packet_descriptor *pkt);
/*
 * Allocate and zero a netvsc_dev for this hv_device and link it into the
 * per-interface softc (sc->net_dev).  M_WAITOK | M_ZERO means the malloc
 * cannot fail and the structure comes back zeroed.
 * NOTE(review): body lines are missing from this excerpt (e.g. the
 * netvsc_dev local declaration and return) -- confirm against full source.
 */
71 static inline netvsc_dev *
72 hv_nv_alloc_net_device(struct hv_device *device)
75 hn_softc_t *sc = device_get_softc(device->device);
77 net_dev = malloc(sizeof(netvsc_dev), M_NETVSC, M_WAITOK | M_ZERO);
/* Back-pointer to the vmbus device; destroy flag starts cleared. */
79 net_dev->dev = device;
80 net_dev->destroy = FALSE;
81 sc->net_dev = net_dev;
/*
 * Fetch the netvsc_dev for outbound use.  A device that is marked for
 * destruction (net_dev->destroy) is filtered out -- the elided branch body
 * presumably returns NULL; confirm against the full source.
 */
89 static inline netvsc_dev *
90 hv_nv_get_outbound_net_device(struct hv_device *device)
92 hn_softc_t *sc = device_get_softc(device->device);
/* NOTE(review): stray second semicolon below -- harmless empty statement,
 * but should be reduced to a single ';'. */
93 netvsc_dev *net_dev = sc->net_dev;;
95 if ((net_dev != NULL) && net_dev->destroy) {
/*
 * Fetch the netvsc_dev for inbound (receive/completion) use.  Unlike the
 * outbound variant, a device being destroyed is still usable here as long
 * as sends remain outstanding, so their completions can drain.
 */
105 static inline netvsc_dev *
106 hv_nv_get_inbound_net_device(struct hv_device *device)
108 hn_softc_t *sc = device_get_softc(device->device);
/* NOTE(review): stray second semicolon below -- should be a single ';'. */
109 netvsc_dev *net_dev = sc->net_dev;;
111 if (net_dev == NULL) {
115 * When the device is being destroyed, we only
116 * permit incoming packets while there are still
117 * outstanding sends awaiting completion.
119 if (net_dev->destroy && net_dev->num_outstanding_sends == 0) {
/*
 * Find and atomically claim a free section of the chimney send buffer.
 * Scans the allocation bitmap one word at a time: ffsl(~word) yields the
 * first clear bit, and atomic_testandset_long() claims it -- if another
 * thread raced us and the bit was already set, we retry.  Returns the
 * section index, or NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX if the
 * buffer is full.
 */
127 hv_nv_get_next_send_section(netvsc_dev *net_dev)
129 unsigned long bitsmap_words = net_dev->bitsmap_words;
130 unsigned long *bitsmap = net_dev->send_section_bitsmap;
132 int ret = NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX;
135 for (i = 0; i < bitsmap_words; i++) {
/* ffsl() is 1-based; the elided lines presumably adjust/skip on 0. */
136 idx = ffsl(~bitsmap[i]);
141 KASSERT(i * BITS_PER_LONG + idx < net_dev->send_section_count,
142 ("invalid i %d and idx %lu", i, idx));
/* Lost the race for this bit -- another claimant set it first. */
144 if (atomic_testandset_long(&bitsmap[i], idx))
147 ret = i * BITS_PER_LONG + idx;
155 * Net VSC initialize receive buffer with net VSP
157 * Net VSP: Network virtual services provider, also known as the
158 * Hyper-V extensible switch and the synthetic data path.
/*
 * Allocates the contiguous receive buffer, shares it with the host via a
 * GPADL, sends the send_rx_buf handshake, and records the section layout
 * the host reports back.  Returns 0 on success; on failure falls through
 * to hv_nv_destroy_rx_buffer() for cleanup.
 */
161 hv_nv_init_rx_buffer_with_net_vsp(struct hv_device *device)
167 net_dev = hv_nv_get_outbound_net_device(device);
/* Physically contiguous, zeroed, page-aligned buffer shared with the host. */
172 net_dev->rx_buf = contigmalloc(net_dev->rx_buf_size, M_NETVSC,
173 M_ZERO, 0UL, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
176 * Establish the GPADL handle for this buffer on this channel.
177 * Note: This call uses the vmbus connection rather than the
178 * channel to establish the gpadl handle.
179 * GPADL: Guest physical address descriptor list.
181 ret = hv_vmbus_channel_establish_gpadl(
182 device->channel, net_dev->rx_buf,
183 net_dev->rx_buf_size, &net_dev->rx_buf_gpadl_handle);
/* NOTE(review): stale commented-out wait left from a port -- candidate
 * for removal once confirmed unnecessary. */
188 /* sema_wait(&ext->channel_init_sema); KYS CHECK */
190 /* Notify the NetVsp of the gpadl handle */
191 init_pkt = &net_dev->channel_init_packet;
193 memset(init_pkt, 0, sizeof(nvsp_msg));
195 init_pkt->hdr.msg_type = nvsp_msg_1_type_send_rx_buf;
196 init_pkt->msgs.vers_1_msgs.send_rx_buf.gpadl_handle =
197 net_dev->rx_buf_gpadl_handle;
198 init_pkt->msgs.vers_1_msgs.send_rx_buf.id =
199 NETVSC_RECEIVE_BUFFER_ID;
201 /* Send the gpadl notification request */
203 ret = hv_vmbus_channel_send_packet(device->channel, init_pkt,
204 sizeof(nvsp_msg), (uint64_t)(uintptr_t)init_pkt,
205 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
206 HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
/* Block until hv_nv_on_send_completion() posts the host's reply. */
211 sema_wait(&net_dev->channel_init_sema);
213 /* Check the response */
214 if (init_pkt->msgs.vers_1_msgs.send_rx_buf_complete.status
215 != nvsp_status_success) {
/* Record the section layout the host reported for the receive buffer. */
220 net_dev->rx_section_count =
221 init_pkt->msgs.vers_1_msgs.send_rx_buf_complete.num_sections;
223 net_dev->rx_sections = malloc(net_dev->rx_section_count *
224 sizeof(nvsp_1_rx_buf_section), M_NETVSC, M_WAITOK);
225 memcpy(net_dev->rx_sections,
226 init_pkt->msgs.vers_1_msgs.send_rx_buf_complete.sections,
227 net_dev->rx_section_count * sizeof(nvsp_1_rx_buf_section));
231 * For first release, there should only be 1 section that represents
232 * the entire receive buffer
234 if (net_dev->rx_section_count != 1
235 || net_dev->rx_sections->offset != 0) {
/* Failure path: undo the allocation/GPADL work done above. */
243 hv_nv_destroy_rx_buffer(net_dev);
250 * Net VSC initialize send buffer with net VSP
/*
 * Mirror of the receive-buffer setup for the chimney send buffer:
 * contigmalloc, GPADL, send_send_buf handshake, then derive the section
 * count and allocate the section-allocation bitmap.  On failure falls
 * through to hv_nv_destroy_send_buffer().
 */
253 hv_nv_init_send_buffer_with_net_vsp(struct hv_device *device)
259 net_dev = hv_nv_get_outbound_net_device(device);
264 net_dev->send_buf = contigmalloc(net_dev->send_buf_size, M_NETVSC,
265 M_ZERO, 0UL, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
266 if (net_dev->send_buf == NULL) {
272 * Establish the gpadl handle for this buffer on this channel.
273 * Note: This call uses the vmbus connection rather than the
274 * channel to establish the gpadl handle.
276 ret = hv_vmbus_channel_establish_gpadl(device->channel,
277 net_dev->send_buf, net_dev->send_buf_size,
278 &net_dev->send_buf_gpadl_handle);
283 /* Notify the NetVsp of the gpadl handle */
285 init_pkt = &net_dev->channel_init_packet;
287 memset(init_pkt, 0, sizeof(nvsp_msg));
289 init_pkt->hdr.msg_type = nvsp_msg_1_type_send_send_buf;
/* NOTE(review): the send_rx_buf union member is used for a send-buffer
 * message.  It works only if the send_rx_buf/send_send_buf layouts match;
 * should be msgs.vers_1_msgs.send_send_buf -- confirm and fix upstream. */
290 init_pkt->msgs.vers_1_msgs.send_rx_buf.gpadl_handle =
291 net_dev->send_buf_gpadl_handle;
292 init_pkt->msgs.vers_1_msgs.send_rx_buf.id =
293 NETVSC_SEND_BUFFER_ID;
295 /* Send the gpadl notification request */
/* NOTE(review): missing (uintptr_t) intermediate cast below, unlike every
 * sibling call site -- provokes warnings on 32-bit (pointer/int size). */
297 ret = hv_vmbus_channel_send_packet(device->channel, init_pkt,
298 sizeof(nvsp_msg), (uint64_t)init_pkt,
299 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
300 HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
/* Block until the completion handler posts the host's reply. */
305 sema_wait(&net_dev->channel_init_sema);
307 /* Check the response */
308 if (init_pkt->msgs.vers_1_msgs.send_send_buf_complete.status
309 != nvsp_status_success) {
/* Host dictates the section size; count and bitmap size follow from it. */
314 net_dev->send_section_size =
315 init_pkt->msgs.vers_1_msgs.send_send_buf_complete.section_size;
316 net_dev->send_section_count =
317 net_dev->send_buf_size / net_dev->send_section_size;
318 net_dev->bitsmap_words = howmany(net_dev->send_section_count,
320 net_dev->send_section_bitsmap =
321 malloc(net_dev->bitsmap_words * sizeof(long), M_NETVSC,
/* Failure path: undo the allocation/GPADL work done above. */
327 hv_nv_destroy_send_buffer(net_dev);
334 * Net VSC destroy receive buffer
/*
 * Tear down the receive buffer in reverse order of setup: revoke the
 * buffer from the host, tear down the GPADL, free the contiguous buffer,
 * then free the section-description array.  Each step is guarded so this
 * is safe to call from a partially-completed init.
 */
337 hv_nv_destroy_rx_buffer(netvsc_dev *net_dev)
339 nvsp_msg *revoke_pkt;
343 * If we got a section count, it means we received a
344 * send_rx_buf_complete msg
345 * (ie sent nvsp_msg_1_type_send_rx_buf msg) therefore,
346 * we need to send a revoke msg here
348 if (net_dev->rx_section_count) {
349 /* Send the revoke receive buffer */
350 revoke_pkt = &net_dev->revoke_packet;
351 memset(revoke_pkt, 0, sizeof(nvsp_msg));
353 revoke_pkt->hdr.msg_type = nvsp_msg_1_type_revoke_rx_buf;
354 revoke_pkt->msgs.vers_1_msgs.revoke_rx_buf.id =
355 NETVSC_RECEIVE_BUFFER_ID;
357 ret = hv_vmbus_channel_send_packet(net_dev->dev->channel,
358 revoke_pkt, sizeof(nvsp_msg),
359 (uint64_t)(uintptr_t)revoke_pkt,
360 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, 0);
363 * If we failed here, we might as well return and have a leak
364 * rather than continue and a bugchk
371 /* Tear down the gpadl on the vsp end */
372 if (net_dev->rx_buf_gpadl_handle) {
/* NOTE(review): "gpdal" in the vmbus API name looks like an upstream
 * typo for "gpadl"; kept as-is since the symbol must match the header. */
373 ret = hv_vmbus_channel_teardown_gpdal(net_dev->dev->channel,
374 net_dev->rx_buf_gpadl_handle);
376 * If we failed here, we might as well return and have a leak
377 * rather than continue and a bugchk
/* Mark the handle released so a second call does not tear down twice. */
382 net_dev->rx_buf_gpadl_handle = 0;
385 if (net_dev->rx_buf) {
386 /* Free up the receive buffer */
387 contigfree(net_dev->rx_buf, net_dev->rx_buf_size, M_NETVSC);
388 net_dev->rx_buf = NULL;
391 if (net_dev->rx_sections) {
392 free(net_dev->rx_sections, M_NETVSC);
393 net_dev->rx_sections = NULL;
394 net_dev->rx_section_count = 0;
401 * Net VSC destroy send buffer
/*
 * Tear down the chimney send buffer in reverse order of setup: revoke,
 * tear down the GPADL, free the contiguous buffer, then free the section
 * bitmap.  Safe to call from a partially-completed init.
 */
404 hv_nv_destroy_send_buffer(netvsc_dev *net_dev)
406 nvsp_msg *revoke_pkt;
410 * If we have a section size, it means we received a
411 * send_send_buf_complete msg
412 * (ie we sent an nvsp_msg_1_type_send_send_buf msg), therefore
413 * we need to send a revoke msg here
415 if (net_dev->send_section_size) {
416 /* Send the revoke send buffer */
417 revoke_pkt = &net_dev->revoke_packet;
418 memset(revoke_pkt, 0, sizeof(nvsp_msg));
420 revoke_pkt->hdr.msg_type =
421 nvsp_msg_1_type_revoke_send_buf;
422 revoke_pkt->msgs.vers_1_msgs.revoke_send_buf.id =
423 NETVSC_SEND_BUFFER_ID;
425 ret = hv_vmbus_channel_send_packet(net_dev->dev->channel,
426 revoke_pkt, sizeof(nvsp_msg),
427 (uint64_t)(uintptr_t)revoke_pkt,
428 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, 0);
430 * If we failed here, we might as well return and have a leak
431 * rather than continue and a bugchk
438 /* Tear down the gpadl on the vsp end */
439 if (net_dev->send_buf_gpadl_handle) {
440 ret = hv_vmbus_channel_teardown_gpdal(net_dev->dev->channel,
441 net_dev->send_buf_gpadl_handle);
444 * If we failed here, we might as well return and have a leak
445 * rather than continue and a bugchk
/* Mark the handle released so a second call does not tear down twice. */
450 net_dev->send_buf_gpadl_handle = 0;
453 if (net_dev->send_buf) {
454 /* Free up the send buffer */
455 contigfree(net_dev->send_buf, net_dev->send_buf_size, M_NETVSC);
456 net_dev->send_buf = NULL;
459 if (net_dev->send_section_bitsmap) {
460 free(net_dev->send_section_bitsmap, M_NETVSC);
468 * Attempt to negotiate the caller-specified NVSP version
470 * For NVSP v2, Server 2008 R2 does not set
471 * init_pkt->msgs.init_msgs.init_compl.negotiated_prot_vers
472 * to the negotiated version, so we cannot rely on that.
/*
 * Sends an NVSP init message offering exactly nvsp_ver (both min and max
 * set to the same value) and waits for the host's init-complete reply.
 * Returns 0 when the host accepts the version; the elided tail presumably
 * returns non-zero otherwise.
 */
475 hv_nv_negotiate_nvsp_protocol(struct hv_device *device, netvsc_dev *net_dev,
481 init_pkt = &net_dev->channel_init_packet;
482 memset(init_pkt, 0, sizeof(nvsp_msg));
483 init_pkt->hdr.msg_type = nvsp_msg_type_init;
486 * Specify parameter as the only acceptable protocol version
488 init_pkt->msgs.init_msgs.init.p1.protocol_version = nvsp_ver;
489 init_pkt->msgs.init_msgs.init.protocol_version_2 = nvsp_ver;
491 /* Send the init request */
492 ret = hv_vmbus_channel_send_packet(device->channel, init_pkt,
493 sizeof(nvsp_msg), (uint64_t)(uintptr_t)init_pkt,
494 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
495 HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
/* Block until the completion handler posts the host's reply. */
499 sema_wait(&net_dev->channel_init_sema);
501 if (init_pkt->msgs.init_msgs.init_compl.status != nvsp_status_success)
508 * Send NDIS version 2 config packet containing MTU.
510 * Not valid for NDIS version 1.
/*
 * Informs the host of our MTU and VLAN (802.1q) capability via the NVSP v2
 * send_ndis_config message.  Fire-and-forget: no completion is requested.
 */
513 hv_nv_send_ndis_config(struct hv_device *device, uint32_t mtu)
519 net_dev = hv_nv_get_outbound_net_device(device);
524 * Set up configuration packet, write MTU
525 * Indicate we are capable of handling VLAN tags
527 init_pkt = &net_dev->channel_init_packet;
528 memset(init_pkt, 0, sizeof(nvsp_msg));
529 init_pkt->hdr.msg_type = nvsp_msg_2_type_send_ndis_config;
530 init_pkt->msgs.vers_2_msgs.send_ndis_config.mtu = mtu;
/* NOTE(review): the statement below is truncated in this excerpt -- the
 * leading "init_pkt->" and the assigned value are on elided lines. */
532 msgs.vers_2_msgs.send_ndis_config.capabilities.u1.u2.ieee8021q
535 /* Send the configuration packet */
536 ret = hv_vmbus_channel_send_packet(device->channel, init_pkt,
537 sizeof(nvsp_msg), (uint64_t)(uintptr_t)init_pkt,
538 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, 0);
546 * Net VSC connect to VSP
/*
 * Full NVSP bring-up: negotiate the highest mutually-supported protocol
 * version (newest first), optionally push the MTU (NVSP >= 2), announce
 * our NDIS version, then post the receive and send buffers to the host.
 */
549 hv_nv_connect_to_vsp(struct hv_device *device)
553 uint32_t ndis_version;
554 uint32_t protocol_list[] = { NVSP_PROTOCOL_VERSION_1,
555 NVSP_PROTOCOL_VERSION_2,
556 NVSP_PROTOCOL_VERSION_4,
557 NVSP_PROTOCOL_VERSION_5 };
559 int protocol_number = nitems(protocol_list);
561 device_t dev = device->device;
562 hn_softc_t *sc = device_get_softc(dev);
563 struct ifnet *ifp = sc->hn_ifp;
565 net_dev = hv_nv_get_outbound_net_device(device);
571 * Negotiate the NVSP version. Try the latest NVSP first.
573 for (i = protocol_number - 1; i >= 0; i--) {
574 if (hv_nv_negotiate_nvsp_protocol(device, net_dev,
575 protocol_list[i]) == 0) {
576 net_dev->nvsp_version = protocol_list[i];
578 device_printf(dev, "Netvsc: got version 0x%x\n",
579 net_dev->nvsp_version);
/* All offered versions rejected -- elided lines presumably bail out. */
586 device_printf(dev, "failed to negotiate a valid "
592 * Set the MTU if supported by this NVSP protocol version
593 * This needs to be right after the NVSP init message per Haiyang
595 if (net_dev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
596 ret = hv_nv_send_ndis_config(device, ifp->if_mtu);
599 * Send the NDIS version
601 init_pkt = &net_dev->channel_init_packet;
603 memset(init_pkt, 0, sizeof(nvsp_msg));
/* NDIS 6.1 for NVSP <= 4, NDIS 6.30 for NVSP 5. */
605 if (net_dev->nvsp_version <= NVSP_PROTOCOL_VERSION_4) {
606 ndis_version = NDIS_VERSION_6_1;
608 ndis_version = NDIS_VERSION_6_30;
611 init_pkt->hdr.msg_type = nvsp_msg_1_type_send_ndis_vers;
612 init_pkt->msgs.vers_1_msgs.send_ndis_vers.ndis_major_vers =
613 (ndis_version & 0xFFFF0000) >> 16;
614 init_pkt->msgs.vers_1_msgs.send_ndis_vers.ndis_minor_vers =
615 ndis_version & 0xFFFF;
617 /* Send the init request */
619 ret = hv_vmbus_channel_send_packet(device->channel, init_pkt,
620 sizeof(nvsp_msg), (uint64_t)(uintptr_t)init_pkt,
621 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, 0);
626 * TODO: BUGBUG - We have to wait for the above msg since the netvsp
627 * uses KMCL which acknowledges packet (completion packet)
628 * since our Vmbus always set the
629 * HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED flag
631 /* sema_wait(&NetVscChannel->channel_init_sema); */
633 /* Post the big receive buffer to NetVSP */
/* Legacy (smaller) receive buffer for pre-NVSP-4 hosts. */
634 if (net_dev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
635 net_dev->rx_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
637 net_dev->rx_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
638 net_dev->send_buf_size = NETVSC_SEND_BUFFER_SIZE;
640 ret = hv_nv_init_rx_buffer_with_net_vsp(device);
642 ret = hv_nv_init_send_buffer_with_net_vsp(device);
649 * Net VSC disconnect from VSP
/* Release both host-shared buffers; inverse of the connect-time setup. */
652 hv_nv_disconnect_from_vsp(netvsc_dev *net_dev)
654 hv_nv_destroy_rx_buffer(net_dev);
655 hv_nv_destroy_send_buffer(net_dev);
659 * Net VSC on device add
661 * Callback when the device belonging to this driver is added
/*
 * Device-attach path: allocate the netvsc_dev, init the handshake
 * semaphore, open the vmbus channel with our callback, then run the NVSP
 * connect sequence.  The tail lines here are the error-unwind path
 * (close channel, destroy semaphore, free net_dev).
 */
664 hv_nv_on_device_add(struct hv_device *device, void *additional_info)
669 net_dev = hv_nv_alloc_net_device(device);
673 /* Initialize the NetVSC channel extension */
/* Counted semaphore used to wait for host init/handshake completions. */
675 sema_init(&net_dev->channel_init_sema, 0, "netdev_sema");
680 ret = hv_vmbus_channel_open(device->channel,
681 NETVSC_DEVICE_RING_BUFFER_SIZE, NETVSC_DEVICE_RING_BUFFER_SIZE,
682 NULL, 0, hv_nv_on_channel_callback, device);
687 * Connect with the NetVsp
689 ret = hv_nv_connect_to_vsp(device);
/* --- error unwind below --- */
696 /* Now, we can close the channel safely */
698 hv_vmbus_channel_close(device->channel);
702 * Free the packet buffers on the netvsc device packet queue.
703 * Release other resources.
706 sema_destroy(&net_dev->channel_init_sema);
707 free(net_dev, M_NETVSC);
714 * Net VSC on device remove
/*
 * Device-detach path: mark the device destroyed under the channel's
 * inbound lock (stops new traffic), wait for in-flight sends to drain,
 * disconnect from the VSP, close the channel, and free everything.
 */
717 hv_nv_on_device_remove(struct hv_device *device, boolean_t destroy_channel)
719 hn_softc_t *sc = device_get_softc(device->device);
/* NOTE(review): stray second semicolon below -- should be a single ';'. */
720 netvsc_dev *net_dev = sc->net_dev;;
722 /* Stop outbound traffic ie sends and receives completions */
723 mtx_lock(&device->channel->inbound_lock);
724 net_dev->destroy = TRUE;
725 mtx_unlock(&device->channel->inbound_lock);
727 /* Wait for all send completions */
/* Busy-wait loop body is elided here; presumably sleeps briefly. */
728 while (net_dev->num_outstanding_sends) {
732 hv_nv_disconnect_from_vsp(net_dev);
734 /* At this point, no one should be accessing net_dev except in here */
736 /* Now, we can close the channel safely */
738 if (!destroy_channel) {
739 device->channel->state =
740 HV_CHANNEL_CLOSING_NONDESTRUCTIVE_STATE;
743 hv_vmbus_channel_close(device->channel);
745 sema_destroy(&net_dev->channel_init_sema);
746 free(net_dev, M_NETVSC);
752 * Net VSC on send completion
/*
 * Handles completion packets from the host.  Handshake completions
 * (init / send_rx_buf / send_send_buf) are copied into
 * channel_init_packet and the init semaphore is posted to wake the
 * waiting thread.  RNDIS packet completions recover the netvsc_packet
 * from the transaction id, release its chimney send section (if any)
 * back to the bitmap, invoke the upper layer's completion callback, and
 * decrement the outstanding-send counter.
 */
755 hv_nv_on_send_completion(netvsc_dev *net_dev,
756 struct hv_device *device, hv_vm_packet_descriptor *pkt)
758 nvsp_msg *nvsp_msg_pkt;
759 netvsc_packet *net_vsc_pkt;
/* The NVSP message follows the vmbus descriptor; data_offset8 is in
 * units of 8 bytes. */
762 (nvsp_msg *)((unsigned long)pkt + (pkt->data_offset8 << 3));
764 if (nvsp_msg_pkt->hdr.msg_type == nvsp_msg_type_init_complete
765 || nvsp_msg_pkt->hdr.msg_type
766 == nvsp_msg_1_type_send_rx_buf_complete
767 || nvsp_msg_pkt->hdr.msg_type
768 == nvsp_msg_1_type_send_send_buf_complete) {
769 /* Copy the response back */
770 memcpy(&net_dev->channel_init_packet, nvsp_msg_pkt,
/* Wake the thread blocked in sema_wait() during the handshake. */
772 sema_post(&net_dev->channel_init_sema);
773 } else if (nvsp_msg_pkt->hdr.msg_type ==
774 nvsp_msg_1_type_send_rndis_pkt_complete) {
775 /* Get the send context */
777 (netvsc_packet *)(unsigned long)pkt->transaction_id;
778 if (NULL != net_vsc_pkt) {
779 if (net_vsc_pkt->send_buf_section_idx !=
780 NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX) {
/* Translate the section index into bitmap word + bit, verify the bit
 * is actually set, then clear it to free the section. */
784 idx = net_vsc_pkt->send_buf_section_idx /
786 KASSERT(idx < net_dev->bitsmap_words,
787 ("invalid section index %u",
788 net_vsc_pkt->send_buf_section_idx));
790 (net_vsc_pkt->send_buf_section_idx %
793 KASSERT(net_dev->send_section_bitsmap[idx] &
795 ("index bitmap 0x%lx, section index %u, "
796 "bitmap idx %d, bitmask 0x%lx",
797 net_dev->send_section_bitsmap[idx],
798 net_vsc_pkt->send_buf_section_idx,
801 &net_dev->send_section_bitsmap[idx], mask);
804 /* Notify the layer above us */
805 net_vsc_pkt->compl.send.on_send_completion(
806 net_vsc_pkt->compl.send.send_completion_context);
/* One fewer send in flight; device-remove spins on this counter. */
810 atomic_subtract_int(&net_dev->num_outstanding_sends, 1);
816 * Sends a packet on the specified Hyper-V device.
817 * Returns 0 on success, non-zero on failure.
/*
 * Wraps the RNDIS packet in an NVSP send_rndis_pkt message and hands it
 * to vmbus -- via the page-buffer variant when the payload is described
 * by page buffers, otherwise inline.  The outstanding-send counter is
 * only bumped when the vmbus send succeeded.
 */
820 hv_nv_on_send(struct hv_device *device, netvsc_packet *pkt)
826 net_dev = hv_nv_get_outbound_net_device(device);
830 send_msg.hdr.msg_type = nvsp_msg_1_type_send_rndis_pkt;
/* Channel type 0 = data (RMC_DATA), 1 = control (RMC_CONTROL). */
831 if (pkt->is_data_pkt) {
833 send_msg.msgs.vers_1_msgs.send_rndis_pkt.chan_type = 0;
835 /* 1 is RMC_CONTROL */
836 send_msg.msgs.vers_1_msgs.send_rndis_pkt.chan_type = 1;
839 send_msg.msgs.vers_1_msgs.send_rndis_pkt.send_buf_section_idx =
840 pkt->send_buf_section_idx;
841 send_msg.msgs.vers_1_msgs.send_rndis_pkt.send_buf_section_size =
842 pkt->send_buf_section_size;
844 if (pkt->page_buf_count) {
845 ret = hv_vmbus_channel_send_packet_pagebuffer(device->channel,
846 pkt->page_buffers, pkt->page_buf_count,
847 &send_msg, sizeof(nvsp_msg), (uint64_t)(uintptr_t)pkt);
849 ret = hv_vmbus_channel_send_packet(device->channel,
850 &send_msg, sizeof(nvsp_msg), (uint64_t)(uintptr_t)pkt,
851 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
852 HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
855 /* Record outstanding send only if send_packet() succeeded */
857 atomic_add_int(&net_dev->num_outstanding_sends, 1);
865 * In the FreeBSD Hyper-V virtual world, this function deals exclusively
866 * with virtual addresses.
/*
 * Handles an inbound transfer-page packet: validates the packet type,
 * NVSP message type and transfer-page set id, then walks each range
 * (one RNDIS packet / Ethernet frame per range) out of the shared
 * receive buffer and hands it to the RNDIS filter.  Finally sends a
 * single receive-completion for the whole transfer-page packet.
 */
869 hv_nv_on_receive(netvsc_dev *net_dev, struct hv_device *device,
870 hv_vm_packet_descriptor *pkt)
872 hv_vm_transfer_page_packet_header *vm_xfer_page_pkt;
873 nvsp_msg *nvsp_msg_pkt;
/* Stack-allocated scratch packet reused for every range below. */
874 netvsc_packet vsc_pkt;
875 netvsc_packet *net_vsc_pkt = &vsc_pkt;
876 device_t dev = device->device;
879 int status = nvsp_status_success;
882 * All inbound packets other than send completion should be
885 if (pkt->type != HV_VMBUS_PACKET_TYPE_DATA_USING_TRANSFER_PAGES) {
886 device_printf(dev, "packet type %d is invalid!\n", pkt->type);
890 nvsp_msg_pkt = (nvsp_msg *)((unsigned long)pkt
891 + (pkt->data_offset8 << 3));
893 /* Make sure this is a valid nvsp packet */
894 if (nvsp_msg_pkt->hdr.msg_type != nvsp_msg_1_type_send_rndis_pkt) {
895 device_printf(dev, "packet hdr type %d is invalid!\n",
900 vm_xfer_page_pkt = (hv_vm_transfer_page_packet_header *)pkt;
902 if (vm_xfer_page_pkt->transfer_page_set_id !=
903 NETVSC_RECEIVE_BUFFER_ID) {
904 device_printf(dev, "transfer_page_set_id %d is invalid!\n",
905 vm_xfer_page_pkt->transfer_page_set_id);
909 count = vm_xfer_page_pkt->range_count;
910 net_vsc_pkt->device = device;
912 /* Each range represents 1 RNDIS pkt that contains 1 Ethernet frame */
913 for (i = 0; i < count; i++) {
914 net_vsc_pkt->status = nvsp_status_success;
/* The range gives an offset/length into the shared receive buffer. */
915 net_vsc_pkt->data = (void *)((unsigned long)net_dev->rx_buf +
916 vm_xfer_page_pkt->ranges[i].byte_offset);
917 net_vsc_pkt->tot_data_buf_len =
918 vm_xfer_page_pkt->ranges[i].byte_count;
920 hv_rf_on_receive(net_dev, device, net_vsc_pkt);
/* Any per-range failure downgrades the overall completion status. */
921 if (net_vsc_pkt->status != nvsp_status_success) {
922 status = nvsp_status_failure;
927 * Moved completion call back here so that all received
928 * messages (not just data messages) will trigger a response
929 * message back to the host.
931 hv_nv_on_receive_completion(device, vm_xfer_page_pkt->d.transaction_id,
933 hv_rf_receive_rollup(net_dev);
937 * Net VSC on receive completion
939 * Send a receive completion packet to RNDIS device (ie NetVsp)
/*
 * Acknowledges a received transfer-page packet back to the host, using
 * its transaction id (tid).  On EAGAIN (ring full) the elided lines
 * retry up to 3 times with a short delay via the retry_send_cmplt label.
 */
942 hv_nv_on_receive_completion(struct hv_device *device, uint64_t tid,
945 nvsp_msg rx_comp_msg;
949 rx_comp_msg.hdr.msg_type = nvsp_msg_1_type_send_rndis_pkt_complete;
951 /* Pass in the status */
952 rx_comp_msg.msgs.vers_1_msgs.send_rndis_pkt_complete.status =
956 /* Send the completion */
957 ret = hv_vmbus_channel_send_packet(device->channel, &rx_comp_msg,
958 sizeof(nvsp_msg), tid, HV_VMBUS_PACKET_TYPE_COMPLETION, 0);
962 } else if (ret == EAGAIN) {
963 /* no more room... wait a bit and attempt to retry 3 times */
968 goto retry_send_cmplt;
974 * Net VSC on channel callback
/*
 * Vmbus channel interrupt callback.  Drains the channel ring: each raw
 * packet is dispatched by descriptor type to the send-completion or
 * receive handler.  When a packet is larger than the preallocated
 * NETVSC_PACKET_SIZE callback buffer (ENOBUFS), a temporary buffer of
 * the exact size is malloc'd (M_NOWAIT) and freed once drained.
 */
977 hv_nv_on_channel_callback(void *context)
979 struct hv_device *device = (struct hv_device *)context;
981 device_t dev = device->device;
984 hv_vm_packet_descriptor *desc;
986 int bufferlen = NETVSC_PACKET_SIZE;
989 net_dev = hv_nv_get_inbound_net_device(device);
/* Start with the device's preallocated fixed-size callback buffer. */
993 buffer = net_dev->callback_buf;
996 ret = hv_vmbus_channel_recv_packet_raw(device->channel,
997 buffer, bufferlen, &bytes_rxed, &request_id);
999 if (bytes_rxed > 0) {
1000 desc = (hv_vm_packet_descriptor *)buffer;
1001 switch (desc->type) {
1002 case HV_VMBUS_PACKET_TYPE_COMPLETION:
1003 hv_nv_on_send_completion(net_dev, device, desc);
1005 case HV_VMBUS_PACKET_TYPE_DATA_USING_TRANSFER_PAGES:
1006 hv_nv_on_receive(net_dev, device, desc);
/* NOTE(review): "unknow" typo in the log string below -- a runtime
 * string, so left untouched here; fix as a code change. */
1010 "hv_cb recv unknow type %d "
1011 " packet\n", desc->type);
1017 } else if (ret == ENOBUFS) {
1018 /* Handle large packet */
/* A previous oversized temp buffer is retired before sizing a new one. */
1019 if (bufferlen > NETVSC_PACKET_SIZE) {
1020 free(buffer, M_NETVSC);
1024 /* alloc new buffer */
1025 buffer = malloc(bytes_rxed, M_NETVSC, M_NOWAIT);
1026 if (buffer == NULL) {
1028 "hv_cb malloc buffer failed, len=%u\n",
1033 bufferlen = bytes_rxed;
/* Loop exit: release any oversized temp buffer still held. */
1037 if (bufferlen > NETVSC_PACKET_SIZE)
1038 free(buffer, M_NETVSC);
1040 hv_rf_channel_rollup(net_dev);