2 * Copyright (c) 2009-2012 Microsoft Corp.
3 * Copyright (c) 2010-2012 Citrix Inc.
4 * Copyright (c) 2012 NetApp Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 * HyperV vmbus network VSC (virtual services client) module
37 #include <sys/param.h>
38 #include <sys/kernel.h>
39 #include <sys/socket.h>
42 #include <net/if_arp.h>
43 #include <machine/bus.h>
44 #include <machine/atomic.h>
46 #include <dev/hyperv/include/hyperv.h>
47 #include "hv_net_vsc.h"
49 #include "hv_rndis_filter.h"
51 /* priv1 and priv2 are consumed by the main driver */
/* Alias the channel's third private slot as the per-channel receive buffer. */
52 #define hv_chan_rdbuf hv_chan_priv3
/* malloc(9) type tag under which all netvsc allocations are accounted. */
54 MALLOC_DEFINE(M_NETVSC, "netvsc", "Hyper-V netvsc driver");
57 * Forward declarations
/* Static helpers defined later in this file. */
59 static void hv_nv_on_channel_callback(void *xchan);
60 static int  hv_nv_init_send_buffer_with_net_vsp(struct hv_device *device);
61 static int  hv_nv_init_rx_buffer_with_net_vsp(struct hv_device *device);
62 static int  hv_nv_destroy_send_buffer(netvsc_dev *net_dev);
63 static int  hv_nv_destroy_rx_buffer(netvsc_dev *net_dev);
64 static int  hv_nv_connect_to_vsp(struct hv_device *device);
65 static void hv_nv_on_send_completion(netvsc_dev *net_dev,
66     struct hv_device *device, struct hv_vmbus_channel *, hv_vm_packet_descriptor *pkt);
67 static void hv_nv_on_receive_completion(struct hv_vmbus_channel *chan,
68     uint64_t tid, uint32_t status);
69 static void hv_nv_on_receive(netvsc_dev *net_dev,
70     struct hv_device *device, struct hv_vmbus_channel *chan,
71     hv_vm_packet_descriptor *pkt);
/*
 * Allocate and zero a netvsc_dev for this vmbus device and link it into
 * the hn_softc.  M_WAITOK means the malloc sleeps rather than fails.
 */
76 static inline netvsc_dev *
77 hv_nv_alloc_net_device(struct hv_device *device)
80 	hn_softc_t *sc = device_get_softc(device->device);
82 	net_dev = malloc(sizeof(netvsc_dev), M_NETVSC, M_WAITOK | M_ZERO);
84 	net_dev->dev = device;
85 	net_dev->destroy = FALSE;
	/* Publish the new netvsc_dev through the softc for later lookups. */
86 	sc->net_dev = net_dev;
/*
 * Look up the netvsc_dev for the outbound (transmit) path.
 * Returns NULL (in the elided branch) once teardown has begun, so callers
 * stop queueing new sends against a dying device.
 */
94 static inline netvsc_dev *
95 hv_nv_get_outbound_net_device(struct hv_device *device)
97 	hn_softc_t *sc = device_get_softc(device->device);
	/* Fix: dropped a stray second ';' from the original declaration. */
98 	netvsc_dev *net_dev = sc->net_dev;
100 	if ((net_dev != NULL) && net_dev->destroy) {
/*
 * Look up the netvsc_dev for the inbound (receive) path.
 * Unlike the outbound variant, a destroying device is still returned while
 * sends remain outstanding, so their completions can drain.
 */
110 static inline netvsc_dev *
111 hv_nv_get_inbound_net_device(struct hv_device *device)
113 	hn_softc_t *sc = device_get_softc(device->device);
	/* Fix: dropped a stray second ';' from the original declaration. */
114 	netvsc_dev *net_dev = sc->net_dev;
116 	if (net_dev == NULL) {
120 	 * When the device is being destroyed; we only
121 	 * permit incoming packets if and only if there
122 	 * are outstanding sends.
124 	if (net_dev->destroy) {
/*
 * Claim a free section of the chimney send buffer.
 * Scans the free-bit bitmap one long at a time; atomic_testandset_long()
 * makes the claim race-free against concurrent senders.  Returns the
 * section index, or NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX when full.
 */
132 hv_nv_get_next_send_section(netvsc_dev *net_dev)
134 unsigned long bitsmap_words = net_dev->bitsmap_words;
135 unsigned long *bitsmap = net_dev->send_section_bitsmap;
137 int ret = NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX;
140 for (i = 0; i < bitsmap_words; i++) {
/* ffsl() is 1-based; presumably the elided lines convert to a 0-based bit. */
141 idx = ffsl(~bitsmap[i]);
146 KASSERT(i * BITS_PER_LONG + idx < net_dev->send_section_count,
147 ("invalid i %d and idx %lu", i, idx));
/* Lost the race for this bit: another sender grabbed it first. */
149 if (atomic_testandset_long(&bitsmap[i], idx))
152 ret = i * BITS_PER_LONG + idx;
160 * Net VSC initialize receive buffer with net VSP
162 * Net VSP: Network virtual services provider, also known as the
163 * Hyper-V extensible switch and the synthetic data path.
/*
 * Allocate the big receive buffer, share it with the host via a GPADL,
 * tell NetVSP about it, and record the section layout from the reply.
 * Sleeps on channel_init_sema until the host response arrives.
 */
166 hv_nv_init_rx_buffer_with_net_vsp(struct hv_device *device)
172 net_dev = hv_nv_get_outbound_net_device(device);
/* Physically contiguous, page-aligned, zeroed buffer for host DMA. */
177 net_dev->rx_buf = contigmalloc(net_dev->rx_buf_size, M_NETVSC,
178 M_ZERO, 0UL, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
/* NOTE(review): a NULL check on rx_buf is not visible here — confirm it
 * exists in the elided lines; contigmalloc can fail. */
181 * Establish the GPADL handle for this buffer on this channel.
182 * Note: This call uses the vmbus connection rather than the
183 * channel to establish the gpadl handle.
184 * GPADL: Guest physical address descriptor list.
186 ret = hv_vmbus_channel_establish_gpadl(
187 device->channel, net_dev->rx_buf,
188 net_dev->rx_buf_size, &net_dev->rx_buf_gpadl_handle);
193 /* sema_wait(&ext->channel_init_sema); KYS CHECK */
195 /* Notify the NetVsp of the gpadl handle */
196 init_pkt = &net_dev->channel_init_packet;
198 memset(init_pkt, 0, sizeof(nvsp_msg));
200 init_pkt->hdr.msg_type = nvsp_msg_1_type_send_rx_buf;
201 init_pkt->msgs.vers_1_msgs.send_rx_buf.gpadl_handle =
202 net_dev->rx_buf_gpadl_handle;
203 init_pkt->msgs.vers_1_msgs.send_rx_buf.id =
204 NETVSC_RECEIVE_BUFFER_ID;
206 /* Send the gpadl notification request */
208 ret = hv_vmbus_channel_send_packet(device->channel, init_pkt,
209 sizeof(nvsp_msg), (uint64_t)(uintptr_t)init_pkt,
210 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
211 HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
/* Completion handler copies the reply into channel_init_packet and posts. */
216 sema_wait(&net_dev->channel_init_sema);
218 /* Check the response */
219 if (init_pkt->msgs.vers_1_msgs.send_rx_buf_complete.status
220 != nvsp_status_success) {
/* Snapshot the section descriptors the host handed back. */
225 net_dev->rx_section_count =
226 init_pkt->msgs.vers_1_msgs.send_rx_buf_complete.num_sections;
228 net_dev->rx_sections = malloc(net_dev->rx_section_count *
229 sizeof(nvsp_1_rx_buf_section), M_NETVSC, M_WAITOK);
230 memcpy(net_dev->rx_sections,
231 init_pkt->msgs.vers_1_msgs.send_rx_buf_complete.sections,
232 net_dev->rx_section_count * sizeof(nvsp_1_rx_buf_section));
236 * For first release, there should only be 1 section that represents
237 * the entire receive buffer
239 if (net_dev->rx_section_count != 1
240 || net_dev->rx_sections->offset != 0) {
/* Error path: unwind everything this function set up. */
248 hv_nv_destroy_rx_buffer(net_dev);
255 * Net VSC initialize send buffer with net VSP
/*
 * Allocate the chimney send buffer, share it with the host via a GPADL,
 * notify NetVSP, then size the per-section free bitmap from the reply.
 * Sleeps on channel_init_sema until the host response arrives.
 */
258 hv_nv_init_send_buffer_with_net_vsp(struct hv_device *device)
264 net_dev = hv_nv_get_outbound_net_device(device);
269 net_dev->send_buf = contigmalloc(net_dev->send_buf_size, M_NETVSC,
270 M_ZERO, 0UL, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
271 if (net_dev->send_buf == NULL) {
277 * Establish the gpadl handle for this buffer on this channel.
278 * Note: This call uses the vmbus connection rather than the
279 * channel to establish the gpadl handle.
281 ret = hv_vmbus_channel_establish_gpadl(device->channel,
282 net_dev->send_buf, net_dev->send_buf_size,
283 &net_dev->send_buf_gpadl_handle);
288 /* Notify the NetVsp of the gpadl handle */
290 init_pkt = &net_dev->channel_init_packet;
292 memset(init_pkt, 0, sizeof(nvsp_msg));
294 init_pkt->hdr.msg_type = nvsp_msg_1_type_send_send_buf;
/* NOTE(review): uses the send_rx_buf union member under a send_send_buf
 * message; the union layouts overlay identically, so this works, but
 * send_send_buf would be clearer — confirm against hv_net_vsc.h. */
295 init_pkt->msgs.vers_1_msgs.send_rx_buf.gpadl_handle =
296 net_dev->send_buf_gpadl_handle;
297 init_pkt->msgs.vers_1_msgs.send_rx_buf.id =
298 NETVSC_SEND_BUFFER_ID;
300 /* Send the gpadl notification request */
302 ret = hv_vmbus_channel_send_packet(device->channel, init_pkt,
/* Fix: go through (uintptr_t) first, matching every other transaction-id
 * cast in this file; a direct pointer->uint64_t cast warns on 32-bit. */
303 sizeof(nvsp_msg), (uint64_t)(uintptr_t)init_pkt,
304 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
305 HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
310 sema_wait(&net_dev->channel_init_sema);
312 /* Check the response */
313 if (init_pkt->msgs.vers_1_msgs.send_send_buf_complete.status
314 != nvsp_status_success) {
/* Derive section count and the bitmap that tracks free sections. */
319 net_dev->send_section_size =
320 init_pkt->msgs.vers_1_msgs.send_send_buf_complete.section_size;
321 net_dev->send_section_count =
322 net_dev->send_buf_size / net_dev->send_section_size;
323 net_dev->bitsmap_words = howmany(net_dev->send_section_count,
325 net_dev->send_section_bitsmap =
326 malloc(net_dev->bitsmap_words * sizeof(long), M_NETVSC,
/* Error path: unwind everything this function set up. */
332 hv_nv_destroy_send_buffer(net_dev);
339 * Net VSC destroy receive buffer
/*
 * Reverse of hv_nv_init_rx_buffer_with_net_vsp: revoke the buffer from
 * NetVSP, tear down the GPADL, then free the buffer and section array.
 * Safe to call on a partially initialized net_dev (each step is guarded).
 */
342 hv_nv_destroy_rx_buffer(netvsc_dev *net_dev)
344 nvsp_msg *revoke_pkt;
348 * If we got a section count, it means we received a
349 * send_rx_buf_complete msg
350 * (ie sent nvsp_msg_1_type_send_rx_buf msg) therefore,
351 * we need to send a revoke msg here
353 if (net_dev->rx_section_count) {
354 /* Send the revoke receive buffer */
355 revoke_pkt = &net_dev->revoke_packet;
356 memset(revoke_pkt, 0, sizeof(nvsp_msg));
358 revoke_pkt->hdr.msg_type = nvsp_msg_1_type_revoke_rx_buf;
359 revoke_pkt->msgs.vers_1_msgs.revoke_rx_buf.id =
360 NETVSC_RECEIVE_BUFFER_ID;
/* No completion requested (flags == 0); fire-and-forget revoke. */
362 ret = hv_vmbus_channel_send_packet(net_dev->dev->channel,
363 revoke_pkt, sizeof(nvsp_msg),
364 (uint64_t)(uintptr_t)revoke_pkt,
365 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, 0);
368 * If we failed here, we might as well return and have a leak
369 * rather than continue and a bugchk
376 /* Tear down the gpadl on the vsp end */
377 if (net_dev->rx_buf_gpadl_handle) {
/* NOTE(review): "gpdal" in the callee name is a long-standing typo in the
 * vmbus API, not a call-site error. */
378 ret = hv_vmbus_channel_teardown_gpdal(net_dev->dev->channel,
379 net_dev->rx_buf_gpadl_handle);
381 * If we failed here, we might as well return and have a leak
382 * rather than continue and a bugchk
387 net_dev->rx_buf_gpadl_handle = 0;
390 if (net_dev->rx_buf) {
391 /* Free up the receive buffer */
392 contigfree(net_dev->rx_buf, net_dev->rx_buf_size, M_NETVSC);
393 net_dev->rx_buf = NULL;
396 if (net_dev->rx_sections) {
397 free(net_dev->rx_sections, M_NETVSC);
398 net_dev->rx_sections = NULL;
399 net_dev->rx_section_count = 0;
406 * Net VSC destroy send buffer
/*
 * Reverse of hv_nv_init_send_buffer_with_net_vsp: revoke the send buffer
 * from NetVSP, tear down the GPADL, then free the buffer and bitmap.
 */
409 hv_nv_destroy_send_buffer(netvsc_dev *net_dev)
411 nvsp_msg *revoke_pkt;
/* (Comment below was copy-pasted from the rx path: the guard is actually
 * send_section_size, set only after a send_send_buf_complete reply.) */
415 * If we got a section count, it means we received a
416 * send_rx_buf_complete msg
417 * (ie sent nvsp_msg_1_type_send_rx_buf msg) therefore,
418 * we need to send a revoke msg here
420 if (net_dev->send_section_size) {
421 /* Send the revoke send buffer */
422 revoke_pkt = &net_dev->revoke_packet;
423 memset(revoke_pkt, 0, sizeof(nvsp_msg));
425 revoke_pkt->hdr.msg_type =
426 nvsp_msg_1_type_revoke_send_buf;
427 revoke_pkt->msgs.vers_1_msgs.revoke_send_buf.id =
428 NETVSC_SEND_BUFFER_ID;
430 ret = hv_vmbus_channel_send_packet(net_dev->dev->channel,
431 revoke_pkt, sizeof(nvsp_msg),
432 (uint64_t)(uintptr_t)revoke_pkt,
433 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, 0);
435 * If we failed here, we might as well return and have a leak
436 * rather than continue and a bugchk
443 /* Tear down the gpadl on the vsp end */
444 if (net_dev->send_buf_gpadl_handle) {
445 ret = hv_vmbus_channel_teardown_gpdal(net_dev->dev->channel,
446 net_dev->send_buf_gpadl_handle);
449 * If we failed here, we might as well return and have a leak
450 * rather than continue and a bugchk
455 net_dev->send_buf_gpadl_handle = 0;
458 if (net_dev->send_buf) {
/* Free up the send buffer (comment in upstream says "receive" — typo). */
459 /* Free up the receive buffer */
460 contigfree(net_dev->send_buf, net_dev->send_buf_size, M_NETVSC);
461 net_dev->send_buf = NULL;
464 if (net_dev->send_section_bitsmap) {
465 free(net_dev->send_section_bitsmap, M_NETVSC);
473 * Attempt to negotiate the caller-specified NVSP version
475 * For NVSP v2, Server 2008 R2 does not set
476 * init_pkt->msgs.init_msgs.init_compl.negotiated_prot_vers
477 * to the negotiated version, so we cannot rely on that.
/*
 * Offer exactly one NVSP version (nvsp_ver) to the host and block until
 * the init-complete reply.  Returns 0 on success (elided), nonzero if the
 * host rejects that version; callers retry with older versions.
 */
480 hv_nv_negotiate_nvsp_protocol(struct hv_device *device, netvsc_dev *net_dev,
486 init_pkt = &net_dev->channel_init_packet;
487 memset(init_pkt, 0, sizeof(nvsp_msg));
488 init_pkt->hdr.msg_type = nvsp_msg_type_init;
491 * Specify parameter as the only acceptable protocol version
/* Both min and max are set to nvsp_ver: a single-version offer. */
493 init_pkt->msgs.init_msgs.init.p1.protocol_version = nvsp_ver;
494 init_pkt->msgs.init_msgs.init.protocol_version_2 = nvsp_ver;
496 /* Send the init request */
497 ret = hv_vmbus_channel_send_packet(device->channel, init_pkt,
498 sizeof(nvsp_msg), (uint64_t)(uintptr_t)init_pkt,
499 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
500 HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
/* Wait for hv_nv_on_send_completion() to copy the reply and post. */
504 sema_wait(&net_dev->channel_init_sema);
506 if (init_pkt->msgs.init_msgs.init_compl.status != nvsp_status_success)
513 * Send NDIS version 2 config packet containing MTU.
515 * Not valid for NDIS version 1.
/*
 * Push the interface MTU and VLAN-tagging capability to the host.
 * Fire-and-forget: no completion is requested and no reply is awaited.
 */
518 hv_nv_send_ndis_config(struct hv_device *device, uint32_t mtu)
524 net_dev = hv_nv_get_outbound_net_device(device);
529 * Set up configuration packet, write MTU
530 * Indicate we are capable of handling VLAN tags
532 init_pkt = &net_dev->channel_init_packet;
533 memset(init_pkt, 0, sizeof(nvsp_msg));
534 init_pkt->hdr.msg_type = nvsp_msg_2_type_send_ndis_config;
535 init_pkt->msgs.vers_2_msgs.send_ndis_config.mtu = mtu;
/* (Line below is a continuation; the "init_pkt->" prefix is on an elided
 * line.)  Advertise IEEE 802.1Q VLAN tag handling. */
537 msgs.vers_2_msgs.send_ndis_config.capabilities.u1.u2.ieee8021q
540 /* Send the configuration packet */
541 ret = hv_vmbus_channel_send_packet(device->channel, init_pkt,
542 sizeof(nvsp_msg), (uint64_t)(uintptr_t)init_pkt,
543 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, 0);
551 * Net VSC connect to VSP
/*
 * Full NetVSP handshake: negotiate the highest mutually supported NVSP
 * version, push the NDIS config (MTU/VLAN) when the version allows it,
 * announce the NDIS version, then post the receive and send buffers.
 */
554 hv_nv_connect_to_vsp(struct hv_device *device)
558 uint32_t ndis_version;
/* Candidate NVSP versions, oldest first; tried from the end (newest). */
559 uint32_t protocol_list[] = { NVSP_PROTOCOL_VERSION_1,
560 NVSP_PROTOCOL_VERSION_2,
561 NVSP_PROTOCOL_VERSION_4,
562 NVSP_PROTOCOL_VERSION_5 };
564 int protocol_number = nitems(protocol_list);
566 device_t dev = device->device;
567 hn_softc_t *sc = device_get_softc(dev);
568 struct ifnet *ifp = sc->arpcom.ac_ifp;
570 net_dev = hv_nv_get_outbound_net_device(device);
576 * Negotiate the NVSP version. Try the latest NVSP first.
578 for (i = protocol_number - 1; i >= 0; i--) {
579 if (hv_nv_negotiate_nvsp_protocol(device, net_dev,
580 protocol_list[i]) == 0) {
581 net_dev->nvsp_version = protocol_list[i];
583 device_printf(dev, "Netvsc: got version 0x%x\n",
584 net_dev->nvsp_version);
/* Loop exhausted: no version accepted by the host. */
591 device_printf(dev, "failed to negotiate a valid "
597 * Set the MTU if supported by this NVSP protocol version
598 * This needs to be right after the NVSP init message per Haiyang
600 if (net_dev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
601 ret = hv_nv_send_ndis_config(device, ifp->if_mtu);
604 * Send the NDIS version
606 init_pkt = &net_dev->channel_init_packet;
608 memset(init_pkt, 0, sizeof(nvsp_msg));
/* NVSP <= v4 pairs with NDIS 6.1; v5 pairs with NDIS 6.30. */
610 if (net_dev->nvsp_version <= NVSP_PROTOCOL_VERSION_4) {
611 ndis_version = NDIS_VERSION_6_1;
613 ndis_version = NDIS_VERSION_6_30;
616 init_pkt->hdr.msg_type = nvsp_msg_1_type_send_ndis_vers;
617 init_pkt->msgs.vers_1_msgs.send_ndis_vers.ndis_major_vers =
618 (ndis_version & 0xFFFF0000) >> 16;
619 init_pkt->msgs.vers_1_msgs.send_ndis_vers.ndis_minor_vers =
620 ndis_version & 0xFFFF;
622 /* Send the init request */
624 ret = hv_vmbus_channel_send_packet(device->channel, init_pkt,
625 sizeof(nvsp_msg), (uint64_t)(uintptr_t)init_pkt,
626 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND, 0);
631 * TODO: BUGBUG - We have to wait for the above msg since the netvsp
632 * uses KMCL which acknowledges packet (completion packet)
633 * since our Vmbus always set the
634 * HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED flag
636 /* sema_wait(&NetVscChannel->channel_init_sema); */
638 /* Post the big receive buffer to NetVSP */
/* Older hosts (NVSP <= v2) only accept the smaller legacy rx buffer. */
639 if (net_dev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
640 net_dev->rx_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
642 net_dev->rx_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
643 net_dev->send_buf_size = NETVSC_SEND_BUFFER_SIZE;
645 ret = hv_nv_init_rx_buffer_with_net_vsp(device);
647 ret = hv_nv_init_send_buffer_with_net_vsp(device);
654 * Net VSC disconnect from VSP
/*
 * Release both host-shared buffers (revoke + GPADL teardown + free);
 * the inverse of the buffer setup done in hv_nv_connect_to_vsp().
 */
657 hv_nv_disconnect_from_vsp(netvsc_dev *net_dev)
659 hv_nv_destroy_rx_buffer(net_dev);
660 hv_nv_destroy_send_buffer(net_dev);
664 * Callback handler for subchannel offer
665 * @@param context new subchannel
/*
 * Runs when the host offers a vRSS subchannel: validate its index,
 * let the main driver account for it, allocate its read buffer, then
 * open it with the shared channel callback.
 */
668 hv_nv_subchan_callback(void *xchan)
670 struct hv_vmbus_channel *chan = xchan;
672 uint16_t chn_index = chan->offer_msg.offer.sub_channel_index;
673 struct hv_device *device = chan->device;
674 hn_softc_t *sc = device_get_softc(device->device);
677 net_dev = sc->net_dev;
679 if (chn_index >= net_dev->num_channel) {
680 /* Would this ever happen? */
683 netvsc_subchan_callback(sc, chan);
/* Per-channel receive scratch buffer, freed when the channel closes. */
685 chan->hv_chan_rdbuf = malloc(NETVSC_PACKET_SIZE, M_NETVSC, M_WAITOK);
686 ret = hv_vmbus_channel_open(chan, NETVSC_DEVICE_RING_BUFFER_SIZE,
687 NETVSC_DEVICE_RING_BUFFER_SIZE, NULL, 0,
688 hv_nv_on_channel_callback, chan);
692 * Net VSC on device add
694 * Callback when the device belonging to this driver is added
/*
 * Device attach path: allocate the netvsc_dev, open the primary channel,
 * register the subchannel callback, and perform the NetVSP handshake.
 * The tail lines here are the error-unwind path (close channel, free).
 */
697 hv_nv_on_device_add(struct hv_device *device, void *additional_info)
699 struct hv_vmbus_channel *chan = device->channel;
703 net_dev = hv_nv_alloc_net_device(device);
707 /* Initialize the NetVSC channel extension */
/* Sema starts at 0: each handshake reply posts it once. */
709 sema_init(&net_dev->channel_init_sema, 0, "netdev_sema");
711 chan->hv_chan_rdbuf = malloc(NETVSC_PACKET_SIZE, M_NETVSC, M_WAITOK);
716 ret = hv_vmbus_channel_open(chan,
717 NETVSC_DEVICE_RING_BUFFER_SIZE, NETVSC_DEVICE_RING_BUFFER_SIZE,
718 NULL, 0, hv_nv_on_channel_callback, chan);
/* Open failed: release the read buffer allocated just above. */
720 free(chan->hv_chan_rdbuf, M_NETVSC);
723 chan->sc_creation_callback = hv_nv_subchan_callback;
726 * Connect with the NetVsp
728 ret = hv_nv_connect_to_vsp(device);
735 /* Now, we can close the channel safely */
736 free(chan->hv_chan_rdbuf, M_NETVSC);
737 hv_vmbus_channel_close(chan);
741 * Free the packet buffers on the netvsc device packet queue.
742 * Release other resources.
744 sema_destroy(&net_dev->channel_init_sema);
745 free(net_dev, M_NETVSC);
751 * Net VSC on device remove
/*
 * Device detach path: stop new traffic, disconnect from NetVSP, close
 * the channel (optionally marking it non-destructive), and free state.
 */
754 hv_nv_on_device_remove(struct hv_device *device, boolean_t destroy_channel)
756 hn_softc_t *sc = device_get_softc(device->device);
/* Fix: dropped a stray second ';' from the original declaration. */
757 netvsc_dev *net_dev = sc->net_dev;
759 /* Stop outbound traffic ie sends and receives completions */
760 net_dev->destroy = TRUE;
762 hv_nv_disconnect_from_vsp(net_dev);
764 /* At this point, no one should be accessing net_dev except in here */
766 /* Now, we can close the channel safely */
768 if (!destroy_channel) {
769 device->channel->state =
770 HV_CHANNEL_CLOSING_NONDESTRUCTIVE_STATE;
773 free(device->channel->hv_chan_rdbuf, M_NETVSC);
774 hv_vmbus_channel_close(device->channel);
776 sema_destroy(&net_dev->channel_init_sema);
777 free(net_dev, M_NETVSC);
783 * Net VSC on send completion
/*
 * Dispatch a host completion packet.  Handshake replies are copied into
 * channel_init_packet and wake the waiter on channel_init_sema; RNDIS
 * send completions release the chimney send section (if one was used)
 * and invoke the upper layer's completion callback.
 */
786 hv_nv_on_send_completion(netvsc_dev *net_dev,
787 struct hv_device *device, struct hv_vmbus_channel *chan,
788 hv_vm_packet_descriptor *pkt)
790 nvsp_msg *nvsp_msg_pkt;
791 netvsc_packet *net_vsc_pkt;
/* Payload starts data_offset8*8 bytes past the descriptor header. */
794 (nvsp_msg *)((unsigned long)pkt + (pkt->data_offset8 << 3));
796 if (nvsp_msg_pkt->hdr.msg_type == nvsp_msg_type_init_complete
797 || nvsp_msg_pkt->hdr.msg_type
798 == nvsp_msg_1_type_send_rx_buf_complete
799 || nvsp_msg_pkt->hdr.msg_type
800 == nvsp_msg_1_type_send_send_buf_complete
801 || nvsp_msg_pkt->hdr.msg_type
802 == nvsp_msg5_type_subchannel) {
803 /* Copy the response back */
804 memcpy(&net_dev->channel_init_packet, nvsp_msg_pkt,
/* Wake whoever is blocked in sema_wait() on the handshake. */
806 sema_post(&net_dev->channel_init_sema);
807 } else if (nvsp_msg_pkt->hdr.msg_type ==
808 nvsp_msg_1_type_send_rndis_pkt_complete) {
809 /* Get the send context */
/* transaction_id round-trips the netvsc_packet pointer set at send time. */
811 (netvsc_packet *)(unsigned long)pkt->transaction_id;
812 if (NULL != net_vsc_pkt) {
813 if (net_vsc_pkt->send_buf_section_idx !=
814 NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX) {
/* Locate the bitmap word/bit for the section being released. */
818 idx = net_vsc_pkt->send_buf_section_idx /
820 KASSERT(idx < net_dev->bitsmap_words,
821 ("invalid section index %u",
822 net_vsc_pkt->send_buf_section_idx));
824 (net_vsc_pkt->send_buf_section_idx %
/* The bit must still be set: releasing an unclaimed section is a bug. */
827 KASSERT(net_dev->send_section_bitsmap[idx] &
829 ("index bitmap 0x%lx, section index %u, "
830 "bitmap idx %d, bitmask 0x%lx",
831 net_dev->send_section_bitsmap[idx],
832 net_vsc_pkt->send_buf_section_idx,
835 &net_dev->send_section_bitsmap[idx], mask);
838 /* Notify the layer above us */
839 net_vsc_pkt->compl.send.on_send_completion(chan,
840 net_vsc_pkt->compl.send.send_completion_context);
848 * Sends a packet on the specified Hyper-V device.
849 * Returns 0 on success, non-zero on failure.
/*
 * Wrap the caller's netvsc_packet in an NVSP send-RNDIS message and hand
 * it to vmbus; the packet pointer rides in the transaction id so the
 * completion handler can find it again.
 */
852 hv_nv_on_send(struct hv_vmbus_channel *chan, netvsc_packet *pkt)
857 send_msg.hdr.msg_type = nvsp_msg_1_type_send_rndis_pkt;
858 if (pkt->is_data_pkt) {
/* 0 is RMC_DATA (data channel). */
860 send_msg.msgs.vers_1_msgs.send_rndis_pkt.chan_type = 0;
862 /* 1 is RMC_CONTROL */
863 send_msg.msgs.vers_1_msgs.send_rndis_pkt.chan_type = 1;
866 send_msg.msgs.vers_1_msgs.send_rndis_pkt.send_buf_section_idx =
867 pkt->send_buf_section_idx;
868 send_msg.msgs.vers_1_msgs.send_rndis_pkt.send_buf_section_size =
869 pkt->send_buf_section_size;
/* With page buffers attached use the gather-list send; otherwise inline. */
871 if (pkt->page_buf_count) {
872 ret = hv_vmbus_channel_send_packet_pagebuffer(chan,
873 pkt->page_buffers, pkt->page_buf_count,
874 &send_msg, sizeof(nvsp_msg), (uint64_t)(uintptr_t)pkt);
876 ret = hv_vmbus_channel_send_packet(chan,
877 &send_msg, sizeof(nvsp_msg), (uint64_t)(uintptr_t)pkt,
878 HV_VMBUS_PACKET_TYPE_DATA_IN_BAND,
879 HV_VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
888 * In the FreeBSD Hyper-V virtual world, this function deals exclusively
889 * with virtual addresses.
/*
 * Handle an inbound transfer-page packet: validate it is an NVSP RNDIS
 * data message targeting our receive buffer, hand each contained range
 * (one RNDIS packet / Ethernet frame) to the RNDIS filter, then send a
 * single aggregated receive completion back to the host.
 */
892 hv_nv_on_receive(netvsc_dev *net_dev, struct hv_device *device,
893 struct hv_vmbus_channel *chan, hv_vm_packet_descriptor *pkt)
895 hv_vm_transfer_page_packet_header *vm_xfer_page_pkt;
896 nvsp_msg *nvsp_msg_pkt;
/* Stack-local packet reused for every range in this transfer. */
897 netvsc_packet vsc_pkt;
898 netvsc_packet *net_vsc_pkt = &vsc_pkt;
899 device_t dev = device->device;
902 int status = nvsp_status_success;
905 * All inbound packets other than send completion should be
908 if (pkt->type != HV_VMBUS_PACKET_TYPE_DATA_USING_TRANSFER_PAGES) {
909 device_printf(dev, "packet type %d is invalid!\n", pkt->type);
913 nvsp_msg_pkt = (nvsp_msg *)((unsigned long)pkt
914 + (pkt->data_offset8 << 3));
916 /* Make sure this is a valid nvsp packet */
917 if (nvsp_msg_pkt->hdr.msg_type != nvsp_msg_1_type_send_rndis_pkt) {
918 device_printf(dev, "packet hdr type %d is invalid!\n",
923 vm_xfer_page_pkt = (hv_vm_transfer_page_packet_header *)pkt;
925 if (vm_xfer_page_pkt->transfer_page_set_id !=
926 NETVSC_RECEIVE_BUFFER_ID) {
927 device_printf(dev, "transfer_page_set_id %d is invalid!\n",
928 vm_xfer_page_pkt->transfer_page_set_id);
932 count = vm_xfer_page_pkt->range_count;
933 net_vsc_pkt->device = device;
935 /* Each range represents 1 RNDIS pkt that contains 1 Ethernet frame */
936 for (i = 0; i < count; i++) {
937 net_vsc_pkt->status = nvsp_status_success;
/* Ranges are offsets into the shared rx buffer established earlier. */
938 net_vsc_pkt->data = (void *)((unsigned long)net_dev->rx_buf +
939 vm_xfer_page_pkt->ranges[i].byte_offset);
940 net_vsc_pkt->tot_data_buf_len =
941 vm_xfer_page_pkt->ranges[i].byte_count;
943 hv_rf_on_receive(net_dev, device, chan, net_vsc_pkt);
/* Any failed range downgrades the whole completion status. */
944 if (net_vsc_pkt->status != nvsp_status_success) {
945 status = nvsp_status_failure;
950 * Moved completion call back here so that all received
951 * messages (not just data messages) will trigger a response
952 * message back to the host.
954 hv_nv_on_receive_completion(chan, vm_xfer_page_pkt->d.transaction_id,
959 * Net VSC on receive completion
961 * Send a receive completion packet to RNDIS device (ie NetVsp)
/*
 * Acknowledge a received transfer-page packet back to the host using the
 * host-supplied transaction id (tid).  On a full ring (EAGAIN) the send
 * is retried a few times (retry loop partially elided).
 */
964 hv_nv_on_receive_completion(struct hv_vmbus_channel *chan, uint64_t tid,
967 nvsp_msg rx_comp_msg;
971 rx_comp_msg.hdr.msg_type = nvsp_msg_1_type_send_rndis_pkt_complete;
973 /* Pass in the status */
974 rx_comp_msg.msgs.vers_1_msgs.send_rndis_pkt_complete.status =
978 /* Send the completion */
979 ret = hv_vmbus_channel_send_packet(chan, &rx_comp_msg,
980 sizeof(nvsp_msg), tid, HV_VMBUS_PACKET_TYPE_COMPLETION, 0);
984 } else if (ret == EAGAIN) {
985 /* no more room... wait a bit and attempt to retry 3 times */
990 goto retry_send_cmplt;
996 * Net VSC receiving vRSS send table from VSP
/*
 * Parse an NVSP v5 indirection-table message and copy the vRSS send
 * table into net_dev; used to spread transmits across subchannels.
 */
999 hv_nv_send_table(struct hv_device *device, hv_vm_packet_descriptor *pkt)
1001 netvsc_dev *net_dev;
1002 nvsp_msg *nvsp_msg_pkt;
1004 uint32_t count, *table;
1006 net_dev = hv_nv_get_inbound_net_device(device);
1011 (nvsp_msg *)((unsigned long)pkt + (pkt->data_offset8 << 3));
1013 if (nvsp_msg_pkt->hdr.msg_type !=
1014 nvsp_msg5_type_send_indirection_table) {
1015 printf("Netvsc: !Warning! receive msg type not "
1016 "send_indirection_table. type = %d\n",
1017 nvsp_msg_pkt->hdr.msg_type);
1021 count = nvsp_msg_pkt->msgs.vers_5_msgs.send_table.count;
1022 if (count != VRSS_SEND_TABLE_SIZE) {
1023 printf("Netvsc: Received wrong send table size: %u\n", count);
/* Table entries live 'offset' bytes past the send_table header. */
1027 table = (uint32_t *)
1028 ((unsigned long)&nvsp_msg_pkt->msgs.vers_5_msgs.send_table +
1029 nvsp_msg_pkt->msgs.vers_5_msgs.send_table.offset);
1031 for (i = 0; i < count; i++)
1032 net_dev->vrss_send_table[i] = table[i];
1036 * Net VSC on channel callback
/*
 * Per-channel interrupt-context worker: drain raw packets from the vmbus
 * ring and dispatch by descriptor type (completion / transfer-pages /
 * in-band).  On ENOBUFS the read buffer is grown to fit the packet; any
 * oversized temporary buffer is freed before returning.
 */
1039 hv_nv_on_channel_callback(void *xchan)
1041 struct hv_vmbus_channel *chan = xchan;
1042 struct hv_device *device = chan->device;
1043 netvsc_dev *net_dev;
1044 device_t dev = device->device;
1045 uint32_t bytes_rxed;
1046 uint64_t request_id;
1047 hv_vm_packet_descriptor *desc;
1049 int bufferlen = NETVSC_PACKET_SIZE;
1052 net_dev = hv_nv_get_inbound_net_device(device);
1053 if (net_dev == NULL)
/* Start from the preallocated per-channel read buffer. */
1056 buffer = chan->hv_chan_rdbuf;
1059 ret = hv_vmbus_channel_recv_packet_raw(chan,
1060 buffer, bufferlen, &bytes_rxed, &request_id);
1062 if (bytes_rxed > 0) {
1063 desc = (hv_vm_packet_descriptor *)buffer;
1064 switch (desc->type) {
1065 case HV_VMBUS_PACKET_TYPE_COMPLETION:
1066 hv_nv_on_send_completion(net_dev, device,
1069 case HV_VMBUS_PACKET_TYPE_DATA_USING_TRANSFER_PAGES:
1070 hv_nv_on_receive(net_dev, device, chan, desc);
1072 case HV_VMBUS_PACKET_TYPE_DATA_IN_BAND:
1073 hv_nv_send_table(device, desc);
/* NOTE(review): "unknow" typo lives in the log string below. */
1077 "hv_cb recv unknow type %d "
1078 " packet\n", desc->type);
1084 } else if (ret == ENOBUFS) {
1085 /* Handle large packet */
/* Free a previous oversized temporary before allocating a bigger one. */
1086 if (bufferlen > NETVSC_PACKET_SIZE) {
1087 free(buffer, M_NETVSC);
1091 /* alloc new buffer */
/* M_NOWAIT: we may be in interrupt context and must not sleep. */
1092 buffer = malloc(bytes_rxed, M_NETVSC, M_NOWAIT);
1093 if (buffer == NULL) {
1095 "hv_cb malloc buffer failed, len=%u\n",
1100 bufferlen = bytes_rxed;
/* Drop any oversized temporary buffer before leaving. */
1104 if (bufferlen > NETVSC_PACKET_SIZE)
1105 free(buffer, M_NETVSC);
1107 hv_rf_channel_rollup(chan);