/*-
 * Copyright (c) 2010-2012 Citrix Inc.
 * Copyright (c) 2009-2012 Microsoft Corp.
 * Copyright (c) 2012 NetApp Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2004-2006 Kip Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"
#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/sx.h>
#include <sys/sysctl.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <netinet/ip6.h>
#include <netinet/tcp_lro.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/frame.h>
#include <machine/vmparam.h>

#include <sys/bus.h>
#include <sys/rman.h>
#include <sys/mutex.h>
#include <sys/errno.h>
#include <sys/types.h>
#include <machine/atomic.h>

#include <machine/intr_machdep.h>

#include <machine/in_cksum.h>

#include <dev/hyperv/include/hyperv.h>
#include "hv_net_vsc.h"
#include "hv_rndis.h"
#include "hv_rndis_filter.h"
/* Short for Hyper-V network interface */
#define NETVSC_DEVNAME	"hn"

/*
 * It looks like offset 0 of buf is reserved to hold the softc pointer.
 * The sc pointer is evidently not needed, and is not presently populated.
 * The packet offset is where the netvsc_packet starts in the buffer.
 */
#define HV_NV_SC_PTR_OFFSET_IN_BUF	0
#define HV_NV_PACKET_OFFSET_IN_BUF	16
/* YYY should get it from the underlying channel */
#define HN_TX_DESC_CNT			512

#define HN_RNDIS_MSG_LEN		\
    (sizeof(rndis_msg) +		\
     RNDIS_VLAN_PPI_SIZE +		\
     RNDIS_TSO_PPI_SIZE +		\
     RNDIS_CSUM_PPI_SIZE)
#define HN_RNDIS_MSG_BOUNDARY		PAGE_SIZE
#define HN_RNDIS_MSG_ALIGN		CACHE_LINE_SIZE

#define HN_TX_DATA_BOUNDARY		PAGE_SIZE
#define HN_TX_DATA_MAXSIZE		IP_MAXPACKET
#define HN_TX_DATA_SEGSIZE		PAGE_SIZE
#define HN_TX_DATA_SEGCNT_MAX		\
    (NETVSC_PACKET_MAXPAGE - HV_RF_NUM_TX_RESERVED_PAGE_BUFS)
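
/*
 * Note on HN_TX_DATA_SEGCNT_MAX: the reserved page buffers
 * (HV_RF_NUM_TX_RESERVED_PAGE_BUFS) are presumably consumed by the RNDIS
 * message itself -- hn_start_locked() below stores the RNDIS message in
 * page_buffers[0] and places mbuf data segments starting at index
 * HV_RF_NUM_TX_RESERVED_PAGE_BUFS -- so only the remaining page buffers
 * are available for data segments.
 */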
struct hn_txdesc {
	SLIST_ENTRY(hn_txdesc) link;
	struct mbuf	*m;
	struct hn_softc	*sc;
	int		refs;
	uint32_t	flags;		/* HN_TXD_FLAG_ */
	netvsc_packet	netvsc_pkt;	/* XXX to be removed */

	bus_dmamap_t	data_dmap;

	bus_addr_t	rndis_msg_paddr;
	rndis_msg	*rndis_msg;
	bus_dmamap_t	rndis_msg_dmap;
};

#define HN_TXD_FLAG_ONLIST	0x1
#define HN_TXD_FLAG_DMAMAP	0x2
/*
 * A unified flag for all outbound checksum flags is useful,
 * and it helps avoid unnecessary checksum calculation in
 * network forwarding scenarios.
 */
#define HV_CSUM_FOR_OUTBOUND						\
    (CSUM_IP|CSUM_IP_UDP|CSUM_IP_TCP|CSUM_IP_SCTP|CSUM_IP_TSO|		\
     CSUM_IP_ISCSI|CSUM_IP6_UDP|CSUM_IP6_TCP|CSUM_IP6_SCTP|		\
     CSUM_IP6_TSO|CSUM_IP6_ISCSI)
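
/*
 * For example, hn_start_locked() tests
 * (m_head->m_pkthdr.csum_flags & HV_CSUM_FOR_OUTBOUND) once and skips
 * all send-side checksum/TSO PPI setup when none of these bits is set,
 * e.g. when a forwarded packet's checksums were already handled on
 * receive.
 */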
/* XXX move to netinet/tcp_lro.h */
#define HN_LRO_HIWAT_MAX			65535
#define HN_LRO_HIWAT_DEF			HN_LRO_HIWAT_MAX
/* YYY 2*MTU is a bit rough, but should be good enough. */
#define HN_LRO_HIWAT_MTULIM(ifp)		(2 * (ifp)->if_mtu)
#define HN_LRO_HIWAT_ISVALID(sc, hiwat)		\
    ((hiwat) >= HN_LRO_HIWAT_MTULIM((sc)->hn_ifp) && \
     (hiwat) <= HN_LRO_HIWAT_MAX)
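
/*
 * Worked example: with the default MTU of 1500, a high watermark is valid
 * only in the range [3000, 65535]; raising the MTU to 9000 narrows the
 * valid range to [18000, 65535], which is why the SIOCSIFMTU handler
 * below re-clamps hn_lro_hiwat after an MTU change.
 */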
/*
 * Be aware that this sleepable mutex will exhibit WITNESS errors when
 * certain TCP and ARP code paths are taken.  This appears to be a
 * well-known condition, as all other drivers checked use a sleeping
 * mutex to protect their transmit paths.
 * Also be aware that mutexes do not play well with semaphores, and there
 * is a conflicting semaphore in a certain channel code path.
 */
#define NV_LOCK_INIT(_sc, _name) \
	    mtx_init(&(_sc)->hn_lock, _name, MTX_NETWORK_LOCK, MTX_DEF)
#define NV_LOCK(_sc)		mtx_lock(&(_sc)->hn_lock)
#define NV_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->hn_lock, MA_OWNED)
#define NV_UNLOCK(_sc)		mtx_unlock(&(_sc)->hn_lock)
#define NV_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->hn_lock)
int hv_promisc_mode = 0;    /* normal mode by default */

/* Trust TCP segment verification on the host side. */
static int hn_trust_hosttcp = 0;
TUNABLE_INT("dev.hn.trust_hosttcp", &hn_trust_hosttcp);

#if __FreeBSD_version >= 1100045
/* Limit TSO burst size */
static int hn_tso_maxlen = 0;
TUNABLE_INT("dev.hn.tso_maxlen", &hn_tso_maxlen);
#endif

/* Limit chimney send size */
static int hn_tx_chimney_size = 0;
TUNABLE_INT("dev.hn.tx_chimney_size", &hn_tx_chimney_size);
/*
 * Forward declarations
 */
static void hn_stop(hn_softc_t *sc);
static void hn_ifinit_locked(hn_softc_t *sc);
static void hn_ifinit(void *xsc);
static int  hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static void hn_start_locked(struct ifnet *ifp);
static void hn_start(struct ifnet *ifp);
static int hn_ifmedia_upd(struct ifnet *ifp);
static void hn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
#ifdef HN_LRO_HIWAT
static int hn_lro_hiwat_sysctl(SYSCTL_HANDLER_ARGS);
#endif
static int hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_check_iplen(const struct mbuf *, int);
static int hn_create_tx_ring(struct hn_softc *sc);
static void hn_destroy_tx_ring(struct hn_softc *sc);

static __inline void
hn_set_lro_hiwat(struct hn_softc *sc, int hiwat)
{
	sc->hn_lro_hiwat = hiwat;
#ifdef HN_LRO_HIWAT
	sc->hn_lro.lro_hiwat = sc->hn_lro_hiwat;
#endif
}
/*
 * NetVsc gets the message transport protocol type.
 */
static uint32_t get_transport_proto_type(struct mbuf *m_head)
{
	uint32_t ret_val = TRANSPORT_TYPE_NOT_IP;
	uint16_t ether_type = 0;
	int ether_len = 0;
	struct ether_vlan_header *eh;
	struct ip *iph;
	struct ip6_hdr *ip6;

	eh = mtod(m_head, struct ether_vlan_header*);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		ether_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		ether_type = eh->evl_proto;
	} else {
		ether_len = ETHER_HDR_LEN;
		ether_type = eh->evl_encap_proto;
	}

	switch (ntohs(ether_type)) {
	case ETHERTYPE_IPV6:
		ip6 = (struct ip6_hdr *)(m_head->m_data + ether_len);

		if (IPPROTO_TCP == ip6->ip6_nxt) {
			ret_val = TRANSPORT_TYPE_IPV6_TCP;
		} else if (IPPROTO_UDP == ip6->ip6_nxt) {
			ret_val = TRANSPORT_TYPE_IPV6_UDP;
		}
		break;
	case ETHERTYPE_IP:
		iph = (struct ip *)(m_head->m_data + ether_len);

		if (IPPROTO_TCP == iph->ip_p) {
			ret_val = TRANSPORT_TYPE_IPV4_TCP;
		} else if (IPPROTO_UDP == iph->ip_p) {
			ret_val = TRANSPORT_TYPE_IPV4_UDP;
		}
		break;
	default:
		ret_val = TRANSPORT_TYPE_NOT_IP;
		break;
	}

	return (ret_val);
}
static int
hn_ifmedia_upd(struct ifnet *ifp __unused)
{

	return EOPNOTSUPP;
}

static void
hn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct hn_softc *sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->hn_carrier) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}
	ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
}
/* {F8615163-DF3E-46c5-913F-F2D2F965ED0E} */
static const hv_guid g_net_vsc_device_type = {
	.data = {0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
		0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E}
};

/*
 * Standard probe entry point.
 */
static int
netvsc_probe(device_t dev)
{
	const char *p;

	p = vmbus_get_type(dev);
	if (!memcmp(p, &g_net_vsc_device_type.data, sizeof(hv_guid))) {
		device_set_desc(dev, "Synthetic Network Interface");
		if (bootverbose)
			printf("Netvsc probe... DONE\n");

		return (BUS_PROBE_DEFAULT);
	}

	return (ENXIO);
}
/*
 * Standard attach entry point.
 *
 * Called when the driver is loaded.  It allocates needed resources,
 * and initializes the "hardware" and software.
 */
static int
netvsc_attach(device_t dev)
{
	struct hv_device *device_ctx = vmbus_get_devctx(dev);
	netvsc_device_info device_info;
	hn_softc_t *sc;
	int unit = device_get_unit(dev);
	struct ifnet *ifp = NULL;
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx;
	int error;
#if __FreeBSD_version >= 1100045
	int tso_maxlen;
#endif

	sc = device_get_softc(dev);
	if (sc == NULL) {
		return (ENOMEM);
	}

	bzero(sc, sizeof(hn_softc_t));
	sc->hn_unit = unit;
	sc->hn_dev = dev;
	sc->hn_lro_hiwat = HN_LRO_HIWAT_DEF;
	sc->hn_trust_hosttcp = hn_trust_hosttcp;

	error = hn_create_tx_ring(sc);
	if (error)
		goto failed;

	NV_LOCK_INIT(sc, "NetVSCLock");

	sc->hn_dev_obj = device_ctx;

	ifp = sc->hn_ifp = sc->arpcom.ac_ifp = if_alloc(IFT_ETHER);
	ifp->if_softc = sc;

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_dunit = unit;
	ifp->if_dname = NETVSC_DEVNAME;

	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = hn_ioctl;
	ifp->if_start = hn_start;
	ifp->if_init = hn_ifinit;
	/* needed by hv_rf_on_device_add() code */
	ifp->if_mtu = ETHERMTU;
	IFQ_SET_MAXLEN(&ifp->if_snd, 512);
	ifp->if_snd.ifq_drv_maxlen = 511;
	IFQ_SET_READY(&ifp->if_snd);

	ifmedia_init(&sc->hn_media, 0, hn_ifmedia_upd, hn_ifmedia_sts);
	ifmedia_add(&sc->hn_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->hn_media, IFM_ETHER | IFM_AUTO);
	/* XXX ifmedia_set really should do this for us */
	sc->hn_media.ifm_media = sc->hn_media.ifm_cur->ifm_media;

	/*
	 * Tell upper layers that we support full VLAN capability.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |=
	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_TSO |
	    IFCAP_LRO;
	ifp->if_capenable |=
	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_TSO |
	    IFCAP_LRO;
	/*
	 * Only enable UDP checksum offloading when it is on 2012R2 or
	 * later.  UDP checksum offloading doesn't work on earlier
	 * Windows releases.
	 */
	if (hv_vmbus_protocal_version >= HV_VMBUS_VERSION_WIN8_1)
		ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO;
	else
		ifp->if_hwassist = CSUM_TCP | CSUM_TSO;
	error = hv_rf_on_device_add(device_ctx, &device_info);
	if (error)
		goto failed;

	if (device_info.link_state == 0) {
		sc->hn_carrier = 1;
	}

#if defined(INET) || defined(INET6)
	tcp_lro_init(&sc->hn_lro);
	/* Driver private LRO settings */
	sc->hn_lro.ifp = ifp;
#ifdef HN_LRO_HIWAT
	sc->hn_lro.lro_hiwat = sc->hn_lro_hiwat;
#endif
#endif	/* INET || INET6 */

#if __FreeBSD_version >= 1100045
	tso_maxlen = hn_tso_maxlen;
	if (tso_maxlen <= 0 || tso_maxlen > IP_MAXPACKET)
		tso_maxlen = IP_MAXPACKET;

	ifp->if_hw_tsomaxsegcount = HN_TX_DATA_SEGCNT_MAX;
	ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
	ifp->if_hw_tsomax = tso_maxlen -
	    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
#endif

	ether_ifattach(ifp, device_info.mac_addr);

#if __FreeBSD_version >= 1100045
	if_printf(ifp, "TSO: %u/%u/%u\n", ifp->if_hw_tsomax,
	    ifp->if_hw_tsomaxsegcount, ifp->if_hw_tsomaxsegsize);
#endif

	sc->hn_tx_chimney_max = sc->net_dev->send_section_size;
	sc->hn_tx_chimney_size = sc->hn_tx_chimney_max;
	if (hn_tx_chimney_size > 0 &&
	    hn_tx_chimney_size < sc->hn_tx_chimney_max)
		sc->hn_tx_chimney_size = hn_tx_chimney_size;
	ctx = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "lro_queued",
	    CTLFLAG_RW, &sc->hn_lro.lro_queued, 0, "LRO queued");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "lro_flushed",
	    CTLFLAG_RW, &sc->hn_lro.lro_flushed, 0, "LRO flushed");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "lro_tried",
	    CTLFLAG_RW, &sc->hn_lro_tried, "# of LRO tries");
#ifdef HN_LRO_HIWAT
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_hiwat",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, hn_lro_hiwat_sysctl,
	    "I", "LRO high watermark");
#endif
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "trust_hosttcp",
	    CTLFLAG_RW, &sc->hn_trust_hosttcp, 0,
	    "Trust TCP segment verification on host side, "
	    "when csum info is missing");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "csum_ip",
	    CTLFLAG_RW, &sc->hn_csum_ip, "RXCSUM IP");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "csum_tcp",
	    CTLFLAG_RW, &sc->hn_csum_tcp, "RXCSUM TCP");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "csum_trusted",
	    CTLFLAG_RW, &sc->hn_csum_trusted,
	    "# of TCP segments for which we trust the host's csum "
	    "verification");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "small_pkts",
	    CTLFLAG_RW, &sc->hn_small_pkts, "# of small packets received");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "no_txdescs",
	    CTLFLAG_RW, &sc->hn_no_txdescs, "# of times short of TX descs");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "send_failed",
	    CTLFLAG_RW, &sc->hn_send_failed, "# of Hyper-V send failures");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "txdma_failed",
	    CTLFLAG_RW, &sc->hn_txdma_failed, "# of TX DMA failures");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_collapsed",
	    CTLFLAG_RW, &sc->hn_tx_collapsed, "# of TX mbufs collapsed");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_chimney",
	    CTLFLAG_RW, &sc->hn_tx_chimney, "# of chimney sends");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_cnt",
	    CTLFLAG_RD, &sc->hn_txdesc_cnt, 0, "# of total TX descs");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_avail",
	    CTLFLAG_RD, &sc->hn_txdesc_avail, 0, "# of available TX descs");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_chimney_max",
	    CTLFLAG_RD, &sc->hn_tx_chimney_max, 0,
	    "Chimney send packet size upper boundary");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney_size",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, hn_tx_chimney_size_sysctl,
	    "I", "Chimney send packet size limit");
	if (unit == 0) {
		struct sysctl_ctx_list *dc_ctx;
		struct sysctl_oid_list *dc_child;
		devclass_t dc;

		/*
		 * Add sysctl nodes for devclass
		 */
		dc = device_get_devclass(dev);
		dc_ctx = devclass_get_sysctl_ctx(dc);
		dc_child = SYSCTL_CHILDREN(devclass_get_sysctl_tree(dc));

		SYSCTL_ADD_INT(dc_ctx, dc_child, OID_AUTO, "trust_hosttcp",
		    CTLFLAG_RD, &hn_trust_hosttcp, 0,
		    "Trust TCP segment verification on host side, "
		    "when csum info is missing (global setting)");
		SYSCTL_ADD_INT(dc_ctx, dc_child, OID_AUTO, "tx_chimney_size",
		    CTLFLAG_RD, &hn_tx_chimney_size, 0,
		    "Chimney send packet size limit");
#if __FreeBSD_version >= 1100045
		SYSCTL_ADD_INT(dc_ctx, dc_child, OID_AUTO, "tso_maxlen",
		    CTLFLAG_RD, &hn_tso_maxlen, 0, "TSO burst limit");
#endif
	}

	return (0);
failed:
	hn_destroy_tx_ring(sc);
	return (error);
}
/*
 * Standard detach entry point
 */
static int
netvsc_detach(device_t dev)
{
	struct hn_softc *sc = device_get_softc(dev);
	struct hv_device *hv_device = vmbus_get_devctx(dev);

	if (bootverbose)
		printf("netvsc_detach\n");

	/*
	 * XXXKYS:  Need to clean up all our
	 * driver state; this is the driver
	 * unloading.
	 */

	/*
	 * XXXKYS:  Need to stop outgoing traffic and unregister
	 * the netdevice.
	 */

	hv_rf_on_device_remove(hv_device, HV_RF_NV_DESTROY_CHANNEL);

	ifmedia_removeall(&sc->hn_media);
#if defined(INET) || defined(INET6)
	tcp_lro_free(&sc->hn_lro);
#endif
	hn_destroy_tx_ring(sc);

	return (0);
}
/*
 * Standard shutdown entry point
 */
static int
netvsc_shutdown(device_t dev)
{
	return (0);
}
static int
hn_txdesc_dmamap_load(struct hn_softc *sc, struct hn_txdesc *txd,
    struct mbuf **m_head, bus_dma_segment_t *segs, int *nsegs)
{
	struct mbuf *m = *m_head;
	int error;

	error = bus_dmamap_load_mbuf_sg(sc->hn_tx_data_dtag, txd->data_dmap,
	    m, segs, nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		struct mbuf *m_new;

		m_new = m_collapse(m, M_NOWAIT, HN_TX_DATA_SEGCNT_MAX);
		if (m_new == NULL)
			return ENOBUFS;
		else
			*m_head = m = m_new;
		sc->hn_tx_collapsed++;

		error = bus_dmamap_load_mbuf_sg(sc->hn_tx_data_dtag,
		    txd->data_dmap, m, segs, nsegs, BUS_DMA_NOWAIT);
	}
	if (!error) {
		bus_dmamap_sync(sc->hn_tx_data_dtag, txd->data_dmap,
		    BUS_DMASYNC_PREWRITE);
		txd->flags |= HN_TXD_FLAG_DMAMAP;
	}
	return error;
}
static void
hn_txdesc_dmamap_unload(struct hn_softc *sc, struct hn_txdesc *txd)
{
	if (txd->flags & HN_TXD_FLAG_DMAMAP) {
		bus_dmamap_sync(sc->hn_tx_data_dtag,
		    txd->data_dmap, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->hn_tx_data_dtag,
		    txd->data_dmap);
		txd->flags &= ~HN_TXD_FLAG_DMAMAP;
	}
}
static int
hn_txdesc_put(struct hn_softc *sc, struct hn_txdesc *txd)
{

	KASSERT((txd->flags & HN_TXD_FLAG_ONLIST) == 0,
	    ("put an onlist txd %#x", txd->flags));

	KASSERT(txd->refs > 0, ("invalid txd refs %d", txd->refs));
	if (atomic_fetchadd_int(&txd->refs, -1) != 1)
		return 0;

	hn_txdesc_dmamap_unload(sc, txd);
	if (txd->m != NULL) {
		m_freem(txd->m);
		txd->m = NULL;
	}

	txd->flags |= HN_TXD_FLAG_ONLIST;

	mtx_lock_spin(&sc->hn_txlist_spin);
	KASSERT(sc->hn_txdesc_avail >= 0 &&
	    sc->hn_txdesc_avail < sc->hn_txdesc_cnt,
	    ("txdesc_put: invalid txd avail %d", sc->hn_txdesc_avail));
	sc->hn_txdesc_avail++;
	SLIST_INSERT_HEAD(&sc->hn_txlist, txd, link);
	mtx_unlock_spin(&sc->hn_txlist_spin);

	return 1;
}
static __inline struct hn_txdesc *
hn_txdesc_get(struct hn_softc *sc)
{
	struct hn_txdesc *txd;

	mtx_lock_spin(&sc->hn_txlist_spin);
	txd = SLIST_FIRST(&sc->hn_txlist);
	if (txd != NULL) {
		KASSERT(sc->hn_txdesc_avail > 0,
		    ("txdesc_get: invalid txd avail %d", sc->hn_txdesc_avail));
		sc->hn_txdesc_avail--;
		SLIST_REMOVE_HEAD(&sc->hn_txlist, link);
	}
	mtx_unlock_spin(&sc->hn_txlist_spin);

	if (txd != NULL) {
		KASSERT(txd->m == NULL && txd->refs == 0 &&
		    (txd->flags & HN_TXD_FLAG_ONLIST), ("invalid txd"));
		txd->flags &= ~HN_TXD_FLAG_ONLIST;
		txd->refs = 1;
	}
	return txd;
}
static __inline void
hn_txdesc_hold(struct hn_txdesc *txd)
{

	/* 0->1 transition will never work */
	KASSERT(txd->refs > 0, ("invalid refs %d", txd->refs));
	atomic_add_int(&txd->refs, 1);
}
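
/*
 * Reference counting sketch, as used by hn_start_locked() and
 * netvsc_xmit_completion() below: a txd leaves hn_txdesc_get() with
 * refs == 1 (held on behalf of the send completion).  hn_start_locked()
 * calls hn_txdesc_hold() before hv_nv_on_send() so the descriptor stays
 * alive for ETHER_BPF_MTAP(), then drops that reference with
 * hn_txdesc_put(); the completion callback drops the other.  Whoever
 * decrements refs to zero returns the txd to sc->hn_txlist.
 */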
/*
 * Send completion processing
 *
 * Note: It looks like offset 0 of buf is reserved to hold the softc
 * pointer.  The sc pointer is not currently needed in this function, and
 * it is not presently populated by the TX function.
 */
void
netvsc_xmit_completion(void *context)
{
	netvsc_packet *packet = context;
	struct hn_txdesc *txd;
	struct hn_softc *sc;

	txd = (struct hn_txdesc *)(uintptr_t)
	    packet->compl.send.send_completion_tid;

	sc = txd->sc;
	sc->hn_txeof = 1;
	hn_txdesc_put(sc, txd);
}
void
netvsc_channel_rollup(struct hv_device *device_ctx)
{
	struct hn_softc *sc = device_get_softc(device_ctx->device);
	struct ifnet *ifp;

	if (!sc->hn_txeof)
		return;

	sc->hn_txeof = 0;
	ifp = sc->hn_ifp;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	hn_start_locked(ifp);
}
/*
 * Start a transmit of one or more packets
 */
static void
hn_start_locked(struct ifnet *ifp)
{
	hn_softc_t *sc = ifp->if_softc;
	struct hv_device *device_ctx = vmbus_get_devctx(sc->hn_dev);
	netvsc_dev *net_dev = sc->net_dev;
	netvsc_packet *packet;
	struct mbuf *m_head, *m;
	struct ether_vlan_header *eh;
	rndis_msg *rndis_mesg;
	rndis_packet *rndis_pkt;
	rndis_per_packet_info *rppi;
	ndis_8021q_info *rppi_vlan_info;
	rndis_tcp_ip_csum_info *csum_info;
	rndis_tcp_tso_info *tso_info;
	int ether_len;
	uint32_t rndis_msg_size = 0;
	uint32_t trans_proto_type;
	uint32_t send_buf_section_idx =
	    NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX;
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		bus_dma_segment_t segs[HN_TX_DATA_SEGCNT_MAX];
		int error, nsegs, i, send_failed = 0;
		struct hn_txdesc *txd;

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		txd = hn_txdesc_get(sc);
		if (txd == NULL) {
			sc->hn_no_txdescs++;
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}

		packet = &txd->netvsc_pkt;
		/* XXX not necessary */
		memset(packet, 0, sizeof(*packet));
		packet->is_data_pkt = TRUE;

		/* Initialize it from the mbuf */
		packet->tot_data_buf_len = m_head->m_pkthdr.len;

		/*
		 * extension points to the area reserved for the
		 * rndis_filter_packet, which is placed just after
		 * the netvsc_packet (and rppi struct, if present;
		 * length is updated later).
		 */
		rndis_mesg = txd->rndis_msg;
		/* XXX not necessary */
		memset(rndis_mesg, 0, HN_RNDIS_MSG_LEN);

		rndis_mesg->ndis_msg_type = REMOTE_NDIS_PACKET_MSG;

		rndis_pkt = &rndis_mesg->msg.packet;
		rndis_pkt->data_offset = sizeof(rndis_packet);
		rndis_pkt->data_length = packet->tot_data_buf_len;
		rndis_pkt->per_pkt_info_offset = sizeof(rndis_packet);

		rndis_msg_size = RNDIS_MESSAGE_SIZE(rndis_packet);

		/*
		 * If the Hyper-V infrastructure needs to embed a VLAN tag,
		 * initialize netvsc_packet and rppi struct values as needed.
		 */
		if (m_head->m_flags & M_VLANTAG) {
			/*
			 * Set up some additional fields so the Hyper-V
			 * infrastructure will stuff the VLAN tag into the
			 * outbound packet.
			 */
			rndis_msg_size += RNDIS_VLAN_PPI_SIZE;

			rppi = hv_set_rppi_data(rndis_mesg, RNDIS_VLAN_PPI_SIZE,
			    ieee_8021q_info);

			/* VLAN info immediately follows rppi struct */
			rppi_vlan_info = (ndis_8021q_info *)((char*)rppi +
			    rppi->per_packet_info_offset);
			/* FreeBSD does not support CFI or priority */
			rppi_vlan_info->u1.s1.vlan_id =
			    m_head->m_pkthdr.ether_vtag & 0xfff;
		}
		/*
		 * Only check the flags for outbound and ignore the
		 * ones for inbound.
		 */
		if (0 == (m_head->m_pkthdr.csum_flags & HV_CSUM_FOR_OUTBOUND)) {
			goto pre_send;
		}

		eh = mtod(m_head, struct ether_vlan_header*);
		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
			ether_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		} else {
			ether_len = ETHER_HDR_LEN;
		}

		trans_proto_type = get_transport_proto_type(m_head);
		if (TRANSPORT_TYPE_NOT_IP == trans_proto_type) {
			goto pre_send;
		}

		/*
		 * A TSO packet does not need the send-side checksum
		 * offload set up; TSO implies checksum offload.
		 */
		if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
			goto do_tso;
		}

		/* setup checksum offload */
		rndis_msg_size += RNDIS_CSUM_PPI_SIZE;
		rppi = hv_set_rppi_data(rndis_mesg, RNDIS_CSUM_PPI_SIZE,
		    tcpip_chksum_info);
		csum_info = (rndis_tcp_ip_csum_info *)((char*)rppi +
		    rppi->per_packet_info_offset);

		if (trans_proto_type & (TYPE_IPV4 << 16)) {
			csum_info->xmit.is_ipv4 = 1;
		} else {
			csum_info->xmit.is_ipv6 = 1;
		}

		if (trans_proto_type & TYPE_TCP) {
			csum_info->xmit.tcp_csum = 1;
			csum_info->xmit.tcp_header_offset = 0;
		} else if (trans_proto_type & TYPE_UDP) {
			csum_info->xmit.udp_csum = 1;
		}

		goto pre_send;

do_tso:
		/* setup TCP segmentation offload */
		rndis_msg_size += RNDIS_TSO_PPI_SIZE;
		rppi = hv_set_rppi_data(rndis_mesg, RNDIS_TSO_PPI_SIZE,
		    tcp_large_send_info);

		tso_info = (rndis_tcp_tso_info *)((char *)rppi +
		    rppi->per_packet_info_offset);
		tso_info->lso_v2_xmit.type =
		    RNDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;

#ifdef INET
		if (trans_proto_type & (TYPE_IPV4 << 16)) {
			struct ip *ip =
			    (struct ip *)(m_head->m_data + ether_len);
			unsigned long iph_len = ip->ip_hl << 2;
			struct tcphdr *th =
			    (struct tcphdr *)((caddr_t)ip + iph_len);

			tso_info->lso_v2_xmit.ip_version =
			    RNDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
			ip->ip_len = 0;
			ip->ip_sum = 0;

			th->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		}
#endif
#if defined(INET6) && defined(INET)
		else
#endif
#ifdef INET6
		{
			struct ip6_hdr *ip6 =
			    (struct ip6_hdr *)(m_head->m_data + ether_len);
			struct tcphdr *th = (struct tcphdr *)(ip6 + 1);

			tso_info->lso_v2_xmit.ip_version =
			    RNDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
			ip6->ip6_plen = 0;
			th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
		}
#endif
		tso_info->lso_v2_xmit.tcp_header_offset = 0;
		tso_info->lso_v2_xmit.mss = m_head->m_pkthdr.tso_segsz;

pre_send:
		rndis_mesg->msg_len = packet->tot_data_buf_len + rndis_msg_size;
		packet->tot_data_buf_len = rndis_mesg->msg_len;

		/* send packet with send buffer */
		if (packet->tot_data_buf_len < sc->hn_tx_chimney_size) {
			send_buf_section_idx =
			    hv_nv_get_next_send_section(net_dev);
			if (send_buf_section_idx !=
			    NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX) {
				char *dest = ((char *)net_dev->send_buf +
				    send_buf_section_idx *
				    net_dev->send_section_size);

				memcpy(dest, rndis_mesg, rndis_msg_size);
				dest += rndis_msg_size;
				for (m = m_head; m != NULL; m = m->m_next) {
					if (m->m_len) {
						memcpy(dest,
						    (void *)mtod(m, vm_offset_t),
						    m->m_len);
						dest += m->m_len;
					}
				}

				packet->send_buf_section_idx =
				    send_buf_section_idx;
				packet->send_buf_section_size =
				    packet->tot_data_buf_len;
				packet->page_buf_count = 0;
				sc->hn_tx_chimney++;
				goto do_send;
			}
		}
		error = hn_txdesc_dmamap_load(sc, txd, &m_head, segs, &nsegs);
		if (error) {
			int freed;

			/*
			 * This mbuf is not linked w/ the txd yet, so free
			 * it now.
			 */
			m_freem(m_head);
			freed = hn_txdesc_put(sc, txd);
			KASSERT(freed != 0,
			    ("fail to free txd upon txdma error"));

			sc->hn_txdma_failed++;
			if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
			continue;
		}

		packet->page_buf_count = nsegs +
		    HV_RF_NUM_TX_RESERVED_PAGE_BUFS;

		/* send packet with page buffer */
		packet->page_buffers[0].pfn = atop(txd->rndis_msg_paddr);
		packet->page_buffers[0].offset =
		    txd->rndis_msg_paddr & PAGE_MASK;
		packet->page_buffers[0].length = rndis_msg_size;

		/*
		 * Fill the page buffers with mbuf info starting at index
		 * HV_RF_NUM_TX_RESERVED_PAGE_BUFS.
		 */
		for (i = 0; i < nsegs; ++i) {
			hv_vmbus_page_buffer *pb = &packet->page_buffers[
			    i + HV_RF_NUM_TX_RESERVED_PAGE_BUFS];

			pb->pfn = atop(segs[i].ds_addr);
			pb->offset = segs[i].ds_addr & PAGE_MASK;
			pb->length = segs[i].ds_len;
		}

		packet->send_buf_section_idx =
		    NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX;
		packet->send_buf_section_size = 0;

do_send:
		txd->m = m_head;
		/* Set the completion routine */
		packet->compl.send.on_send_completion = netvsc_xmit_completion;
		packet->compl.send.send_completion_context = packet;
		packet->compl.send.send_completion_tid =
		    (uint64_t)(uintptr_t)txd;

again:
		/*
		 * Make sure that txd is not freed before ETHER_BPF_MTAP.
		 */
		hn_txdesc_hold(txd);
		error = hv_nv_on_send(device_ctx, packet);
		if (!error) {
			ETHER_BPF_MTAP(ifp, m_head);
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		}
		hn_txdesc_put(sc, txd);

		if (__predict_false(error)) {
			int freed;

			/*
			 * This should "really rarely" happen.
			 *
			 * XXX Too many RX to be acked or too many sideband
			 * commands to run?  Ask netvsc_channel_rollup()
			 * to kick start later.
			 */
			sc->hn_txeof = 1;
			if (!send_failed) {
				sc->hn_send_failed++;
				send_failed = 1;
				/*
				 * Try sending again after setting hn_txeof,
				 * in case we missed the last
				 * netvsc_channel_rollup().
				 */
				goto again;
			}
			if_printf(ifp, "send failed\n");

			/*
			 * This mbuf will be prepended, don't free it
			 * in hn_txdesc_put(); only unload it from the
			 * DMA map in hn_txdesc_put(), if it was loaded.
			 */
			txd->m = NULL;
			freed = hn_txdesc_put(sc, txd);
			KASSERT(freed != 0,
			    ("fail to free txd upon send error"));

			sc->hn_send_failed++;
			IF_PREPEND(&ifp->if_snd, m_head);
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
	}
}
/*
 * Link up/down notification
 */
void
netvsc_linkstatus_callback(struct hv_device *device_obj, uint32_t status)
{
	hn_softc_t *sc = device_get_softc(device_obj->device);

	if (sc == NULL) {
		return;
	}

	if (status == 1) {
		sc->hn_carrier = 1;
	} else {
		sc->hn_carrier = 0;
	}
}
/*
 * Append the specified data to the indicated mbuf chain.
 * Extend the mbuf chain if the new data does not fit in
 * existing space.
 *
 * This is a minor rewrite of m_append() from sys/kern/uipc_mbuf.c.
 * There should be an equivalent in the kernel mbuf code,
 * but there does not appear to be one yet.
 *
 * Differs from m_append() in that additional mbufs are
 * allocated with cluster size MJUMPAGESIZE, and filled
 * accordingly.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
static int
hv_m_append(struct mbuf *m0, int len, c_caddr_t cp)
{
	struct mbuf *m, *n;
	int remainder, space;

	for (m = m0; m->m_next != NULL; m = m->m_next)
		;
	remainder = len;
	space = M_TRAILINGSPACE(m);
	if (space > 0) {
		/*
		 * Copy into available space.
		 */
		if (space > remainder)
			space = remainder;
		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
		m->m_len += space;
		cp += space;
		remainder -= space;
	}
	while (remainder > 0) {
		/*
		 * Allocate a new mbuf; could check space
		 * and allocate a cluster instead.
		 */
		n = m_getjcl(M_DONTWAIT, m->m_type, 0, MJUMPAGESIZE);
		if (n == NULL)
			break;
		n->m_len = min(MJUMPAGESIZE, remainder);
		bcopy(cp, mtod(n, caddr_t), n->m_len);
		cp += n->m_len;
		remainder -= n->m_len;
		m->m_next = n;
		m = n;
	}
	if (m0->m_flags & M_PKTHDR)
		m0->m_pkthdr.len += len - remainder;

	return (remainder == 0);
}
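
/*
 * Usage sketch with hypothetical values: given a freshly allocated
 * packet-header mbuf m (m_len == 0) and a 6000-byte receive buffer buf,
 *
 *	if (!hv_m_append(m, 6000, buf))
 *		m_freem(m);	// ran out of clusters mid-copy
 *
 * on success m becomes a chain of MJUMPAGESIZE clusters holding all
 * 6000 bytes; netvsc_recv() below uses it this way for packets larger
 * than MHLEN.
 */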
/*
 * Called when we receive a data packet from the "wire" on the
 * specified device
 *
 * Note:  This is no longer used as a callback
 */
int
netvsc_recv(struct hv_device *device_ctx, netvsc_packet *packet,
    rndis_tcp_ip_csum_info *csum_info)
{
	hn_softc_t *sc = (hn_softc_t *)device_get_softc(device_ctx->device);
	struct mbuf *m_new;
	struct ifnet *ifp;
	device_t dev = device_ctx->device;
	int size, do_lro = 0;

	if (sc == NULL) {
		return (0); /* TODO: KYS how can this be! */
	}

	ifp = sc->arpcom.ac_ifp;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		return (0);
	}

	/*
	 * Bail out if packet contains more data than configured MTU.
	 */
	if (packet->tot_data_buf_len > (ifp->if_mtu + ETHER_HDR_LEN)) {
		return (0);
	} else if (packet->tot_data_buf_len <= MHLEN) {
		m_new = m_gethdr(M_NOWAIT, MT_DATA);
		if (m_new == NULL)
			return (0);
		memcpy(mtod(m_new, void *), packet->data,
		    packet->tot_data_buf_len);
		m_new->m_pkthdr.len = m_new->m_len = packet->tot_data_buf_len;
		sc->hn_small_pkts++;
	} else {
		/*
		 * Get an mbuf with a cluster.  For packets 2K or less,
		 * get a standard 2K cluster.  For anything larger, get a
		 * 4K (page size) cluster.  Any buffers larger than 4K can
		 * cause problems if looped around to the Hyper-V TX
		 * channel, so avoid them.
		 */
		size = MCLBYTES;
		if (packet->tot_data_buf_len > MCLBYTES) {
			/* 4096 */
			size = MJUMPAGESIZE;
		}

		m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, size);
		if (m_new == NULL) {
			device_printf(dev, "alloc mbuf failed.\n");
			return (0);
		}

		hv_m_append(m_new, packet->tot_data_buf_len, packet->data);
	}
	m_new->m_pkthdr.rcvif = ifp;
	/* receive side checksum offload */
	if (NULL != csum_info) {
		/* IP csum offload */
		if (csum_info->receive.ip_csum_succeeded) {
			m_new->m_pkthdr.csum_flags |=
			    (CSUM_IP_CHECKED | CSUM_IP_VALID);
			sc->hn_csum_ip++;
		}

		/* TCP csum offload */
		if (csum_info->receive.tcp_csum_succeeded) {
			m_new->m_pkthdr.csum_flags |=
			    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
			m_new->m_pkthdr.csum_data = 0xffff;
			sc->hn_csum_tcp++;
		}

		if (csum_info->receive.ip_csum_succeeded &&
		    csum_info->receive.tcp_csum_succeeded)
			do_lro = 1;
	} else {
		const struct ether_header *eh;
		uint16_t etype;
		int hoff;

		hoff = sizeof(*eh);
		if (m_new->m_len < hoff)
			goto skip;
		eh = mtod(m_new, struct ether_header *);
		etype = ntohs(eh->ether_type);
		if (etype == ETHERTYPE_VLAN) {
			const struct ether_vlan_header *evl;

			hoff = sizeof(*evl);
			if (m_new->m_len < hoff)
				goto skip;
			evl = mtod(m_new, struct ether_vlan_header *);
			etype = ntohs(evl->evl_proto);
		}

		if (etype == ETHERTYPE_IP) {
			int pr;

			pr = hn_check_iplen(m_new, hoff);
			if (pr == IPPROTO_TCP) {
				if (sc->hn_trust_hosttcp) {
					sc->hn_csum_trusted++;
					m_new->m_pkthdr.csum_flags |=
					    (CSUM_IP_CHECKED | CSUM_IP_VALID |
					     CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
					m_new->m_pkthdr.csum_data = 0xffff;
				}
				/* Rely on SW csum verification though... */
				do_lro = 1;
			}
		}
	}
skip:
	if ((packet->vlan_tci != 0) &&
	    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
		m_new->m_pkthdr.ether_vtag = packet->vlan_tci;
		m_new->m_flags |= M_VLANTAG;
	}

	/*
	 * Note:  Moved RX completion back to hv_nv_on_receive() so all
	 * messages (not just data messages) will trigger a response.
	 */

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	if ((ifp->if_capenable & IFCAP_LRO) && do_lro) {
#if defined(INET) || defined(INET6)
		struct lro_ctrl *lro = &sc->hn_lro;

		sc->hn_lro_tried++;
		if (tcp_lro_rx(lro, m_new, 0) == 0) {
			/* DONE! */
			return 0;
		}
#endif
	}

	/* We're not holding the lock here, so don't release it */
	(*ifp->if_input)(ifp, m_new);

	return (0);
}
void
netvsc_recv_rollup(struct hv_device *device_ctx)
{
#if defined(INET) || defined(INET6)
	hn_softc_t *sc = device_get_softc(device_ctx->device);
	struct lro_ctrl *lro = &sc->hn_lro;
	struct lro_entry *queued;

	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}
#endif
}
/*
 * Rules for using sc->temp_unusable:
 * 1.  sc->temp_unusable can only be read or written while holding NV_LOCK()
 * 2.  code reading sc->temp_unusable under NV_LOCK(), and finding
 *     sc->temp_unusable set, must release NV_LOCK() and exit
 * 3.  to retain exclusive control of the interface,
 *     sc->temp_unusable must be set by code before releasing NV_LOCK()
 * 4.  only code setting sc->temp_unusable can clear sc->temp_unusable
 * 5.  code setting sc->temp_unusable must eventually clear sc->temp_unusable
 */
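
/*
 * Illustrative acquire/release pattern under these rules (this is the
 * shape used by hn_ifinit() and the SIOCSIFMTU/SIOCSIFFLAGS handlers
 * below):
 *
 *	NV_LOCK(sc);
 *	if (sc->temp_unusable) {	// rule 2: someone else owns it
 *		NV_UNLOCK(sc);
 *		return;
 *	}
 *	sc->temp_unusable = TRUE;	// rule 3: claim exclusivity
 *	NV_UNLOCK(sc);
 *	... reconfigure the interface ...
 *	NV_LOCK(sc);
 *	sc->temp_unusable = FALSE;	// rules 4/5: only the owner clears
 *	NV_UNLOCK(sc);
 */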
/*
 * Standard ioctl entry point.  Called when the user wants to configure
 * the interface.
 */
static int
hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	hn_softc_t *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
#ifdef INET
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	netvsc_device_info device_info;
	struct hv_device *hn_dev;
	int mask, error = 0;
	int retry_cnt = 500;

	switch(cmd) {

	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				hn_ifinit(sc);
			arp_ifinit(ifp, ifa);
		} else
#endif
		error = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCSIFMTU:
		hn_dev = vmbus_get_devctx(sc->hn_dev);

		/* Check MTU value change */
		if (ifp->if_mtu == ifr->ifr_mtu)
			break;

		if (ifr->ifr_mtu > NETVSC_MAX_CONFIGURABLE_MTU) {
			error = EINVAL;
			break;
		}

		/* Obtain and record requested MTU */
		ifp->if_mtu = ifr->ifr_mtu;
		/*
		 * Make sure that LRO high watermark is still valid,
		 * after MTU change (the 2*MTU limit).
		 */
		if (!HN_LRO_HIWAT_ISVALID(sc, sc->hn_lro_hiwat))
			hn_set_lro_hiwat(sc, HN_LRO_HIWAT_MTULIM(ifp));

		do {
			NV_LOCK(sc);
			if (!sc->temp_unusable) {
				sc->temp_unusable = TRUE;
				retry_cnt = -1;
			}
			NV_UNLOCK(sc);
			if (retry_cnt > 0) {
				retry_cnt--;
				DELAY(5 * 1000);
			}
		} while (retry_cnt > 0);

		if (retry_cnt == 0) {
			error = EINVAL;
			break;
		}

		/* We must remove and add back the device to cause the new
		 * MTU to take effect.  This includes tearing down, but not
		 * deleting the channel, then bringing it back up.
		 */
		error = hv_rf_on_device_remove(hn_dev, HV_RF_NV_RETAIN_CHANNEL);
		if (error) {
			NV_LOCK(sc);
			sc->temp_unusable = FALSE;
			NV_UNLOCK(sc);
			break;
		}
		error = hv_rf_on_device_add(hn_dev, &device_info);
		if (error) {
			NV_LOCK(sc);
			sc->temp_unusable = FALSE;
			NV_UNLOCK(sc);
			break;
		}

		sc->hn_tx_chimney_max = sc->net_dev->send_section_size;
		if (sc->hn_tx_chimney_size > sc->hn_tx_chimney_max)
			sc->hn_tx_chimney_size = sc->hn_tx_chimney_max;
		hn_ifinit_locked(sc);

		NV_LOCK(sc);
		sc->temp_unusable = FALSE;
		NV_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		do {
			NV_LOCK(sc);
			if (!sc->temp_unusable) {
				sc->temp_unusable = TRUE;
				retry_cnt = -1;
			}
			NV_UNLOCK(sc);
			if (retry_cnt > 0) {
				retry_cnt--;
				DELAY(5 * 1000);
			}
		} while (retry_cnt > 0);

		if (retry_cnt == 0) {
			error = EINVAL;
			break;
		}

		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC. Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.
			 */
#ifdef notyet
			/* Fixme:  Promiscuous mode? */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->hn_if_flags & IFF_PROMISC)) {
				/* do something here for Hyper-V */
			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->hn_if_flags & IFF_PROMISC) {
				/* do something here for Hyper-V */
			} else
#endif
				hn_ifinit_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				hn_stop(sc);
			}
		}
		NV_LOCK(sc);
		sc->temp_unusable = FALSE;
		NV_UNLOCK(sc);
		sc->hn_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			if (IFCAP_TXCSUM & ifp->if_capenable) {
				ifp->if_capenable &= ~IFCAP_TXCSUM;
				ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP);
			} else {
				ifp->if_capenable |= IFCAP_TXCSUM;
				/*
				 * Only enable UDP checksum offloading on
				 * Windows Server 2012R2 or later releases.
				 */
				if (hv_vmbus_protocal_version >=
				    HV_VMBUS_VERSION_WIN8_1) {
					ifp->if_hwassist |=
					    (CSUM_TCP | CSUM_UDP);
				} else {
					ifp->if_hwassist |= CSUM_TCP;
				}
			}
		}

		if (mask & IFCAP_RXCSUM) {
			if (IFCAP_RXCSUM & ifp->if_capenable) {
				ifp->if_capenable &= ~IFCAP_RXCSUM;
			} else {
				ifp->if_capenable |= IFCAP_RXCSUM;
			}
		}
		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;

		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			ifp->if_hwassist ^= CSUM_IP_TSO;
		}

		if (mask & IFCAP_TSO6) {
			ifp->if_capenable ^= IFCAP_TSO6;
			ifp->if_hwassist ^= CSUM_IP6_TSO;
		}

		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
#ifdef notyet
		/* Fixme:  Multicast mode? */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			NV_LOCK(sc);
			netvsc_setmulti(sc);
			NV_UNLOCK(sc);
		}
#endif
		error = 0;
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->hn_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
static void
hn_stop(hn_softc_t *sc)
{
	struct ifnet *ifp;
	int ret;
	struct hv_device *device_ctx = vmbus_get_devctx(sc->hn_dev);

	ifp = sc->hn_ifp;

	if (bootverbose)
		printf(" Closing Device ...\n");

	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	if_link_state_change(ifp, LINK_STATE_DOWN);
	sc->hn_initdone = 0;

	ret = hv_rf_on_close(device_ctx);
}
/*
 * FreeBSD transmit entry point
 */
static void
hn_start(struct ifnet *ifp)
{
	hn_softc_t *sc;

	sc = ifp->if_softc;
	NV_LOCK(sc);
	if (sc->temp_unusable) {
		NV_UNLOCK(sc);
		return;
	}
	hn_start_locked(ifp);
	NV_UNLOCK(sc);
}
static void
hn_ifinit_locked(hn_softc_t *sc)
{
	struct ifnet *ifp;
	struct hv_device *device_ctx = vmbus_get_devctx(sc->hn_dev);
	int ret;

	ifp = sc->hn_ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		return;
	}

	hv_promisc_mode = 1;

	ret = hv_rf_on_open(device_ctx);
	if (ret != 0) {
		return;
	} else {
		sc->hn_initdone = 1;
	}
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	if_link_state_change(ifp, LINK_STATE_UP);
}
static void
hn_ifinit(void *xsc)
{
	hn_softc_t *sc = xsc;

	NV_LOCK(sc);
	if (sc->temp_unusable) {
		NV_UNLOCK(sc);
		return;
	}
	sc->temp_unusable = TRUE;
	NV_UNLOCK(sc);

	hn_ifinit_locked(sc);

	NV_LOCK(sc);
	sc->temp_unusable = FALSE;
	NV_UNLOCK(sc);
}
#ifdef LATER
static void
hn_watchdog(struct ifnet *ifp)
{
	hn_softc_t *sc;
	sc = ifp->if_softc;

	printf("hn%d: watchdog timeout -- resetting\n", sc->hn_unit);
	hn_ifinit(sc);    /*???*/
	ifp->if_oerrors++;
}
#endif
#ifdef HN_LRO_HIWAT
static int
hn_lro_hiwat_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int hiwat, error;

	hiwat = sc->hn_lro_hiwat;
	error = sysctl_handle_int(oidp, &hiwat, 0, req);
	if (error || req->newptr == NULL)
		return error;

	if (!HN_LRO_HIWAT_ISVALID(sc, hiwat))
		return EINVAL;

	if (sc->hn_lro_hiwat != hiwat)
		hn_set_lro_hiwat(sc, hiwat);
	return 0;
}
#endif	/* HN_LRO_HIWAT */
static int
hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int chimney_size, error;

	chimney_size = sc->hn_tx_chimney_size;
	error = sysctl_handle_int(oidp, &chimney_size, 0, req);
	if (error || req->newptr == NULL)
		return error;

	if (chimney_size > sc->hn_tx_chimney_max || chimney_size <= 0)
		return EINVAL;

	if (sc->hn_tx_chimney_size != chimney_size)
		sc->hn_tx_chimney_size = chimney_size;
	return 0;
}
static int
hn_check_iplen(const struct mbuf *m, int hoff)
{
	const struct ip *ip;
	int len, iphlen, iplen;
	const struct tcphdr *th;
	int thoff;				/* TCP data offset */

	len = hoff + sizeof(struct ip);

	/* The packet must be at least the size of an IP header. */
	if (m->m_pkthdr.len < len)
		return IPPROTO_DONE;

	/* The fixed IP header must reside completely in the first mbuf. */
	if (m->m_len < len)
		return IPPROTO_DONE;

	ip = mtodo(m, hoff);

	/* Bound check the packet's stated IP header length. */
	iphlen = ip->ip_hl << 2;
	if (iphlen < sizeof(struct ip))		/* minimum header length */
		return IPPROTO_DONE;

	/* The full IP header must reside completely in the one mbuf. */
	if (m->m_len < hoff + iphlen)
		return IPPROTO_DONE;

	iplen = ntohs(ip->ip_len);

	/*
	 * Check that the amount of data in the buffers is at least as much
	 * as the IP header would have us expect.
	 */
	if (m->m_pkthdr.len < hoff + iplen)
		return IPPROTO_DONE;

	/*
	 * Ignore IP fragments.
	 */
	if (ntohs(ip->ip_off) & (IP_OFFMASK | IP_MF))
		return IPPROTO_DONE;

	/*
	 * The TCP/IP or UDP/IP header must be entirely contained within
	 * the first fragment of a packet.
	 */
	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (iplen < iphlen + sizeof(struct tcphdr))
			return IPPROTO_DONE;
		if (m->m_len < hoff + iphlen + sizeof(struct tcphdr))
			return IPPROTO_DONE;
		th = (const struct tcphdr *)((const uint8_t *)ip + iphlen);
		thoff = th->th_off << 2;
		if (thoff < sizeof(struct tcphdr) || thoff + iphlen > iplen)
			return IPPROTO_DONE;
		if (m->m_len < hoff + iphlen + thoff)
			return IPPROTO_DONE;
		break;
	case IPPROTO_UDP:
		if (iplen < iphlen + sizeof(struct udphdr))
			return IPPROTO_DONE;
		if (m->m_len < hoff + iphlen + sizeof(struct udphdr))
			return IPPROTO_DONE;
		break;
	default:
		if (iplen < iphlen)
			return IPPROTO_DONE;
		break;
	}

	return ip->ip_p;
}
static void
hn_dma_map_paddr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr = arg;

	if (error)
		return;

	KASSERT(nseg == 1, ("too many segments %d!", nseg));
	*paddr = segs->ds_addr;
}
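
/*
 * hn_dma_map_paddr() is the callback handed to bus_dmamap_load() in
 * hn_create_tx_ring() below; since the RNDIS-message DMA tag is set up
 * so a single segment covers the whole message, the KASSERT above can
 * insist on one segment and the callback simply records its bus address.
 */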
static int
hn_create_tx_ring(struct hn_softc *sc)
{
	bus_dma_tag_t parent_dtag;
	int error, i;

	sc->hn_txdesc_cnt = HN_TX_DESC_CNT;
	sc->hn_txdesc = malloc(sizeof(struct hn_txdesc) * sc->hn_txdesc_cnt,
	    M_NETVSC, M_WAITOK | M_ZERO);
	SLIST_INIT(&sc->hn_txlist);
	mtx_init(&sc->hn_txlist_spin, "hn txlist", NULL, MTX_SPIN);

	parent_dtag = bus_get_dma_tag(sc->hn_dev);

	/* DMA tag for RNDIS messages. */
	error = bus_dma_tag_create(parent_dtag, /* parent */
	    HN_RNDIS_MSG_ALIGN,		/* alignment */
	    HN_RNDIS_MSG_BOUNDARY,	/* boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    HN_RNDIS_MSG_LEN,		/* maxsize */
	    1,				/* nsegments */
	    HN_RNDIS_MSG_LEN,		/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockfuncarg */
	    &sc->hn_tx_rndis_dtag);
	if (error) {
		device_printf(sc->hn_dev, "failed to create rndis dmatag\n");
		return error;
	}

	/* DMA tag for data. */
	error = bus_dma_tag_create(parent_dtag, /* parent */
	    1,				/* alignment */
	    HN_TX_DATA_BOUNDARY,	/* boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    HN_TX_DATA_MAXSIZE,		/* maxsize */
	    HN_TX_DATA_SEGCNT_MAX,	/* nsegments */
	    HN_TX_DATA_SEGSIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockfuncarg */
	    &sc->hn_tx_data_dtag);
	if (error) {
		device_printf(sc->hn_dev, "failed to create data dmatag\n");
		return error;
	}

	for (i = 0; i < sc->hn_txdesc_cnt; ++i) {
		struct hn_txdesc *txd = &sc->hn_txdesc[i];

		txd->sc = sc;

		/*
		 * Allocate and load RNDIS messages.
		 */
		error = bus_dmamem_alloc(sc->hn_tx_rndis_dtag,
		    (void **)&txd->rndis_msg,
		    BUS_DMA_WAITOK | BUS_DMA_COHERENT,
		    &txd->rndis_msg_dmap);
		if (error) {
			device_printf(sc->hn_dev,
			    "failed to allocate rndis_msg, %d\n", i);
			return error;
		}

		error = bus_dmamap_load(sc->hn_tx_rndis_dtag,
		    txd->rndis_msg_dmap,
		    txd->rndis_msg, HN_RNDIS_MSG_LEN,
		    hn_dma_map_paddr, &txd->rndis_msg_paddr,
		    BUS_DMA_NOWAIT);
		if (error) {
			device_printf(sc->hn_dev,
			    "failed to load rndis_msg, %d\n", i);
			bus_dmamem_free(sc->hn_tx_rndis_dtag,
			    txd->rndis_msg, txd->rndis_msg_dmap);
			return error;
		}

		/* DMA map for TX data. */
		error = bus_dmamap_create(sc->hn_tx_data_dtag, 0,
		    &txd->data_dmap);
		if (error) {
			device_printf(sc->hn_dev,
			    "failed to allocate tx data dmamap\n");
			bus_dmamap_unload(sc->hn_tx_rndis_dtag,
			    txd->rndis_msg_dmap);
			bus_dmamem_free(sc->hn_tx_rndis_dtag,
			    txd->rndis_msg, txd->rndis_msg_dmap);
			return error;
		}

		/* All set, put it to list */
		txd->flags |= HN_TXD_FLAG_ONLIST;
		SLIST_INSERT_HEAD(&sc->hn_txlist, txd, link);
	}
	sc->hn_txdesc_avail = sc->hn_txdesc_cnt;

	return 0;
}
static void
hn_destroy_tx_ring(struct hn_softc *sc)
{
	struct hn_txdesc *txd;

	while ((txd = SLIST_FIRST(&sc->hn_txlist)) != NULL) {
		KASSERT(txd->m == NULL, ("still has mbuf installed"));
		KASSERT((txd->flags & HN_TXD_FLAG_DMAMAP) == 0,
		    ("still dma mapped"));
		SLIST_REMOVE_HEAD(&sc->hn_txlist, link);

		bus_dmamap_unload(sc->hn_tx_rndis_dtag,
		    txd->rndis_msg_dmap);
		bus_dmamem_free(sc->hn_tx_rndis_dtag,
		    txd->rndis_msg, txd->rndis_msg_dmap);

		bus_dmamap_destroy(sc->hn_tx_data_dtag, txd->data_dmap);
	}

	if (sc->hn_tx_data_dtag != NULL)
		bus_dma_tag_destroy(sc->hn_tx_data_dtag);
	if (sc->hn_tx_rndis_dtag != NULL)
		bus_dma_tag_destroy(sc->hn_tx_rndis_dtag);
	free(sc->hn_txdesc, M_NETVSC);
	mtx_destroy(&sc->hn_txlist_spin);
}
static device_method_t netvsc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		netvsc_probe),
	DEVMETHOD(device_attach,	netvsc_attach),
	DEVMETHOD(device_detach,	netvsc_detach),
	DEVMETHOD(device_shutdown,	netvsc_shutdown),

	{ 0, 0 }
};

static driver_t netvsc_driver = {
	NETVSC_DEVNAME,
	netvsc_methods,
	sizeof(hn_softc_t)
};

static devclass_t netvsc_devclass;

DRIVER_MODULE(hn, vmbus, netvsc_driver, netvsc_devclass, 0, 0);
MODULE_VERSION(hn, 1);
MODULE_DEPEND(hn, vmbus, 1, 1, 1);