/*-
 * Copyright (c) 2010-2012 Citrix Inc.
 * Copyright (c) 2009-2012 Microsoft Corp.
 * Copyright (c) 2012 NetApp Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2004-2006 Kip Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/buf_ring.h>

#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <netinet/ip6.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/frame.h>
#include <machine/vmparam.h>

#include <sys/rman.h>
#include <sys/mutex.h>
#include <sys/errno.h>
#include <sys/types.h>
#include <machine/atomic.h>
#include <machine/intr_machdep.h>
#include <machine/in_cksum.h>

#include <dev/hyperv/include/hyperv.h>
#include "hv_net_vsc.h"
#include "hv_rndis.h"
#include "hv_rndis_filter.h"
/* Short for Hyper-V network interface */
#define NETVSC_DEVNAME    "hn"

/*
 * It looks like offset 0 of buf is reserved to hold the softc pointer.
 * The sc pointer is evidently not needed, and is not presently populated.
 * The packet offset is where the netvsc_packet starts in the buffer.
 */
#define HV_NV_SC_PTR_OFFSET_IN_BUF    0
#define HV_NV_PACKET_OFFSET_IN_BUF    16
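/*
 * Illustrative layout implied by the two offsets above (a sketch only;
 * the size of the reserved softc slot is an assumption based on the
 * 16-byte packet offset):
 *
 *   offset 0                offset 16
 *   +-----------------------+-------------------------------------+
 *   | reserved softc ptr    | netvsc_packet ...                   |
 *   | (currently unused)    |                                     |
 *   +-----------------------+-------------------------------------+
 */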
/* YYY should get it from the underlying channel */
#define HN_TX_DESC_CNT          512

#define HN_LROENT_CNT_DEF       128

#define HN_RNDIS_MSG_LEN        \
    (sizeof(rndis_msg) +        \
     RNDIS_VLAN_PPI_SIZE +      \
     RNDIS_TSO_PPI_SIZE +       \
     RNDIS_CSUM_PPI_SIZE)
#define HN_RNDIS_MSG_BOUNDARY   PAGE_SIZE
#define HN_RNDIS_MSG_ALIGN      CACHE_LINE_SIZE

#define HN_TX_DATA_BOUNDARY     PAGE_SIZE
#define HN_TX_DATA_MAXSIZE      IP_MAXPACKET
#define HN_TX_DATA_SEGSIZE      PAGE_SIZE
#define HN_TX_DATA_SEGCNT_MAX   \
    (NETVSC_PACKET_MAXPAGE - HV_RF_NUM_TX_RESERVED_PAGE_BUFS)

#define HN_DIRECT_TX_SIZE_DEF   128
struct hn_txdesc {
#ifndef HN_USE_TXDESC_BUFRING
    SLIST_ENTRY(hn_txdesc) link;
#endif
    struct mbuf     *m;
    struct hn_tx_ring *txr;
    int             refs;
    uint32_t        flags;          /* HN_TXD_FLAG_ */
    netvsc_packet   netvsc_pkt;     /* XXX to be removed */

    bus_dmamap_t    data_dmap;

    bus_addr_t      rndis_msg_paddr;
    rndis_msg       *rndis_msg;
    bus_dmamap_t    rndis_msg_dmap;
};

#define HN_TXD_FLAG_ONLIST      0x1
#define HN_TXD_FLAG_DMAMAP      0x2
/*
 * Only enable UDP checksum offloading when it is on 2012R2 or
 * later.  UDP checksum offloading doesn't work on earlier
 * Windows releases.
 */
#define HN_CSUM_ASSIST_WIN8     (CSUM_TCP)
#define HN_CSUM_ASSIST          (CSUM_IP | CSUM_UDP | CSUM_TCP)

#define HN_LRO_LENLIM_DEF       (25 * ETHERMTU)
/* YYY 2*MTU is a bit rough, but should be good enough. */
#define HN_LRO_LENLIM_MIN(ifp)  (2 * (ifp)->if_mtu)

#define HN_LRO_ACKCNT_DEF       1
/*
 * Be aware that this sleepable mutex will exhibit WITNESS errors when
 * certain TCP and ARP code paths are taken.  This appears to be a
 * well-known condition, as all other drivers checked use a sleeping
 * mutex to protect their transmit paths.
 * Also be aware that mutexes do not play well with semaphores, and there
 * is a conflicting semaphore in a certain channel code path.
 */
#define NV_LOCK_INIT(_sc, _name) \
    mtx_init(&(_sc)->hn_lock, _name, MTX_NETWORK_LOCK, MTX_DEF)
#define NV_LOCK(_sc)            mtx_lock(&(_sc)->hn_lock)
#define NV_LOCK_ASSERT(_sc)     mtx_assert(&(_sc)->hn_lock, MA_OWNED)
#define NV_UNLOCK(_sc)          mtx_unlock(&(_sc)->hn_lock)
#define NV_LOCK_DESTROY(_sc)    mtx_destroy(&(_sc)->hn_lock)
int hv_promisc_mode = 0;    /* normal mode by default */

SYSCTL_NODE(_hw, OID_AUTO, hn, CTLFLAG_RD, NULL, "Hyper-V network interface");

/* Trust TCP segment verification on the host side. */
static int hn_trust_hosttcp = 1;
SYSCTL_INT(_hw_hn, OID_AUTO, trust_hosttcp, CTLFLAG_RDTUN,
    &hn_trust_hosttcp, 0,
    "Trust tcp segment verification on host side, "
    "when csum info is missing (global setting)");

/* Trust UDP datagram verification on the host side. */
static int hn_trust_hostudp = 1;
SYSCTL_INT(_hw_hn, OID_AUTO, trust_hostudp, CTLFLAG_RDTUN,
    &hn_trust_hostudp, 0,
    "Trust udp datagram verification on host side, "
    "when csum info is missing (global setting)");

/* Trust IP packet verification on the host side. */
static int hn_trust_hostip = 1;
SYSCTL_INT(_hw_hn, OID_AUTO, trust_hostip, CTLFLAG_RDTUN,
    &hn_trust_hostip, 0,
    "Trust ip packet verification on host side, "
    "when csum info is missing (global setting)");
#if __FreeBSD_version >= 1100045
/* Limit TSO burst size */
static int hn_tso_maxlen = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, tso_maxlen, CTLFLAG_RDTUN,
    &hn_tso_maxlen, 0, "TSO burst limit");
#endif

/* Limit chimney send size */
static int hn_tx_chimney_size = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, tx_chimney_size, CTLFLAG_RDTUN,
    &hn_tx_chimney_size, 0, "Chimney send packet size limit");

/* Limit the size of packet for direct transmission */
static int hn_direct_tx_size = HN_DIRECT_TX_SIZE_DEF;
SYSCTL_INT(_hw_hn, OID_AUTO, direct_tx_size, CTLFLAG_RDTUN,
    &hn_direct_tx_size, 0, "Size of the packet for direct transmission");

#if defined(INET) || defined(INET6)
#if __FreeBSD_version >= 1100095
static int hn_lro_entry_count = HN_LROENT_CNT_DEF;
SYSCTL_INT(_hw_hn, OID_AUTO, lro_entry_count, CTLFLAG_RDTUN,
    &hn_lro_entry_count, 0, "LRO entry count");
#endif
#endif

static int hn_share_tx_taskq = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, share_tx_taskq, CTLFLAG_RDTUN,
    &hn_share_tx_taskq, 0, "Enable shared TX taskqueue");

static struct taskqueue *hn_tx_taskq;

#ifndef HN_USE_TXDESC_BUFRING
static int hn_use_txdesc_bufring = 0;
#else
static int hn_use_txdesc_bufring = 1;
#endif
SYSCTL_INT(_hw_hn, OID_AUTO, use_txdesc_bufring, CTLFLAG_RD,
    &hn_use_txdesc_bufring, 0, "Use buf_ring for TX descriptors");
/*
 * Forward declarations
 */
static void hn_stop(hn_softc_t *sc);
static void hn_ifinit_locked(hn_softc_t *sc);
static void hn_ifinit(void *xsc);
static int  hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int  hn_start_locked(struct hn_tx_ring *txr, int len);
static void hn_start(struct ifnet *ifp);
static void hn_start_txeof(struct hn_tx_ring *);
static int  hn_ifmedia_upd(struct ifnet *ifp);
static void hn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
static int  hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS);
static int  hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS);
static int  hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS);
static int  hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS);
static int  hn_rx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS);
static int  hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS);
static int  hn_tx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS);
static int  hn_tx_conf_int_sysctl(SYSCTL_HANDLER_ARGS);
static int  hn_check_iplen(const struct mbuf *, int);
static int  hn_create_tx_ring(struct hn_softc *, int);
static void hn_destroy_tx_ring(struct hn_tx_ring *);
static int  hn_create_tx_data(struct hn_softc *);
static void hn_destroy_tx_data(struct hn_softc *);
static void hn_start_taskfunc(void *xsc, int pending);
static void hn_txeof_taskfunc(void *xsc, int pending);
static void hn_stop_tx_tasks(struct hn_softc *);
static int  hn_encap(struct hn_tx_ring *, struct hn_txdesc *, struct mbuf **);
static void hn_create_rx_data(struct hn_softc *sc);
static void hn_destroy_rx_data(struct hn_softc *sc);
static void hn_set_tx_chimney_size(struct hn_softc *, int);
static int
hn_ifmedia_upd(struct ifnet *ifp __unused)
{

    return EOPNOTSUPP;
}

static void
hn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
    struct hn_softc *sc = ifp->if_softc;

    ifmr->ifm_status = IFM_AVALID;
    ifmr->ifm_active = IFM_ETHER;

    if (!sc->hn_carrier) {
        ifmr->ifm_active |= IFM_NONE;
        return;
    }
    ifmr->ifm_status |= IFM_ACTIVE;
    ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
}
/* {F8615163-DF3E-46c5-913F-F2D2F965ED0E} */
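/*
 * Note: the first three GUID fields are stored little-endian, per the
 * Microsoft GUID wire format, which is why the .data initializer below
 * looks byte-swapped relative to the string form above.
 */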
static const hv_guid g_net_vsc_device_type = {
    .data = {0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
        0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E}
};

/*
 * Standard probe entry point.
 */
static int
netvsc_probe(device_t dev)
{
    const char *p;

    p = vmbus_get_type(dev);
    if (!memcmp(p, &g_net_vsc_device_type.data, sizeof(hv_guid))) {
        device_set_desc(dev, "Synthetic Network Interface");
        if (bootverbose)
            printf("Netvsc probe... DONE\n");

        return (BUS_PROBE_DEFAULT);
    }

    return (ENXIO);
}
/*
 * Standard attach entry point.
 *
 * Called when the driver is loaded.  It allocates needed resources,
 * and initializes the "hardware" and software.
 */
static int
netvsc_attach(device_t dev)
{
    struct hv_device *device_ctx = vmbus_get_devctx(dev);
    netvsc_device_info device_info;
    hn_softc_t *sc;
    int unit = device_get_unit(dev);
    struct ifnet *ifp = NULL;
    int error;
#if __FreeBSD_version >= 1100045
    int tso_maxlen;
#endif

    sc = device_get_softc(dev);
    if (sc == NULL) {
        return (ENOMEM);
    }

    bzero(sc, sizeof(hn_softc_t));
    sc->hn_unit = unit;
    sc->hn_dev = dev;

    if (hn_tx_taskq == NULL) {
        sc->hn_tx_taskq = taskqueue_create("hn_tx", M_WAITOK,
            taskqueue_thread_enqueue, &sc->hn_tx_taskq);
        taskqueue_start_threads(&sc->hn_tx_taskq, 1, PI_NET, "%s tx",
            device_get_nameunit(dev));
    } else {
        sc->hn_tx_taskq = hn_tx_taskq;
    }

    NV_LOCK_INIT(sc, "NetVSCLock");
    sc->hn_dev_obj = device_ctx;

    ifp = sc->hn_ifp = if_alloc(IFT_ETHER);
    ifp->if_softc = sc;

    error = hn_create_tx_data(sc);
    if (error)
        goto failed;

    hn_create_rx_data(sc);

    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    ifp->if_dunit = unit;
    ifp->if_dname = NETVSC_DEVNAME;

    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_ioctl = hn_ioctl;
    ifp->if_start = hn_start;
    ifp->if_init = hn_ifinit;
    /* needed by hv_rf_on_device_add() code */
    ifp->if_mtu = ETHERMTU;
    IFQ_SET_MAXLEN(&ifp->if_snd, 512);
    ifp->if_snd.ifq_drv_maxlen = 511;
    IFQ_SET_READY(&ifp->if_snd);
    ifmedia_init(&sc->hn_media, 0, hn_ifmedia_upd, hn_ifmedia_sts);
    ifmedia_add(&sc->hn_media, IFM_ETHER | IFM_AUTO, 0, NULL);
    ifmedia_set(&sc->hn_media, IFM_ETHER | IFM_AUTO);
    /* XXX ifmedia_set really should do this for us */
    sc->hn_media.ifm_media = sc->hn_media.ifm_cur->ifm_media;
    /*
     * Tell upper layers that we support full VLAN capability.
     */
    ifp->if_hdrlen = sizeof(struct ether_vlan_header);
    ifp->if_capabilities |=
        IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_TSO |
        IFCAP_LRO;
    ifp->if_capenable |=
        IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_TSO |
        IFCAP_LRO;
    ifp->if_hwassist = sc->hn_tx_ring[0].hn_csum_assist | CSUM_TSO;
    error = hv_rf_on_device_add(device_ctx, &device_info);
    if (error)
        goto failed;

    if (device_info.link_state == 0) {
        sc->hn_carrier = 1;
    }

#if __FreeBSD_version >= 1100045
    tso_maxlen = hn_tso_maxlen;
    if (tso_maxlen <= 0 || tso_maxlen > IP_MAXPACKET)
        tso_maxlen = IP_MAXPACKET;

    ifp->if_hw_tsomaxsegcount = HN_TX_DATA_SEGCNT_MAX;
    ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
    ifp->if_hw_tsomax = tso_maxlen -
        (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
#endif

    ether_ifattach(ifp, device_info.mac_addr);

#if __FreeBSD_version >= 1100045
    if_printf(ifp, "TSO: %u/%u/%u\n", ifp->if_hw_tsomax,
        ifp->if_hw_tsomaxsegcount, ifp->if_hw_tsomaxsegsize);
#endif

    sc->hn_tx_chimney_max = sc->net_dev->send_section_size;
    hn_set_tx_chimney_size(sc, sc->hn_tx_chimney_max);
    if (hn_tx_chimney_size > 0 &&
        hn_tx_chimney_size < sc->hn_tx_chimney_max)
        hn_set_tx_chimney_size(sc, hn_tx_chimney_size);

    return (0);

failed:
    hn_destroy_tx_data(sc);
    if (ifp != NULL)
        if_free(ifp);
    return (error);
}
/*
 * Standard detach entry point
 */
static int
netvsc_detach(device_t dev)
{
    struct hn_softc *sc = device_get_softc(dev);
    struct hv_device *hv_device = vmbus_get_devctx(dev);

    if (bootverbose)
        printf("netvsc_detach\n");

    /*
     * XXXKYS: Need to clean up all our
     * driver state; this is the driver
     * unloading.
     */

    /*
     * XXXKYS: Need to stop outgoing traffic and unregister
     * the netdevice.
     */

    hv_rf_on_device_remove(hv_device, HV_RF_NV_DESTROY_CHANNEL);

    hn_stop_tx_tasks(sc);

    ifmedia_removeall(&sc->hn_media);
    hn_destroy_rx_data(sc);
    hn_destroy_tx_data(sc);

    if (sc->hn_tx_taskq != hn_tx_taskq)
        taskqueue_free(sc->hn_tx_taskq);

    return (0);
}
/*
 * Standard shutdown entry point
 */
static int
netvsc_shutdown(device_t dev)
{
    return (0);
}
static int
hn_txdesc_dmamap_load(struct hn_tx_ring *txr, struct hn_txdesc *txd,
    struct mbuf **m_head, bus_dma_segment_t *segs, int *nsegs)
{
    struct mbuf *m = *m_head;
    int error;

    error = bus_dmamap_load_mbuf_sg(txr->hn_tx_data_dtag, txd->data_dmap,
        m, segs, nsegs, BUS_DMA_NOWAIT);
    if (error == EFBIG) {
        struct mbuf *m_new;

        m_new = m_collapse(m, M_NOWAIT, HN_TX_DATA_SEGCNT_MAX);
        if (m_new == NULL)
            return ENOBUFS;
        else
            *m_head = m = m_new;
        txr->hn_tx_collapsed++;

        error = bus_dmamap_load_mbuf_sg(txr->hn_tx_data_dtag,
            txd->data_dmap, m, segs, nsegs, BUS_DMA_NOWAIT);
    }
    if (!error) {
        bus_dmamap_sync(txr->hn_tx_data_dtag, txd->data_dmap,
            BUS_DMASYNC_PREWRITE);
        txd->flags |= HN_TXD_FLAG_DMAMAP;
    }
    return error;
}
static __inline void
hn_txdesc_dmamap_unload(struct hn_tx_ring *txr, struct hn_txdesc *txd)
{

    if (txd->flags & HN_TXD_FLAG_DMAMAP) {
        bus_dmamap_sync(txr->hn_tx_data_dtag,
            txd->data_dmap, BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(txr->hn_tx_data_dtag,
            txd->data_dmap);
        txd->flags &= ~HN_TXD_FLAG_DMAMAP;
    }
}
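/*
 * Drop a reference on the TX descriptor.  When the last reference goes
 * away, unload the DMA map, free the attached mbuf (if any), and put
 * the descriptor back onto the free list (or buf_ring).  Returns
 * nonzero if the descriptor was actually freed.
 */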
static __inline int
hn_txdesc_put(struct hn_tx_ring *txr, struct hn_txdesc *txd)
{

    KASSERT((txd->flags & HN_TXD_FLAG_ONLIST) == 0,
        ("put an onlist txd %#x", txd->flags));

    KASSERT(txd->refs > 0, ("invalid txd refs %d", txd->refs));
    if (atomic_fetchadd_int(&txd->refs, -1) != 1)
        return 0;

    hn_txdesc_dmamap_unload(txr, txd);
    if (txd->m != NULL) {
        m_freem(txd->m);
        txd->m = NULL;
    }

    txd->flags |= HN_TXD_FLAG_ONLIST;

#ifndef HN_USE_TXDESC_BUFRING
    mtx_lock_spin(&txr->hn_txlist_spin);
    KASSERT(txr->hn_txdesc_avail >= 0 &&
        txr->hn_txdesc_avail < txr->hn_txdesc_cnt,
        ("txdesc_put: invalid txd avail %d", txr->hn_txdesc_avail));
    txr->hn_txdesc_avail++;
    SLIST_INSERT_HEAD(&txr->hn_txlist, txd, link);
    mtx_unlock_spin(&txr->hn_txlist_spin);
#else
    atomic_add_int(&txr->hn_txdesc_avail, 1);
    buf_ring_enqueue(txr->hn_txdesc_br, txd);
#endif

    return 1;
}
static __inline struct hn_txdesc *
hn_txdesc_get(struct hn_tx_ring *txr)
{
    struct hn_txdesc *txd;

#ifndef HN_USE_TXDESC_BUFRING
    mtx_lock_spin(&txr->hn_txlist_spin);
    txd = SLIST_FIRST(&txr->hn_txlist);
    if (txd != NULL) {
        KASSERT(txr->hn_txdesc_avail > 0,
            ("txdesc_get: invalid txd avail %d", txr->hn_txdesc_avail));
        txr->hn_txdesc_avail--;
        SLIST_REMOVE_HEAD(&txr->hn_txlist, link);
    }
    mtx_unlock_spin(&txr->hn_txlist_spin);
#else
    txd = buf_ring_dequeue_sc(txr->hn_txdesc_br);
#endif

    if (txd != NULL) {
#ifdef HN_USE_TXDESC_BUFRING
        atomic_subtract_int(&txr->hn_txdesc_avail, 1);
#endif
        KASSERT(txd->m == NULL && txd->refs == 0 &&
            (txd->flags & HN_TXD_FLAG_ONLIST), ("invalid txd"));
        txd->flags &= ~HN_TXD_FLAG_ONLIST;
        txd->refs = 1;
    }
    return txd;
}
static __inline void
hn_txdesc_hold(struct hn_txdesc *txd)
{

    /* 0->1 transition will never work */
    KASSERT(txd->refs > 0, ("invalid refs %d", txd->refs));
    atomic_add_int(&txd->refs, 1);
}

/*
 * Send completion processing
 *
 * Note: It looks like offset 0 of buf is reserved to hold the softc
 * pointer.  The sc pointer is not currently needed in this function, and
 * it is not presently populated by the TX function.
 */
void
netvsc_xmit_completion(void *context)
{
    netvsc_packet *packet = context;
    struct hn_txdesc *txd;
    struct hn_tx_ring *txr;

    txd = (struct hn_txdesc *)(uintptr_t)
        packet->compl.send.send_completion_tid;

    txr = txd->txr;
    hn_txdesc_put(txr, txd);
}
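/*
 * Per-channel rollup: flush any LRO aggregations still queued on the
 * RX ring once the channel has been drained, and kick TX completion
 * processing when it was requested.  (Single RX/TX ring until vRSS is
 * supported.)
 */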
void
netvsc_channel_rollup(struct hv_device *device_ctx)
{
    struct hn_softc *sc = device_get_softc(device_ctx->device);
    struct hn_tx_ring *txr = &sc->hn_tx_ring[0]; /* TODO: vRSS */
#if defined(INET) || defined(INET6)
    struct hn_rx_ring *rxr = &sc->hn_rx_ring[0]; /* TODO: vRSS */
    struct lro_ctrl *lro = &rxr->hn_lro;
    struct lro_entry *queued;

    while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
        SLIST_REMOVE_HEAD(&lro->lro_active, next);
        tcp_lro_flush(lro, queued);
    }
#endif

    if (!txr->hn_txeof)
        return;

    txr->hn_txeof = 0;
    hn_start_txeof(txr);
}
/*
 * If this function fails, then both txd and m_head0 will be freed.
 */
static int
hn_encap(struct hn_tx_ring *txr, struct hn_txdesc *txd, struct mbuf **m_head0)
{
    bus_dma_segment_t segs[HN_TX_DATA_SEGCNT_MAX];
    int error, nsegs, i;
    struct mbuf *m_head = *m_head0;
    netvsc_packet *packet;
    rndis_msg *rndis_mesg;
    rndis_packet *rndis_pkt;
    rndis_per_packet_info *rppi;
    uint32_t rndis_msg_size;

    packet = &txd->netvsc_pkt;
    packet->is_data_pkt = TRUE;
    packet->tot_data_buf_len = m_head->m_pkthdr.len;

    /*
     * extension points to the area reserved for the
     * rndis_filter_packet, which is placed just after
     * the netvsc_packet (and rppi struct, if present;
     * length is updated later).
     */
    rndis_mesg = txd->rndis_msg;
    /* XXX not necessary */
    memset(rndis_mesg, 0, HN_RNDIS_MSG_LEN);
    rndis_mesg->ndis_msg_type = REMOTE_NDIS_PACKET_MSG;

    rndis_pkt = &rndis_mesg->msg.packet;
    rndis_pkt->data_offset = sizeof(rndis_packet);
    rndis_pkt->data_length = packet->tot_data_buf_len;
    rndis_pkt->per_pkt_info_offset = sizeof(rndis_packet);

    rndis_msg_size = RNDIS_MESSAGE_SIZE(rndis_packet);
    if (m_head->m_flags & M_VLANTAG) {
        ndis_8021q_info *rppi_vlan_info;

        rndis_msg_size += RNDIS_VLAN_PPI_SIZE;
        rppi = hv_set_rppi_data(rndis_mesg, RNDIS_VLAN_PPI_SIZE,
            ieee_8021q_info);

        rppi_vlan_info = (ndis_8021q_info *)((uint8_t *)rppi +
            rppi->per_packet_info_offset);
        rppi_vlan_info->u1.s1.vlan_id =
            m_head->m_pkthdr.ether_vtag & 0xfff;
    }
    if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
        rndis_tcp_tso_info *tso_info;
        struct ether_vlan_header *eh;
        int ether_len;

        /*
         * XXX need m_pullup and use mtodo
         */
        eh = mtod(m_head, struct ether_vlan_header*);
        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
            ether_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
        else
            ether_len = ETHER_HDR_LEN;

        rndis_msg_size += RNDIS_TSO_PPI_SIZE;
        rppi = hv_set_rppi_data(rndis_mesg, RNDIS_TSO_PPI_SIZE,
            tcp_large_send_info);

        tso_info = (rndis_tcp_tso_info *)((uint8_t *)rppi +
            rppi->per_packet_info_offset);
        tso_info->lso_v2_xmit.type =
            RNDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;

#ifdef INET
        if (m_head->m_pkthdr.csum_flags & CSUM_IP_TSO) {
            struct ip *ip =
                (struct ip *)(m_head->m_data + ether_len);
            unsigned long iph_len = ip->ip_hl << 2;
            struct tcphdr *th =
                (struct tcphdr *)((caddr_t)ip + iph_len);

            tso_info->lso_v2_xmit.ip_version =
                RNDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
            ip->ip_len = 0;
            ip->ip_sum = 0;

            th->th_sum = in_pseudo(ip->ip_src.s_addr,
                ip->ip_dst.s_addr, htons(IPPROTO_TCP));
        }
#endif
#if defined(INET6) && defined(INET)
        else
#endif
#ifdef INET6
        {
            struct ip6_hdr *ip6 = (struct ip6_hdr *)
                (m_head->m_data + ether_len);
            struct tcphdr *th = (struct tcphdr *)(ip6 + 1);

            tso_info->lso_v2_xmit.ip_version =
                RNDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
            ip6->ip6_plen = 0;
            th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
        }
#endif
        tso_info->lso_v2_xmit.tcp_header_offset = 0;
        tso_info->lso_v2_xmit.mss = m_head->m_pkthdr.tso_segsz;
    } else if (m_head->m_pkthdr.csum_flags & txr->hn_csum_assist) {
        rndis_tcp_ip_csum_info *csum_info;

        rndis_msg_size += RNDIS_CSUM_PPI_SIZE;
        rppi = hv_set_rppi_data(rndis_mesg, RNDIS_CSUM_PPI_SIZE,
            tcpip_chksum_info);
        csum_info = (rndis_tcp_ip_csum_info *)((uint8_t *)rppi +
            rppi->per_packet_info_offset);

        csum_info->xmit.is_ipv4 = 1;
        if (m_head->m_pkthdr.csum_flags & CSUM_IP)
            csum_info->xmit.ip_header_csum = 1;

        if (m_head->m_pkthdr.csum_flags & CSUM_TCP) {
            csum_info->xmit.tcp_csum = 1;
            csum_info->xmit.tcp_header_offset = 0;
        } else if (m_head->m_pkthdr.csum_flags & CSUM_UDP) {
            csum_info->xmit.udp_csum = 1;
        }
    }

    rndis_mesg->msg_len = packet->tot_data_buf_len + rndis_msg_size;
    packet->tot_data_buf_len = rndis_mesg->msg_len;
    /*
     * Chimney send, if the packet could fit into one chimney buffer.
     */
    if (packet->tot_data_buf_len < txr->hn_tx_chimney_size) {
        netvsc_dev *net_dev = txr->hn_sc->net_dev;
        uint32_t send_buf_section_idx;

        send_buf_section_idx =
            hv_nv_get_next_send_section(net_dev);
        if (send_buf_section_idx !=
            NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX) {
            uint8_t *dest = ((uint8_t *)net_dev->send_buf +
                (send_buf_section_idx *
                 net_dev->send_section_size));

            memcpy(dest, rndis_mesg, rndis_msg_size);
            dest += rndis_msg_size;
            m_copydata(m_head, 0, m_head->m_pkthdr.len, dest);

            packet->send_buf_section_idx = send_buf_section_idx;
            packet->send_buf_section_size =
                packet->tot_data_buf_len;
            packet->page_buf_count = 0;
            txr->hn_tx_chimney++;
            goto done;
        }
    }
    error = hn_txdesc_dmamap_load(txr, txd, &m_head, segs, &nsegs);
    if (error) {
        int freed;

        /*
         * This mbuf is not linked w/ the txd yet, so free it now.
         */
        m_freem(m_head);
        *m_head0 = NULL;

        freed = hn_txdesc_put(txr, txd);
        KASSERT(freed != 0,
            ("fail to free txd upon txdma error"));

        txr->hn_txdma_failed++;
        if_inc_counter(txr->hn_sc->hn_ifp, IFCOUNTER_OERRORS, 1);
        return error;
    }
    *m_head0 = m_head;

    packet->page_buf_count = nsegs + HV_RF_NUM_TX_RESERVED_PAGE_BUFS;

    /* send packet with page buffer */
    packet->page_buffers[0].pfn = atop(txd->rndis_msg_paddr);
    packet->page_buffers[0].offset = txd->rndis_msg_paddr & PAGE_MASK;
    packet->page_buffers[0].length = rndis_msg_size;

    /*
     * Fill the page buffers with mbuf info starting at index
     * HV_RF_NUM_TX_RESERVED_PAGE_BUFS.
     */
    for (i = 0; i < nsegs; ++i) {
        hv_vmbus_page_buffer *pb = &packet->page_buffers[
            i + HV_RF_NUM_TX_RESERVED_PAGE_BUFS];

        pb->pfn = atop(segs[i].ds_addr);
        pb->offset = segs[i].ds_addr & PAGE_MASK;
        pb->length = segs[i].ds_len;
    }

    packet->send_buf_section_idx =
        NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX;
    packet->send_buf_section_size = 0;

done:
    txd->m = m_head;

    /* Set the completion routine */
    packet->compl.send.on_send_completion = netvsc_xmit_completion;
    packet->compl.send.send_completion_context = packet;
    packet->compl.send.send_completion_tid = (uint64_t)(uintptr_t)txd;

    return 0;
}
/*
 * Start a transmit of one or more packets
 */
static int
hn_start_locked(struct hn_tx_ring *txr, int len)
{
    struct hn_softc *sc = txr->hn_sc;
    struct ifnet *ifp = sc->hn_ifp;
    struct hv_device *device_ctx = vmbus_get_devctx(sc->hn_dev);

    KASSERT(txr == &sc->hn_tx_ring[0], ("not the first TX ring"));
    mtx_assert(&txr->hn_tx_lock, MA_OWNED);

    if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
        IFF_DRV_RUNNING)
        return 0;

    while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
        int error, send_failed = 0;
        struct hn_txdesc *txd;
        struct mbuf *m_head;

        IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
        if (m_head == NULL)
            break;

        if (len > 0 && m_head->m_pkthdr.len > len) {
            /*
             * This sending could be time consuming; let callers
             * dispatch this packet sending (and sending of any
             * follow-up packets) to the TX taskqueue.
             */
            IF_PREPEND(&ifp->if_snd, m_head);
            return 1;
        }

        txd = hn_txdesc_get(txr);
        if (txd == NULL) {
            txr->hn_no_txdescs++;
            IF_PREPEND(&ifp->if_snd, m_head);
            atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
            break;
        }

        error = hn_encap(txr, txd, &m_head);
        if (error) {
            /* Both txd and m_head are freed */
            continue;
        }
again:
        /*
         * Make sure that txd is not freed before ETHER_BPF_MTAP.
         */
        hn_txdesc_hold(txd);
        error = hv_nv_on_send(device_ctx, &txd->netvsc_pkt);
        if (!error) {
            ETHER_BPF_MTAP(ifp, m_head);
            if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
        }
        hn_txdesc_put(txr, txd);

        if (__predict_false(error)) {
            int freed;

            /*
             * This should "really rarely" happen.
             *
             * XXX Too many RX to be acked or too many sideband
             * commands to run?  Ask netvsc_channel_rollup()
             * to kick start later.
             */
            txr->hn_txeof = 1;
            if (!send_failed) {
                txr->hn_send_failed++;
                send_failed = 1;
                /*
                 * Try sending again after setting hn_txeof,
                 * in case we missed the last
                 * netvsc_channel_rollup().
                 */
                goto again;
            }
            if_printf(ifp, "send failed\n");

            /*
             * This mbuf will be prepended; don't free it
             * in hn_txdesc_put(), only unload it from the
             * DMA map in hn_txdesc_put(), if it was loaded.
             */
            txd->m = NULL;
            freed = hn_txdesc_put(txr, txd);
            KASSERT(freed != 0,
                ("fail to free txd upon send error"));

            txr->hn_send_failed++;
            IF_PREPEND(&ifp->if_snd, m_head);
            atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
            break;
        }
    }

    return 0;
}
/*
 * Link up/down notification
 */
void
netvsc_linkstatus_callback(struct hv_device *device_obj, uint32_t status)
{
    hn_softc_t *sc = device_get_softc(device_obj->device);

    if (status == 1) {
        sc->hn_carrier = 1;
    } else {
        sc->hn_carrier = 0;
    }
}
/*
 * Append the specified data to the indicated mbuf chain.
 * Extend the mbuf chain if the new data does not fit in
 * existing space.
 *
 * This is a minor rewrite of m_append() from sys/kern/uipc_mbuf.c.
 * There should be an equivalent in the kernel mbuf code,
 * but there does not appear to be one yet.
 *
 * Differs from m_append() in that additional mbufs are
 * allocated with cluster size MJUMPAGESIZE, and filled
 * accordingly.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
static int
hv_m_append(struct mbuf *m0, int len, c_caddr_t cp)
{
    struct mbuf *m, *n;
    int remainder, space;

    for (m = m0; m->m_next != NULL; m = m->m_next)
        ;
    remainder = len;
    space = M_TRAILINGSPACE(m);
    if (space > 0) {
        /*
         * Copy into available space.
         */
        if (space > remainder)
            space = remainder;
        bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
        m->m_len += space;
        cp += space;
        remainder -= space;
    }
    while (remainder > 0) {
        /*
         * Allocate a new mbuf; could check space
         * and allocate a cluster instead.
         */
        n = m_getjcl(M_NOWAIT, m->m_type, 0, MJUMPAGESIZE);
        if (n == NULL)
            break;
        n->m_len = min(MJUMPAGESIZE, remainder);
        bcopy(cp, mtod(n, caddr_t), n->m_len);
        cp += n->m_len;
        remainder -= n->m_len;
        m->m_next = n;
        m = n;
    }
    if (m0->m_flags & M_PKTHDR)
        m0->m_pkthdr.len += len - remainder;

    return (remainder == 0);
}
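/*
 * Illustrative use of hv_m_append() (a sketch only, not driver code):
 * copy a flat receive buffer into a fresh mbuf chain and let
 * hv_m_append() grow the chain with MJUMPAGESIZE clusters as needed.
 * The helper name example_buf_to_mbuf() is hypothetical.
 */
#if 0
static struct mbuf *
example_buf_to_mbuf(const uint8_t *buf, int len)
{
    struct mbuf *m;

    m = m_gethdr(M_NOWAIT, MT_DATA);    /* empty pkthdr mbuf */
    if (m == NULL)
        return (NULL);
    if (!hv_m_append(m, len, (c_caddr_t)buf)) {
        m_freem(m);                     /* cluster allocation failed */
        return (NULL);
    }
    return (m);                         /* m_pkthdr.len is now len */
}
#endif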
/*
 * Called when we receive a data packet from the "wire" on the
 * specified device
 *
 * Note: This is no longer used as a callback
 */
int
netvsc_recv(struct hv_device *device_ctx, netvsc_packet *packet,
    rndis_tcp_ip_csum_info *csum_info)
{
    struct hn_softc *sc = device_get_softc(device_ctx->device);
    struct hn_rx_ring *rxr = &sc->hn_rx_ring[0]; /* TODO: vRSS */
    struct mbuf *m_new;
    struct ifnet *ifp;
    int size, do_lro = 0, do_csum = 1;

    if (sc == NULL)
        return (0); /* TODO: KYS how can this be! */

    ifp = sc->hn_ifp;
    if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
        return (0);
    }

    /*
     * Bail out if packet contains more data than configured MTU.
     */
    if (packet->tot_data_buf_len > (ifp->if_mtu + ETHER_HDR_LEN)) {
        return (0);
    } else if (packet->tot_data_buf_len <= MHLEN) {
        m_new = m_gethdr(M_NOWAIT, MT_DATA);
        if (m_new == NULL)
            return (0);
        memcpy(mtod(m_new, void *), packet->data,
            packet->tot_data_buf_len);
        m_new->m_pkthdr.len = m_new->m_len = packet->tot_data_buf_len;
        rxr->hn_small_pkts++;
    } else {
        /*
         * Get an mbuf with a cluster.  For packets 2K or less,
         * get a standard 2K cluster.  For anything larger, get a
         * 4K cluster.  Any buffers larger than 4K can cause problems
         * if looped around to the Hyper-V TX channel, so avoid them.
         */
        size = MCLBYTES;
        if (packet->tot_data_buf_len > MCLBYTES) {
            size = MJUMPAGESIZE;
        }

        m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, size);
        if (m_new == NULL) {
            if_printf(ifp, "alloc mbuf failed.\n");
            return (0);
        }

        hv_m_append(m_new, packet->tot_data_buf_len, packet->data);
    }
    m_new->m_pkthdr.rcvif = ifp;
    if (__predict_false((ifp->if_capenable & IFCAP_RXCSUM) == 0))
        do_csum = 0;

    /* receive side checksum offload */
    if (csum_info != NULL) {
        /* IP csum offload */
        if (csum_info->receive.ip_csum_succeeded && do_csum) {
            m_new->m_pkthdr.csum_flags |=
                (CSUM_IP_CHECKED | CSUM_IP_VALID);
            rxr->hn_csum_ip++;
        }

        /* TCP/UDP csum offload */
        if ((csum_info->receive.tcp_csum_succeeded ||
            csum_info->receive.udp_csum_succeeded) && do_csum) {
            m_new->m_pkthdr.csum_flags |=
                (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
            m_new->m_pkthdr.csum_data = 0xffff;
            if (csum_info->receive.tcp_csum_succeeded)
                rxr->hn_csum_tcp++;
            else
                rxr->hn_csum_udp++;
        }

        if (csum_info->receive.ip_csum_succeeded &&
            csum_info->receive.tcp_csum_succeeded)
            do_lro = 1;
    } else {
        const struct ether_header *eh;
        uint16_t etype;
        int hoff;

        hoff = sizeof(*eh);
        if (m_new->m_len < hoff)
            goto skip;
        eh = mtod(m_new, struct ether_header *);
        etype = ntohs(eh->ether_type);
        if (etype == ETHERTYPE_VLAN) {
            const struct ether_vlan_header *evl;

            hoff = sizeof(*evl);
            if (m_new->m_len < hoff)
                goto skip;
            evl = mtod(m_new, struct ether_vlan_header *);
            etype = ntohs(evl->evl_proto);
        }

        if (etype == ETHERTYPE_IP) {
            int pr;

            pr = hn_check_iplen(m_new, hoff);
            if (pr == IPPROTO_TCP) {
                if (do_csum &&
                    (rxr->hn_trust_hcsum &
                     HN_TRUST_HCSUM_TCP)) {
                    rxr->hn_csum_trusted++;
                    m_new->m_pkthdr.csum_flags |=
                        (CSUM_IP_CHECKED | CSUM_IP_VALID |
                         CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
                    m_new->m_pkthdr.csum_data = 0xffff;
                }
                /* Rely on SW csum verification though... */
                do_lro = 1;
            } else if (pr == IPPROTO_UDP) {
                if (do_csum &&
                    (rxr->hn_trust_hcsum &
                     HN_TRUST_HCSUM_UDP)) {
                    rxr->hn_csum_trusted++;
                    m_new->m_pkthdr.csum_flags |=
                        (CSUM_IP_CHECKED | CSUM_IP_VALID |
                         CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
                    m_new->m_pkthdr.csum_data = 0xffff;
                }
            } else if (pr != IPPROTO_DONE && do_csum &&
                (rxr->hn_trust_hcsum & HN_TRUST_HCSUM_IP)) {
                rxr->hn_csum_trusted++;
                m_new->m_pkthdr.csum_flags |=
                    (CSUM_IP_CHECKED | CSUM_IP_VALID);
            }
        }
    }
skip:
    if ((packet->vlan_tci != 0) &&
        (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
        m_new->m_pkthdr.ether_vtag = packet->vlan_tci;
        m_new->m_flags |= M_VLANTAG;
    }

    /*
     * Note: Moved RX completion back to hv_nv_on_receive() so all
     * messages (not just data messages) will trigger a response.
     */

    if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

    if ((ifp->if_capenable & IFCAP_LRO) && do_lro) {
#if defined(INET) || defined(INET6)
        struct lro_ctrl *lro = &rxr->hn_lro;

        if (lro->lro_cnt) {
            rxr->hn_lro_tried++;
            if (tcp_lro_rx(lro, m_new, 0) == 0) {
                /* DONE! */
                return 0;
            }
        }
#endif
    }

    /* We're not holding the lock here, so don't release it */
    (*ifp->if_input)(ifp, m_new);

    return (0);
}
void
netvsc_recv_rollup(struct hv_device *device_ctx __unused)
{
}
/*
 * Rules for using sc->temp_unusable:
 * 1.  sc->temp_unusable can only be read or written while holding NV_LOCK()
 * 2.  code reading sc->temp_unusable under NV_LOCK(), and finding
 *     sc->temp_unusable set, must release NV_LOCK() and exit
 * 3.  to retain exclusive control of the interface,
 *     sc->temp_unusable must be set by code before releasing NV_LOCK()
 * 4.  only code setting sc->temp_unusable can clear sc->temp_unusable
 * 5.  code setting sc->temp_unusable must eventually clear sc->temp_unusable
 */
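/*
 * Illustrative locking pattern for the rules above (a sketch only;
 * the real consumers below additionally retry for a while when the
 * flag is found set):
 */
#if 0
    NV_LOCK(sc);
    if (sc->temp_unusable) {        /* rule 2: someone else owns it */
        NV_UNLOCK(sc);
        return (EBUSY);
    }
    sc->temp_unusable = TRUE;       /* rule 3: claim exclusivity */
    NV_UNLOCK(sc);

    /* ... reconfigure the interface without NV_LOCK() held ... */

    NV_LOCK(sc);
    sc->temp_unusable = FALSE;      /* rules 4-5: only the owner clears */
    NV_UNLOCK(sc);
#endif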
/*
 * Standard ioctl entry point.  Called when the user wants to configure
 * the interface.
 */
static int
hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
    hn_softc_t *sc = ifp->if_softc;
    struct ifreq *ifr = (struct ifreq *)data;
#ifdef INET
    struct ifaddr *ifa = (struct ifaddr *)data;
#endif
    netvsc_device_info device_info;
    struct hv_device *hn_dev;
    int mask, error = 0;
    int retry_cnt = 500;

    switch (cmd) {
    case SIOCSIFADDR:
#ifdef INET
        if (ifa->ifa_addr->sa_family == AF_INET) {
            ifp->if_flags |= IFF_UP;
            if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                hn_ifinit(sc);
            arp_ifinit(ifp, ifa);
        } else
#endif
            error = ether_ioctl(ifp, cmd, data);
        break;
    case SIOCSIFMTU:
        hn_dev = vmbus_get_devctx(sc->hn_dev);

        /* Check MTU value change */
        if (ifp->if_mtu == ifr->ifr_mtu)
            break;

        if (ifr->ifr_mtu > NETVSC_MAX_CONFIGURABLE_MTU) {
            error = EINVAL;
            break;
        }

        /* Obtain and record requested MTU */
        ifp->if_mtu = ifr->ifr_mtu;

        /*
         * Make sure that LRO aggregation length limit is still
         * valid, after the MTU change.
         */
        NV_LOCK(sc);
        if (sc->hn_rx_ring[0].hn_lro.lro_length_lim <
            HN_LRO_LENLIM_MIN(ifp)) {
            int i;

            for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
                sc->hn_rx_ring[i].hn_lro.lro_length_lim =
                    HN_LRO_LENLIM_MIN(ifp);
            }
        }
        NV_UNLOCK(sc);

        do {
            NV_LOCK(sc);
            if (!sc->temp_unusable) {
                sc->temp_unusable = TRUE;
                NV_UNLOCK(sc);
                break;
            }
            NV_UNLOCK(sc);
            if (retry_cnt > 0) {
                retry_cnt--;
                DELAY(5 * 1000);
            }
        } while (retry_cnt > 0);

        if (retry_cnt == 0) {
            error = EBUSY;
            break;
        }

        /* We must remove and add back the device to cause the new
         * MTU to take effect.  This includes tearing down, but not
         * deleting the channel, then bringing it back up.
         */
        error = hv_rf_on_device_remove(hn_dev, HV_RF_NV_RETAIN_CHANNEL);
        if (error) {
            NV_LOCK(sc);
            sc->temp_unusable = FALSE;
            NV_UNLOCK(sc);
            break;
        }
        error = hv_rf_on_device_add(hn_dev, &device_info);
        if (error) {
            NV_LOCK(sc);
            sc->temp_unusable = FALSE;
            NV_UNLOCK(sc);
            break;
        }

        sc->hn_tx_chimney_max = sc->net_dev->send_section_size;
        if (sc->hn_tx_ring[0].hn_tx_chimney_size >
            sc->hn_tx_chimney_max)
            hn_set_tx_chimney_size(sc, sc->hn_tx_chimney_max);

        hn_ifinit_locked(sc);

        NV_LOCK(sc);
        sc->temp_unusable = FALSE;
        NV_UNLOCK(sc);
        break;

    case SIOCSIFFLAGS:
        do {
            NV_LOCK(sc);
            if (!sc->temp_unusable) {
                sc->temp_unusable = TRUE;
                NV_UNLOCK(sc);
                break;
            }
            NV_UNLOCK(sc);
            if (retry_cnt > 0) {
                retry_cnt--;
                DELAY(5 * 1000);
            }
        } while (retry_cnt > 0);

        if (retry_cnt == 0) {
            error = EBUSY;
            break;
        }

        if (ifp->if_flags & IFF_UP) {
            /*
             * If only the state of the PROMISC flag changed,
             * then just use the 'set promisc mode' command
             * instead of reinitializing the entire NIC.  Doing
             * a full re-init means reloading the firmware and
             * waiting for it to start up, which may take a
             * second or two.
             */
            /* Fixme:  Promiscuous mode? */
            if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
                ifp->if_flags & IFF_PROMISC &&
                !(sc->hn_if_flags & IFF_PROMISC)) {
                /* do something here for Hyper-V */
            } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
                !(ifp->if_flags & IFF_PROMISC) &&
                sc->hn_if_flags & IFF_PROMISC) {
                /* do something here for Hyper-V */
            } else {
                hn_ifinit_locked(sc);
            }
        } else {
            if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                hn_stop(sc);
            }
        }
        NV_LOCK(sc);
        sc->temp_unusable = FALSE;
        NV_UNLOCK(sc);
        sc->hn_if_flags = ifp->if_flags;
        error = 0;
        break;
    case SIOCSIFCAP:
        mask = ifr->ifr_reqcap ^ ifp->if_capenable;
        if (mask & IFCAP_TXCSUM) {
            ifp->if_capenable ^= IFCAP_TXCSUM;
            if (ifp->if_capenable & IFCAP_TXCSUM) {
                ifp->if_hwassist |=
                    sc->hn_tx_ring[0].hn_csum_assist;
            } else {
                ifp->if_hwassist &=
                    ~sc->hn_tx_ring[0].hn_csum_assist;
            }
        }

        if (mask & IFCAP_RXCSUM)
            ifp->if_capenable ^= IFCAP_RXCSUM;

        if (mask & IFCAP_LRO)
            ifp->if_capenable ^= IFCAP_LRO;

        if (mask & IFCAP_TSO4) {
            ifp->if_capenable ^= IFCAP_TSO4;
            if (ifp->if_capenable & IFCAP_TSO4)
                ifp->if_hwassist |= CSUM_IP_TSO;
            else
                ifp->if_hwassist &= ~CSUM_IP_TSO;
        }

        if (mask & IFCAP_TSO6) {
            ifp->if_capenable ^= IFCAP_TSO6;
            if (ifp->if_capenable & IFCAP_TSO6)
                ifp->if_hwassist |= CSUM_IP6_TSO;
            else
                ifp->if_hwassist &= ~CSUM_IP6_TSO;
        }
        break;

    case SIOCADDMULTI:
    case SIOCDELMULTI:
        /* Fixme:  Multicast mode? */
        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
            NV_LOCK(sc);
            netvsc_setmulti(sc);
            NV_UNLOCK(sc);
        }
        error = 0;
        break;

    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        error = ifmedia_ioctl(ifp, ifr, &sc->hn_media, cmd);
        break;

    default:
        error = ether_ioctl(ifp, cmd, data);
        break;
    }

    return (error);
}
static void
hn_stop(hn_softc_t *sc)
{
    struct ifnet *ifp;
    int ret;
    struct hv_device *device_ctx = vmbus_get_devctx(sc->hn_dev);

    ifp = sc->hn_ifp;

    if (bootverbose)
        printf(" Closing Device ...\n");

    atomic_clear_int(&ifp->if_drv_flags,
        (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
    if_link_state_change(ifp, LINK_STATE_DOWN);
    sc->hn_initdone = 0;

    ret = hv_rf_on_close(device_ctx);
}
/*
 * FreeBSD transmit entry point
 */
static void
hn_start(struct ifnet *ifp)
{
    struct hn_softc *sc = ifp->if_softc;
    struct hn_tx_ring *txr = &sc->hn_tx_ring[0];

    if (txr->hn_sched_tx)
        goto do_sched;

    if (mtx_trylock(&txr->hn_tx_lock)) {
        int sched;

        sched = hn_start_locked(txr, txr->hn_direct_tx_size);
        mtx_unlock(&txr->hn_tx_lock);
        if (!sched)
            return;
    }
do_sched:
    taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_start_task);
}
static void
hn_start_txeof(struct hn_tx_ring *txr)
{
    struct hn_softc *sc = txr->hn_sc;
    struct ifnet *ifp = sc->hn_ifp;

    KASSERT(txr == &sc->hn_tx_ring[0], ("not the first TX ring"));

    if (txr->hn_sched_tx)
        goto do_sched;

    if (mtx_trylock(&txr->hn_tx_lock)) {
        int sched;

        atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
        sched = hn_start_locked(txr, txr->hn_direct_tx_size);
        mtx_unlock(&txr->hn_tx_lock);
        if (sched) {
            taskqueue_enqueue(txr->hn_tx_taskq,
                &txr->hn_start_task);
        }
    } else {
do_sched:
        /*
         * Release the OACTIVE earlier, with the hope that
         * others could catch up.  The task will clear the
         * flag again with the hn_tx_lock to avoid possible
         * races.
         */
        atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
        taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_txeof_task);
    }
}
static void
hn_ifinit_locked(hn_softc_t *sc)
{
    struct ifnet *ifp;
    struct hv_device *device_ctx = vmbus_get_devctx(sc->hn_dev);
    int ret;

    ifp = sc->hn_ifp;

    if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
        return;
    }

    hv_promisc_mode = 1;

    ret = hv_rf_on_open(device_ctx);
    if (ret != 0) {
        return;
    } else {
        sc->hn_initdone = 1;
    }
    atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
    atomic_set_int(&ifp->if_drv_flags, IFF_DRV_RUNNING);
    if_link_state_change(ifp, LINK_STATE_UP);
}
static void
hn_ifinit(void *xsc)
{
    hn_softc_t *sc = xsc;

    NV_LOCK(sc);
    if (sc->temp_unusable) {
        NV_UNLOCK(sc);
        return;
    }
    sc->temp_unusable = TRUE;
    NV_UNLOCK(sc);

    hn_ifinit_locked(sc);

    NV_LOCK(sc);
    sc->temp_unusable = FALSE;
    NV_UNLOCK(sc);
}
static void
hn_watchdog(struct ifnet *ifp)
{
    hn_softc_t *sc;

    sc = ifp->if_softc;

    printf("hn%d: watchdog timeout -- resetting\n", sc->hn_unit);
    hn_ifinit(sc);    /*???*/
    if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
}
static int
hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct hn_softc *sc = arg1;
    unsigned int lenlim;
    int error, i;

    lenlim = sc->hn_rx_ring[0].hn_lro.lro_length_lim;
    error = sysctl_handle_int(oidp, &lenlim, 0, req);
    if (error || req->newptr == NULL)
        return error;

    if (lenlim < HN_LRO_LENLIM_MIN(sc->hn_ifp) ||
        lenlim > TCP_LRO_LENGTH_MAX)
        return EINVAL;

    NV_LOCK(sc);
    for (i = 0; i < sc->hn_rx_ring_cnt; ++i)
        sc->hn_rx_ring[i].hn_lro.lro_length_lim = lenlim;
    NV_UNLOCK(sc);
    return 0;
}
static int
hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct hn_softc *sc = arg1;
    int ackcnt, error, i;

    /*
     * lro_ackcnt_lim is the append count limit;
     * +1 turns it into the aggregation limit.
     */
    ackcnt = sc->hn_rx_ring[0].hn_lro.lro_ackcnt_lim + 1;
    error = sysctl_handle_int(oidp, &ackcnt, 0, req);
    if (error || req->newptr == NULL)
        return error;

    if (ackcnt < 2 || ackcnt > (TCP_LRO_ACKCNT_MAX + 1))
        return EINVAL;

    /*
     * Convert the aggregation limit back to the append
     * count limit.
     */
    --ackcnt;
    NV_LOCK(sc);
    for (i = 0; i < sc->hn_rx_ring_cnt; ++i)
        sc->hn_rx_ring[i].hn_lro.lro_ackcnt_lim = ackcnt;
    NV_UNLOCK(sc);
    return 0;
}
static int
hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct hn_softc *sc = arg1;
    int hcsum = arg2;
    int on, error, i;

    on = 0;
    if (sc->hn_rx_ring[0].hn_trust_hcsum & hcsum)
        on = 1;

    error = sysctl_handle_int(oidp, &on, 0, req);
    if (error || req->newptr == NULL)
        return error;

    NV_LOCK(sc);
    for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
        struct hn_rx_ring *rxr = &sc->hn_rx_ring[i];

        if (on)
            rxr->hn_trust_hcsum |= hcsum;
        else
            rxr->hn_trust_hcsum &= ~hcsum;
    }
    NV_UNLOCK(sc);
    return 0;
}
static int
hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct hn_softc *sc = arg1;
    int chimney_size, error;

    chimney_size = sc->hn_tx_ring[0].hn_tx_chimney_size;
    error = sysctl_handle_int(oidp, &chimney_size, 0, req);
    if (error || req->newptr == NULL)
        return error;

    if (chimney_size > sc->hn_tx_chimney_max || chimney_size <= 0)
        return EINVAL;

    hn_set_tx_chimney_size(sc, chimney_size);
    return 0;
}
static int
hn_rx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct hn_softc *sc = arg1;
    int ofs = arg2, i, error;
    struct hn_rx_ring *rxr;
    u_long stat;

    stat = 0;
    for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
        rxr = &sc->hn_rx_ring[i];
        stat += *((u_long *)((uint8_t *)rxr + ofs));
    }

    error = sysctl_handle_long(oidp, &stat, 0, req);
    if (error || req->newptr == NULL)
        return error;

    /* Zero out this stat. */
    for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
        rxr = &sc->hn_rx_ring[i];
        *((u_long *)((uint8_t *)rxr + ofs)) = 0;
    }
    return 0;
}
static int
hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct hn_softc *sc = arg1;
    int ofs = arg2, i, error;
    struct hn_rx_ring *rxr;
    uint64_t stat;

    stat = 0;
    for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
        rxr = &sc->hn_rx_ring[i];
        stat += *((uint64_t *)((uint8_t *)rxr + ofs));
    }

    error = sysctl_handle_64(oidp, &stat, 0, req);
    if (error || req->newptr == NULL)
        return error;

    /* Zero out this stat. */
    for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
        rxr = &sc->hn_rx_ring[i];
        *((uint64_t *)((uint8_t *)rxr + ofs)) = 0;
    }
    return 0;
}
static int
hn_tx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct hn_softc *sc = arg1;
    int ofs = arg2, i, error;
    struct hn_tx_ring *txr;
    u_long stat;

    stat = 0;
    for (i = 0; i < sc->hn_tx_ring_cnt; ++i) {
        txr = &sc->hn_tx_ring[i];
        stat += *((u_long *)((uint8_t *)txr + ofs));
    }

    error = sysctl_handle_long(oidp, &stat, 0, req);
    if (error || req->newptr == NULL)
        return error;

    /* Zero out this stat. */
    for (i = 0; i < sc->hn_tx_ring_cnt; ++i) {
        txr = &sc->hn_tx_ring[i];
        *((u_long *)((uint8_t *)txr + ofs)) = 0;
    }
    return 0;
}
static int
hn_tx_conf_int_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct hn_softc *sc = arg1;
    int ofs = arg2, i, error, conf;
    struct hn_tx_ring *txr;

    txr = &sc->hn_tx_ring[0];
    conf = *((int *)((uint8_t *)txr + ofs));

    error = sysctl_handle_int(oidp, &conf, 0, req);
    if (error || req->newptr == NULL)
        return error;

    NV_LOCK(sc);
    for (i = 0; i < sc->hn_tx_ring_cnt; ++i) {
        txr = &sc->hn_tx_ring[i];
        *((int *)((uint8_t *)txr + ofs)) = conf;
    }
    NV_UNLOCK(sc);
    return 0;
}
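/*
 * Sanity-check the IP and TCP/UDP headers of a received packet.
 * Returns the IP protocol number when the relevant headers are
 * complete and self-consistent, or IPPROTO_DONE when the packet
 * should not be treated as host-verified (see the trust_host*
 * handling in netvsc_recv()).
 */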
static int
hn_check_iplen(const struct mbuf *m, int hoff)
{
    const struct ip *ip;
    int len, iphlen, iplen;
    const struct tcphdr *th;
    int thoff;    /* TCP data offset */

    len = hoff + sizeof(struct ip);

    /* The packet must be at least the size of an IP header. */
    if (m->m_pkthdr.len < len)
        return IPPROTO_DONE;

    /* The fixed IP header must reside completely in the first mbuf. */
    if (m->m_len < len)
        return IPPROTO_DONE;

    ip = mtodo(m, hoff);

    /* Bound check the packet's stated IP header length. */
    iphlen = ip->ip_hl << 2;
    if (iphlen < sizeof(struct ip))    /* minimum header length */
        return IPPROTO_DONE;

    /* The full IP header must reside completely in the one mbuf. */
    if (m->m_len < hoff + iphlen)
        return IPPROTO_DONE;

    iplen = ntohs(ip->ip_len);

    /*
     * Check that the amount of data in the buffers is at least as much
     * as the IP header would have us expect.
     */
    if (m->m_pkthdr.len < hoff + iplen)
        return IPPROTO_DONE;

    /*
     * Ignore IP fragments.
     */
    if (ntohs(ip->ip_off) & (IP_OFFMASK | IP_MF))
        return IPPROTO_DONE;

    /*
     * The TCP/IP or UDP/IP header must be entirely contained within
     * the first fragment of a packet.
     */
    switch (ip->ip_p) {
    case IPPROTO_TCP:
        if (iplen < iphlen + sizeof(struct tcphdr))
            return IPPROTO_DONE;
        if (m->m_len < hoff + iphlen + sizeof(struct tcphdr))
            return IPPROTO_DONE;
        th = (const struct tcphdr *)((const uint8_t *)ip + iphlen);
        thoff = th->th_off << 2;
        if (thoff < sizeof(struct tcphdr) || thoff + iphlen > iplen)
            return IPPROTO_DONE;
        if (m->m_len < hoff + iphlen + thoff)
            return IPPROTO_DONE;
        break;
    case IPPROTO_UDP:
        if (iplen < iphlen + sizeof(struct udphdr))
            return IPPROTO_DONE;
        if (m->m_len < hoff + iphlen + sizeof(struct udphdr))
            return IPPROTO_DONE;
        break;
    default:
        if (iplen < iphlen)
            return IPPROTO_DONE;
        break;
    }

    return ip->ip_p;
}
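/*
 * bus_dmamap_load() callback for the RNDIS message DMA tag; the tag
 * allows a single segment, so just record its physical address.
 */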
static void
hn_dma_map_paddr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    bus_addr_t *paddr = arg;

    if (error)
        return;

    KASSERT(nseg == 1, ("too many segments %d!", nseg));
    *paddr = segs->ds_addr;
}
static void
hn_create_rx_data(struct hn_softc *sc)
{
    struct sysctl_oid_list *child;
    struct sysctl_ctx_list *ctx;
    device_t dev = sc->hn_dev;
#if defined(INET) || defined(INET6)
#if __FreeBSD_version >= 1100095
    int lroent_cnt;
#endif
#endif
    int i;

    sc->hn_rx_ring_cnt = 1; /* TODO: vRSS */
    sc->hn_rx_ring = malloc(sizeof(struct hn_rx_ring) * sc->hn_rx_ring_cnt,
        M_NETVSC, M_WAITOK | M_ZERO);

#if defined(INET) || defined(INET6)
#if __FreeBSD_version >= 1100095
    lroent_cnt = hn_lro_entry_count;
    if (lroent_cnt < TCP_LRO_ENTRIES)
        lroent_cnt = TCP_LRO_ENTRIES;
    device_printf(dev, "LRO: entry count %d\n", lroent_cnt);
#endif
#endif /* INET || INET6 */

    for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
        struct hn_rx_ring *rxr = &sc->hn_rx_ring[i];

        if (hn_trust_hosttcp)
            rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_TCP;
        if (hn_trust_hostudp)
            rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_UDP;
        if (hn_trust_hostip)
            rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_IP;
#if defined(INET) || defined(INET6)
#if __FreeBSD_version >= 1100095
        tcp_lro_init_args(&rxr->hn_lro, sc->hn_ifp, lroent_cnt, 0);
#else
        tcp_lro_init(&rxr->hn_lro);
        rxr->hn_lro.ifp = sc->hn_ifp;
#endif
        rxr->hn_lro.lro_length_lim = HN_LRO_LENLIM_DEF;
        rxr->hn_lro.lro_ackcnt_lim = HN_LRO_ACKCNT_DEF;
#endif /* INET || INET6 */
    }
    ctx = device_get_sysctl_ctx(dev);
    child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_queued",
        CTLTYPE_U64 | CTLFLAG_RW, sc,
        __offsetof(struct hn_rx_ring, hn_lro.lro_queued),
        hn_rx_stat_u64_sysctl, "LU", "LRO queued");
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_flushed",
        CTLTYPE_U64 | CTLFLAG_RW, sc,
        __offsetof(struct hn_rx_ring, hn_lro.lro_flushed),
        hn_rx_stat_u64_sysctl, "LU", "LRO flushed");
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_tried",
        CTLTYPE_ULONG | CTLFLAG_RW, sc,
        __offsetof(struct hn_rx_ring, hn_lro_tried),
        hn_rx_stat_ulong_sysctl, "LU", "# of LRO tries");
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_length_lim",
        CTLTYPE_UINT | CTLFLAG_RW, sc, 0, hn_lro_lenlim_sysctl, "IU",
        "Max # of data bytes to be aggregated by LRO");
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_ackcnt_lim",
        CTLTYPE_INT | CTLFLAG_RW, sc, 0, hn_lro_ackcnt_sysctl, "I",
        "Max # of ACKs to be aggregated by LRO");
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hosttcp",
        CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_TCP,
        hn_trust_hcsum_sysctl, "I",
        "Trust tcp segment verification on host side, "
        "when csum info is missing");
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostudp",
        CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_UDP,
        hn_trust_hcsum_sysctl, "I",
        "Trust udp datagram verification on host side, "
        "when csum info is missing");
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostip",
        CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_IP,
        hn_trust_hcsum_sysctl, "I",
        "Trust ip packet verification on host side, "
        "when csum info is missing");
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_ip",
        CTLTYPE_ULONG | CTLFLAG_RW, sc,
        __offsetof(struct hn_rx_ring, hn_csum_ip),
        hn_rx_stat_ulong_sysctl, "LU", "RXCSUM IP");
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_tcp",
        CTLTYPE_ULONG | CTLFLAG_RW, sc,
        __offsetof(struct hn_rx_ring, hn_csum_tcp),
        hn_rx_stat_ulong_sysctl, "LU", "RXCSUM TCP");
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_udp",
        CTLTYPE_ULONG | CTLFLAG_RW, sc,
        __offsetof(struct hn_rx_ring, hn_csum_udp),
        hn_rx_stat_ulong_sysctl, "LU", "RXCSUM UDP");
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_trusted",
        CTLTYPE_ULONG | CTLFLAG_RW, sc,
        __offsetof(struct hn_rx_ring, hn_csum_trusted),
        hn_rx_stat_ulong_sysctl, "LU",
        "# of packets that we trust host's csum verification");
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "small_pkts",
        CTLTYPE_ULONG | CTLFLAG_RW, sc,
        __offsetof(struct hn_rx_ring, hn_small_pkts),
        hn_rx_stat_ulong_sysctl, "LU", "# of small packets received");
}
static void
hn_destroy_rx_data(struct hn_softc *sc)
{
#if defined(INET) || defined(INET6)
    int i;
#endif

    if (sc->hn_rx_ring_cnt == 0)
        return;

#if defined(INET) || defined(INET6)
    for (i = 0; i < sc->hn_rx_ring_cnt; ++i)
        tcp_lro_free(&sc->hn_rx_ring[i].hn_lro);
#endif

    free(sc->hn_rx_ring, M_NETVSC);
    sc->hn_rx_ring = NULL;

    sc->hn_rx_ring_cnt = 0;
}
static int
hn_create_tx_ring(struct hn_softc *sc, int id)
{
    struct hn_tx_ring *txr = &sc->hn_tx_ring[id];
    bus_dma_tag_t parent_dtag;
    int error, i;

    txr->hn_sc = sc;

#ifndef HN_USE_TXDESC_BUFRING
    mtx_init(&txr->hn_txlist_spin, "hn txlist", NULL, MTX_SPIN);
#endif
    mtx_init(&txr->hn_tx_lock, "hn tx", NULL, MTX_DEF);

    txr->hn_txdesc_cnt = HN_TX_DESC_CNT;
    txr->hn_txdesc = malloc(sizeof(struct hn_txdesc) * txr->hn_txdesc_cnt,
        M_NETVSC, M_WAITOK | M_ZERO);
#ifndef HN_USE_TXDESC_BUFRING
    SLIST_INIT(&txr->hn_txlist);
#else
    txr->hn_txdesc_br = buf_ring_alloc(txr->hn_txdesc_cnt, M_NETVSC,
        M_WAITOK, &txr->hn_tx_lock);
#endif

    txr->hn_tx_taskq = sc->hn_tx_taskq;
    TASK_INIT(&txr->hn_start_task, 0, hn_start_taskfunc, txr);
    TASK_INIT(&txr->hn_txeof_task, 0, hn_txeof_taskfunc, txr);

    txr->hn_direct_tx_size = hn_direct_tx_size;
    if (hv_vmbus_protocal_version >= HV_VMBUS_VERSION_WIN8_1)
        txr->hn_csum_assist = HN_CSUM_ASSIST;
    else
        txr->hn_csum_assist = HN_CSUM_ASSIST_WIN8;

    /*
     * Always schedule transmission instead of trying to do direct
     * transmission.  This one gives the best performance so far.
     */
    txr->hn_sched_tx = 1;
    parent_dtag = bus_get_dma_tag(sc->hn_dev);

    /* DMA tag for RNDIS messages. */
    error = bus_dma_tag_create(parent_dtag, /* parent */
        HN_RNDIS_MSG_ALIGN,     /* alignment */
        HN_RNDIS_MSG_BOUNDARY,  /* boundary */
        BUS_SPACE_MAXADDR,      /* lowaddr */
        BUS_SPACE_MAXADDR,      /* highaddr */
        NULL, NULL,             /* filter, filterarg */
        HN_RNDIS_MSG_LEN,       /* maxsize */
        1,                      /* nsegments */
        HN_RNDIS_MSG_LEN,       /* maxsegsize */
        0,                      /* flags */
        NULL,                   /* lockfunc */
        NULL,                   /* lockfuncarg */
        &txr->hn_tx_rndis_dtag);
    if (error) {
        device_printf(sc->hn_dev, "failed to create rndis dmatag\n");
        return error;
    }

    /* DMA tag for data. */
    error = bus_dma_tag_create(parent_dtag, /* parent */
        1,                      /* alignment */
        HN_TX_DATA_BOUNDARY,    /* boundary */
        BUS_SPACE_MAXADDR,      /* lowaddr */
        BUS_SPACE_MAXADDR,      /* highaddr */
        NULL, NULL,             /* filter, filterarg */
        HN_TX_DATA_MAXSIZE,     /* maxsize */
        HN_TX_DATA_SEGCNT_MAX,  /* nsegments */
        HN_TX_DATA_SEGSIZE,     /* maxsegsize */
        0,                      /* flags */
        NULL,                   /* lockfunc */
        NULL,                   /* lockfuncarg */
        &txr->hn_tx_data_dtag);
    if (error) {
        device_printf(sc->hn_dev, "failed to create data dmatag\n");
        return error;
    }
    for (i = 0; i < txr->hn_txdesc_cnt; ++i) {
        struct hn_txdesc *txd = &txr->hn_txdesc[i];

        txd->txr = txr;

        /*
         * Allocate and load RNDIS messages.
         */
        error = bus_dmamem_alloc(txr->hn_tx_rndis_dtag,
            (void **)&txd->rndis_msg,
            BUS_DMA_WAITOK | BUS_DMA_COHERENT,
            &txd->rndis_msg_dmap);
        if (error) {
            device_printf(sc->hn_dev,
                "failed to allocate rndis_msg, %d\n", i);
            return error;
        }

        error = bus_dmamap_load(txr->hn_tx_rndis_dtag,
            txd->rndis_msg_dmap,
            txd->rndis_msg, HN_RNDIS_MSG_LEN,
            hn_dma_map_paddr, &txd->rndis_msg_paddr,
            BUS_DMA_NOWAIT);
        if (error) {
            device_printf(sc->hn_dev,
                "failed to load rndis_msg, %d\n", i);
            bus_dmamem_free(txr->hn_tx_rndis_dtag,
                txd->rndis_msg, txd->rndis_msg_dmap);
            return error;
        }

        /* DMA map for TX data. */
        error = bus_dmamap_create(txr->hn_tx_data_dtag, 0,
            &txd->data_dmap);
        if (error) {
            device_printf(sc->hn_dev,
                "failed to allocate tx data dmamap\n");
            bus_dmamap_unload(txr->hn_tx_rndis_dtag,
                txd->rndis_msg_dmap);
            bus_dmamem_free(txr->hn_tx_rndis_dtag,
                txd->rndis_msg, txd->rndis_msg_dmap);
            return error;
        }

        /* All set, put it to list */
        txd->flags |= HN_TXD_FLAG_ONLIST;
#ifndef HN_USE_TXDESC_BUFRING
        SLIST_INSERT_HEAD(&txr->hn_txlist, txd, link);
#else
        buf_ring_enqueue(txr->hn_txdesc_br, txd);
#endif
    }
    txr->hn_txdesc_avail = txr->hn_txdesc_cnt;
    if (sc->hn_tx_sysctl_tree != NULL) {
        struct sysctl_oid_list *child;
        struct sysctl_ctx_list *ctx;
        char name[16];

        /*
         * Create per TX ring sysctl tree:
         * dev.hn.UNIT.tx.RINGID
         */
        ctx = device_get_sysctl_ctx(sc->hn_dev);
        child = SYSCTL_CHILDREN(sc->hn_tx_sysctl_tree);

        snprintf(name, sizeof(name), "%d", id);
        txr->hn_tx_sysctl_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
            name, CTLFLAG_RD, 0, "");

        if (txr->hn_tx_sysctl_tree != NULL) {
            child = SYSCTL_CHILDREN(txr->hn_tx_sysctl_tree);

            SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_avail",
                CTLFLAG_RD, &txr->hn_txdesc_avail, 0,
                "# of available TX descs");
        }
    }

    return 0;
}
static void
hn_txdesc_dmamap_destroy(struct hn_txdesc *txd)
{
    struct hn_tx_ring *txr = txd->txr;

    KASSERT(txd->m == NULL, ("still has mbuf installed"));
    KASSERT((txd->flags & HN_TXD_FLAG_DMAMAP) == 0, ("still dma mapped"));

    bus_dmamap_unload(txr->hn_tx_rndis_dtag, txd->rndis_msg_dmap);
    bus_dmamem_free(txr->hn_tx_rndis_dtag, txd->rndis_msg,
        txd->rndis_msg_dmap);
    bus_dmamap_destroy(txr->hn_tx_data_dtag, txd->data_dmap);
}
static void
hn_destroy_tx_ring(struct hn_tx_ring *txr)
{
    struct hn_txdesc *txd;

    if (txr->hn_txdesc == NULL)
        return;

#ifndef HN_USE_TXDESC_BUFRING
    while ((txd = SLIST_FIRST(&txr->hn_txlist)) != NULL) {
        SLIST_REMOVE_HEAD(&txr->hn_txlist, link);
        hn_txdesc_dmamap_destroy(txd);
    }
#else
    while ((txd = buf_ring_dequeue_sc(txr->hn_txdesc_br)) != NULL)
        hn_txdesc_dmamap_destroy(txd);
#endif

    if (txr->hn_tx_data_dtag != NULL)
        bus_dma_tag_destroy(txr->hn_tx_data_dtag);
    if (txr->hn_tx_rndis_dtag != NULL)
        bus_dma_tag_destroy(txr->hn_tx_rndis_dtag);
    free(txr->hn_txdesc, M_NETVSC);
    txr->hn_txdesc = NULL;

#ifndef HN_USE_TXDESC_BUFRING
    mtx_destroy(&txr->hn_txlist_spin);
#endif
    mtx_destroy(&txr->hn_tx_lock);
}
static int
hn_create_tx_data(struct hn_softc *sc)
{
    struct sysctl_oid_list *child;
    struct sysctl_ctx_list *ctx;
    int i;

    sc->hn_tx_ring_cnt = 1; /* TODO: vRSS */
    sc->hn_tx_ring = malloc(sizeof(struct hn_tx_ring) * sc->hn_tx_ring_cnt,
        M_NETVSC, M_WAITOK | M_ZERO);

    ctx = device_get_sysctl_ctx(sc->hn_dev);
    child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->hn_dev));

    /* Create dev.hn.UNIT.tx sysctl tree */
    sc->hn_tx_sysctl_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "tx",
        CTLFLAG_RD, 0, "");

    for (i = 0; i < sc->hn_tx_ring_cnt; ++i) {
        int error;

        error = hn_create_tx_ring(sc, i);
        if (error)
            return error;
    }
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "no_txdescs",
        CTLTYPE_ULONG | CTLFLAG_RW, sc,
        __offsetof(struct hn_tx_ring, hn_no_txdescs),
        hn_tx_stat_ulong_sysctl, "LU", "# of times short of TX descs");
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "send_failed",
        CTLTYPE_ULONG | CTLFLAG_RW, sc,
        __offsetof(struct hn_tx_ring, hn_send_failed),
        hn_tx_stat_ulong_sysctl, "LU", "# of Hyper-V send failures");
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txdma_failed",
        CTLTYPE_ULONG | CTLFLAG_RW, sc,
        __offsetof(struct hn_tx_ring, hn_txdma_failed),
        hn_tx_stat_ulong_sysctl, "LU", "# of TX DMA failures");
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_collapsed",
        CTLTYPE_ULONG | CTLFLAG_RW, sc,
        __offsetof(struct hn_tx_ring, hn_tx_collapsed),
        hn_tx_stat_ulong_sysctl, "LU", "# of TX mbufs collapsed");
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney",
        CTLTYPE_ULONG | CTLFLAG_RW, sc,
        __offsetof(struct hn_tx_ring, hn_tx_chimney),
        hn_tx_stat_ulong_sysctl, "LU", "# of chimney sends");
    SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_cnt",
        CTLFLAG_RD, &sc->hn_tx_ring[0].hn_txdesc_cnt, 0,
        "# of total TX descs");
    SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_chimney_max",
        CTLFLAG_RD, &sc->hn_tx_chimney_max, 0,
        "Chimney send packet size upper boundary");
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney_size",
        CTLTYPE_INT | CTLFLAG_RW, sc, 0, hn_tx_chimney_size_sysctl,
        "I", "Chimney send packet size limit");
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "direct_tx_size",
        CTLTYPE_INT | CTLFLAG_RW, sc,
        __offsetof(struct hn_tx_ring, hn_direct_tx_size),
        hn_tx_conf_int_sysctl, "I",
        "Size of the packet for direct transmission");
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "sched_tx",
        CTLTYPE_INT | CTLFLAG_RW, sc,
        __offsetof(struct hn_tx_ring, hn_sched_tx),
        hn_tx_conf_int_sysctl, "I",
        "Always schedule transmission "
        "instead of doing direct transmission");

    return 0;
}
static void
hn_set_tx_chimney_size(struct hn_softc *sc, int chimney_size)
{
    int i;

    NV_LOCK(sc);
    for (i = 0; i < sc->hn_tx_ring_cnt; ++i)
        sc->hn_tx_ring[i].hn_tx_chimney_size = chimney_size;
    NV_UNLOCK(sc);
}
static void
hn_destroy_tx_data(struct hn_softc *sc)
{
    int i;

    if (sc->hn_tx_ring_cnt == 0)
        return;

    for (i = 0; i < sc->hn_tx_ring_cnt; ++i)
        hn_destroy_tx_ring(&sc->hn_tx_ring[i]);

    free(sc->hn_tx_ring, M_NETVSC);
    sc->hn_tx_ring = NULL;

    sc->hn_tx_ring_cnt = 0;
}
static void
hn_start_taskfunc(void *xtxr, int pending __unused)
{
    struct hn_tx_ring *txr = xtxr;

    mtx_lock(&txr->hn_tx_lock);
    hn_start_locked(txr, 0);
    mtx_unlock(&txr->hn_tx_lock);
}

static void
hn_txeof_taskfunc(void *xtxr, int pending __unused)
{
    struct hn_tx_ring *txr = xtxr;

    mtx_lock(&txr->hn_tx_lock);
    atomic_clear_int(&txr->hn_sc->hn_ifp->if_drv_flags, IFF_DRV_OACTIVE);
    hn_start_locked(txr, 0);
    mtx_unlock(&txr->hn_tx_lock);
}
static void
hn_stop_tx_tasks(struct hn_softc *sc)
{
    int i;

    for (i = 0; i < sc->hn_tx_ring_cnt; ++i) {
        struct hn_tx_ring *txr = &sc->hn_tx_ring[i];

        taskqueue_drain(txr->hn_tx_taskq, &txr->hn_start_task);
        taskqueue_drain(txr->hn_tx_taskq, &txr->hn_txeof_task);
    }
}
static void
hn_tx_taskq_create(void *arg __unused)
{
    if (!hn_share_tx_taskq)
        return;

    hn_tx_taskq = taskqueue_create("hn_tx", M_WAITOK,
        taskqueue_thread_enqueue, &hn_tx_taskq);
    taskqueue_start_threads(&hn_tx_taskq, 1, PI_NET, "hn tx");
}
SYSINIT(hn_txtq_create, SI_SUB_DRIVERS, SI_ORDER_FIRST,
    hn_tx_taskq_create, NULL);

static void
hn_tx_taskq_destroy(void *arg __unused)
{
    if (hn_tx_taskq != NULL)
        taskqueue_free(hn_tx_taskq);
}
SYSUNINIT(hn_txtq_destroy, SI_SUB_DRIVERS, SI_ORDER_FIRST,
    hn_tx_taskq_destroy, NULL);
static device_method_t netvsc_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,     netvsc_probe),
    DEVMETHOD(device_attach,    netvsc_attach),
    DEVMETHOD(device_detach,    netvsc_detach),
    DEVMETHOD(device_shutdown,  netvsc_shutdown),

    DEVMETHOD_END
};

static driver_t netvsc_driver = {
    NETVSC_DEVNAME,
    netvsc_methods,
    sizeof(hn_softc_t)
};

static devclass_t netvsc_devclass;

DRIVER_MODULE(hn, vmbus, netvsc_driver, netvsc_devclass, 0, 0);
MODULE_VERSION(hn, 1);
MODULE_DEPEND(hn, vmbus, 1, 1, 1);