/*-
 * Copyright (c) 2010-2012 Citrix Inc.
 * Copyright (c) 2009-2012 Microsoft Corp.
 * Copyright (c) 2012 NetApp Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2004-2006 Kip Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"
#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/bpf.h>

#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <netinet/ip6.h>
#include <netinet/tcp_lro.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/frame.h>
#include <machine/vmparam.h>

#include <sys/rman.h>
#include <sys/mutex.h>
#include <sys/errno.h>
#include <sys/types.h>
#include <machine/atomic.h>

#include <machine/intr_machdep.h>

#include <machine/in_cksum.h>

#include <dev/hyperv/include/hyperv.h>
#include "hv_net_vsc.h"
#include "hv_rndis.h"
#include "hv_rndis_filter.h"
/* Short for Hyper-V network interface */
#define NETVSC_DEVNAME	"hn"

/*
 * It looks like offset 0 of buf is reserved to hold the softc pointer.
 * The sc pointer is evidently not needed, and is not presently populated.
 * The packet offset is where the netvsc_packet starts in the buffer.
 */
#define HV_NV_SC_PTR_OFFSET_IN_BUF	0
#define HV_NV_PACKET_OFFSET_IN_BUF	16
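/*
 * i.e. the first 16 bytes of the buffer are a (currently unused) slot
 * for the softc pointer, and the netvsc_packet follows at
 * HV_NV_PACKET_OFFSET_IN_BUF:
 *
 *	packet = (netvsc_packet *)(buf + HV_NV_PACKET_OFFSET_IN_BUF);
 */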
/* YYY should get it from the underlying channel */
#define HN_TX_DESC_CNT			512

#define HN_LROENT_CNT_DEF		128

#define HN_RNDIS_MSG_LEN		\
	(sizeof(rndis_msg) +		\
	 RNDIS_VLAN_PPI_SIZE +		\
	 RNDIS_TSO_PPI_SIZE +		\
	 RNDIS_CSUM_PPI_SIZE)
#define HN_RNDIS_MSG_BOUNDARY		PAGE_SIZE
#define HN_RNDIS_MSG_ALIGN		CACHE_LINE_SIZE

#define HN_TX_DATA_BOUNDARY		PAGE_SIZE
#define HN_TX_DATA_MAXSIZE		IP_MAXPACKET
#define HN_TX_DATA_SEGSIZE		PAGE_SIZE
#define HN_TX_DATA_SEGCNT_MAX		\
	(NETVSC_PACKET_MAXPAGE - HV_RF_NUM_TX_RESERVED_PAGE_BUFS)
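/*
 * The reserved page buffers at the front of each packet carry the RNDIS
 * message itself (see hn_encap()), so only the remaining
 * NETVSC_PACKET_MAXPAGE - HV_RF_NUM_TX_RESERVED_PAGE_BUFS page buffers
 * are available for mbuf data segments, hence the segment count above.
 */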
#define HN_DIRECT_TX_SIZE_DEF		128

struct hn_txdesc {
	SLIST_ENTRY(hn_txdesc) link;
	struct mbuf	*m;
	struct hn_softc	*sc;
	int		refs;
	uint32_t	flags;		/* HN_TXD_FLAG_ */
	netvsc_packet	netvsc_pkt;	/* XXX to be removed */

	bus_dmamap_t	data_dmap;

	bus_addr_t	rndis_msg_paddr;
	rndis_msg	*rndis_msg;
	bus_dmamap_t	rndis_msg_dmap;
};

#define HN_TXD_FLAG_ONLIST	0x1
#define HN_TXD_FLAG_DMAMAP	0x2
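/*
 * A txdesc moves between two states:
 *
 *	hn_txdesc_get()	-- off the free list, ONLIST cleared, refs = 1
 *	hn_txdesc_put()	-- refs dropped; on the last reference the DMA map
 *			   is unloaded, the mbuf freed, and the descriptor
 *			   goes back on the free list with ONLIST set.
 *
 * DMAMAP is set only while TX data is loaded into data_dmap.
 */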
/*
 * Only enable UDP checksum offloading when it is on 2012R2 or
 * later.  UDP checksum offloading doesn't work on earlier
 * Windows releases.
 */
#define HN_CSUM_ASSIST_WIN8	(CSUM_TCP)
#define HN_CSUM_ASSIST		(CSUM_IP | CSUM_UDP | CSUM_TCP)
#define HN_LRO_LENLIM_DEF		(25 * ETHERMTU)
/* YYY 2*MTU is a bit rough, but should be good enough. */
#define HN_LRO_LENLIM_MIN(ifp)		(2 * (ifp)->if_mtu)

#define HN_LRO_ACKCNT_DEF		1
/*
 * Be aware that this sleepable mutex will exhibit WITNESS errors when
 * certain TCP and ARP code paths are taken.  This appears to be a
 * well-known condition, as all other drivers checked use a sleeping
 * mutex to protect their transmit paths.
 * Also be aware that mutexes do not play well with semaphores, and there
 * is a conflicting semaphore in a certain channel code path.
 */
#define NV_LOCK_INIT(_sc, _name) \
	mtx_init(&(_sc)->hn_lock, _name, MTX_NETWORK_LOCK, MTX_DEF)
#define NV_LOCK(_sc)		mtx_lock(&(_sc)->hn_lock)
#define NV_TRYLOCK(_sc)		mtx_trylock(&(_sc)->hn_lock)
#define NV_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->hn_lock, MA_OWNED)
#define NV_UNLOCK(_sc)		mtx_unlock(&(_sc)->hn_lock)
#define NV_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->hn_lock)
int hv_promisc_mode = 0;	/* normal mode by default */

SYSCTL_NODE(_hw, OID_AUTO, hn, CTLFLAG_RD, NULL, "Hyper-V network interface");

/* Trust TCP segment verification on host side. */
static int hn_trust_hosttcp = 1;
SYSCTL_INT(_hw_hn, OID_AUTO, trust_hosttcp, CTLFLAG_RDTUN,
    &hn_trust_hosttcp, 0,
    "Trust TCP segment verification on host side, "
    "when csum info is missing (global setting)");

/* Trust UDP datagram verification on host side. */
static int hn_trust_hostudp = 1;
SYSCTL_INT(_hw_hn, OID_AUTO, trust_hostudp, CTLFLAG_RDTUN,
    &hn_trust_hostudp, 0,
    "Trust UDP datagram verification on host side, "
    "when csum info is missing (global setting)");

/* Trust IP packet verification on host side. */
static int hn_trust_hostip = 1;
SYSCTL_INT(_hw_hn, OID_AUTO, trust_hostip, CTLFLAG_RDTUN,
    &hn_trust_hostip, 0,
    "Trust IP packet verification on host side, "
    "when csum info is missing (global setting)");
#if __FreeBSD_version >= 1100045
/* Limit TSO burst size */
static int hn_tso_maxlen = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, tso_maxlen, CTLFLAG_RDTUN,
    &hn_tso_maxlen, 0, "TSO burst limit");
#endif

/* Limit chimney send size */
static int hn_tx_chimney_size = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, tx_chimney_size, CTLFLAG_RDTUN,
    &hn_tx_chimney_size, 0, "Chimney send packet size limit");

/* Limit the size of packet for direct transmission */
static int hn_direct_tx_size = HN_DIRECT_TX_SIZE_DEF;
SYSCTL_INT(_hw_hn, OID_AUTO, direct_tx_size, CTLFLAG_RDTUN,
    &hn_direct_tx_size, 0, "Size of the packet for direct transmission");

#if defined(INET) || defined(INET6)
#if __FreeBSD_version >= 1100095
static int hn_lro_entry_count = HN_LROENT_CNT_DEF;
SYSCTL_INT(_hw_hn, OID_AUTO, lro_entry_count, CTLFLAG_RDTUN,
    &hn_lro_entry_count, 0, "LRO entry count");
#endif
#endif

static int hn_share_tx_taskq = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, share_tx_taskq, CTLFLAG_RDTUN,
    &hn_share_tx_taskq, 0, "Enable shared TX taskqueue");

static struct taskqueue	*hn_tx_taskq;
/*
 * Forward declarations
 */
static void hn_stop(hn_softc_t *sc);
static void hn_ifinit_locked(hn_softc_t *sc);
static void hn_ifinit(void *xsc);
static int hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int hn_start_locked(struct ifnet *ifp, int len);
static void hn_start(struct ifnet *ifp);
static void hn_start_txeof(struct ifnet *ifp);
static int hn_ifmedia_upd(struct ifnet *ifp);
static void hn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
static int hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_rx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_check_iplen(const struct mbuf *, int);
static int hn_create_tx_ring(struct hn_softc *sc);
static void hn_destroy_tx_ring(struct hn_softc *sc);
static void hn_start_taskfunc(void *xsc, int pending);
static void hn_txeof_taskfunc(void *xsc, int pending);
static int hn_encap(struct hn_softc *, struct hn_txdesc *, struct mbuf **);
static void hn_create_rx_data(struct hn_softc *sc);
static void hn_destroy_rx_data(struct hn_softc *sc);
static int
hn_ifmedia_upd(struct ifnet *ifp __unused)
{

	/* Media changes are not supported on this synthetic interface. */
	return EOPNOTSUPP;
}
static void
hn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct hn_softc *sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->hn_carrier) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}
	ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
}
/* {F8615163-DF3E-46c5-913F-F2D2F965ED0E} */
static const hv_guid g_net_vsc_device_type = {
	.data = {0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
		 0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E}
};
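/*
 * Note that the first three GUID fields are stored little-endian, which
 * is why the byte order in .data differs from the textual GUID above.
 */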
/*
 * Standard probe entry point.
 */
static int
netvsc_probe(device_t dev)
{
	const char *p;

	p = vmbus_get_type(dev);
	if (!memcmp(p, &g_net_vsc_device_type.data, sizeof(hv_guid))) {
		device_set_desc(dev, "Synthetic Network Interface");
		if (bootverbose)
			printf("Netvsc probe... DONE\n");
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}
/*
 * Standard attach entry point.
 *
 * Called when the driver is loaded.  It allocates needed resources,
 * and initializes the "hardware" and software.
 */
static int
netvsc_attach(device_t dev)
{
	struct hv_device *device_ctx = vmbus_get_devctx(dev);
	netvsc_device_info device_info;
	hn_softc_t *sc;
	int unit = device_get_unit(dev);
	struct ifnet *ifp = NULL;
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx;
	int error;
#if __FreeBSD_version >= 1100045
	int tso_maxlen;
#endif

	sc = device_get_softc(dev);

	bzero(sc, sizeof(hn_softc_t));
	sc->hn_unit = unit;
	sc->hn_dev = dev;
	sc->hn_direct_tx_size = hn_direct_tx_size;

	if (hn_tx_taskq == NULL) {
		sc->hn_tx_taskq = taskqueue_create_fast("hn_tx", M_WAITOK,
		    taskqueue_thread_enqueue, &sc->hn_tx_taskq);
		taskqueue_start_threads(&sc->hn_tx_taskq, 1, PI_NET, "%s tx",
		    device_get_nameunit(dev));
	} else {
		sc->hn_tx_taskq = hn_tx_taskq;
	}
	TASK_INIT(&sc->hn_start_task, 0, hn_start_taskfunc, sc);
	TASK_INIT(&sc->hn_txeof_task, 0, hn_txeof_taskfunc, sc);

	error = hn_create_tx_ring(sc);
	if (error)
		goto failed;

	NV_LOCK_INIT(sc, "NetVSCLock");

	sc->hn_dev_obj = device_ctx;

	ifp = sc->hn_ifp = if_alloc(IFT_ETHER);
	ifp->if_softc = sc;

	hn_create_rx_data(sc);

	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_dunit = unit;
	ifp->if_dname = NETVSC_DEVNAME;

	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = hn_ioctl;
	ifp->if_start = hn_start;
	ifp->if_init = hn_ifinit;
	/* needed by hv_rf_on_device_add() code */
	ifp->if_mtu = ETHERMTU;
	IFQ_SET_MAXLEN(&ifp->if_snd, 512);
	ifp->if_snd.ifq_drv_maxlen = 511;
	IFQ_SET_READY(&ifp->if_snd);

	ifmedia_init(&sc->hn_media, 0, hn_ifmedia_upd, hn_ifmedia_sts);
	ifmedia_add(&sc->hn_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->hn_media, IFM_ETHER | IFM_AUTO);
	/* XXX ifmedia_set really should do this for us */
	sc->hn_media.ifm_media = sc->hn_media.ifm_cur->ifm_media;

	/*
	 * Tell upper layers that we support full VLAN capability.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |=
	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_TSO |
	    IFCAP_LRO;
	ifp->if_capenable |=
	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_TSO |
	    IFCAP_LRO;

	if (hv_vmbus_protocal_version >= HV_VMBUS_VERSION_WIN8_1)
		sc->hn_csum_assist = HN_CSUM_ASSIST;
	else
		sc->hn_csum_assist = HN_CSUM_ASSIST_WIN8;
	ifp->if_hwassist = sc->hn_csum_assist | CSUM_TSO;

	error = hv_rf_on_device_add(device_ctx, &device_info);
	if (error)
		goto failed;

	if (device_info.link_state == 0) {
		sc->hn_carrier = 1;
	}

#if __FreeBSD_version >= 1100045
	tso_maxlen = hn_tso_maxlen;
	if (tso_maxlen <= 0 || tso_maxlen > IP_MAXPACKET)
		tso_maxlen = IP_MAXPACKET;

	ifp->if_hw_tsomaxsegcount = HN_TX_DATA_SEGCNT_MAX;
	ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
	ifp->if_hw_tsomax = tso_maxlen -
	    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
#endif
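/*
 * The host-imposed TSO limit (tso_maxlen, clamped to IP_MAXPACKET)
 * covers the whole Ethernet frame, while if_hw_tsomax is consumed by
 * the TCP stack, which does not count the link-level header; hence the
 * Ethernet/VLAN header size is subtracted above.
 */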
	ether_ifattach(ifp, device_info.mac_addr);

#if __FreeBSD_version >= 1100045
	if_printf(ifp, "TSO: %u/%u/%u\n", ifp->if_hw_tsomax,
	    ifp->if_hw_tsomaxsegcount, ifp->if_hw_tsomaxsegsize);
#endif

	sc->hn_tx_chimney_max = sc->net_dev->send_section_size;
	sc->hn_tx_chimney_size = sc->hn_tx_chimney_max;
	if (hn_tx_chimney_size > 0 &&
	    hn_tx_chimney_size < sc->hn_tx_chimney_max)
		sc->hn_tx_chimney_size = hn_tx_chimney_size;
	/*
	 * Always schedule transmission instead of trying
	 * to do direct transmission.  This one gives the
	 * best performance so far.
	 */
	sc->hn_sched_tx = 1;
	ctx = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "no_txdescs",
	    CTLFLAG_RW, &sc->hn_no_txdescs, "# of times short of TX descs");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "send_failed",
	    CTLFLAG_RW, &sc->hn_send_failed, "# of hyper-v send failures");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "txdma_failed",
	    CTLFLAG_RW, &sc->hn_txdma_failed, "# of TX DMA failures");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_collapsed",
	    CTLFLAG_RW, &sc->hn_tx_collapsed, "# of TX mbuf collapses");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_chimney",
	    CTLFLAG_RW, &sc->hn_tx_chimney, "# of chimney sends");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_cnt",
	    CTLFLAG_RD, &sc->hn_txdesc_cnt, 0, "# of total TX descs");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_avail",
	    CTLFLAG_RD, &sc->hn_txdesc_avail, 0, "# of available TX descs");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_chimney_max",
	    CTLFLAG_RD, &sc->hn_tx_chimney_max, 0,
	    "Chimney send packet size upper boundary");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney_size",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, hn_tx_chimney_size_sysctl,
	    "I", "Chimney send packet size limit");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "direct_tx_size",
	    CTLFLAG_RW, &sc->hn_direct_tx_size, 0,
	    "Size of the packet for direct transmission");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "sched_tx",
	    CTLFLAG_RW, &sc->hn_sched_tx, 0,
	    "Always schedule transmission "
	    "instead of doing direct transmission");

	return (0);
failed:
	hn_destroy_tx_ring(sc);
	return (error);
}
/*
 * Standard detach entry point
 */
static int
netvsc_detach(device_t dev)
{
	struct hn_softc *sc = device_get_softc(dev);
	struct hv_device *hv_device = vmbus_get_devctx(dev);

	if (bootverbose)
		printf("netvsc_detach\n");

	/*
	 * XXXKYS:  Need to clean up all our
	 * driver state; this is the driver
	 * unloading.
	 */

	/*
	 * XXXKYS:  Need to stop outgoing traffic and unregister
	 * the netdevice.
	 */

	hv_rf_on_device_remove(hv_device, HV_RF_NV_DESTROY_CHANNEL);

	taskqueue_drain(sc->hn_tx_taskq, &sc->hn_start_task);
	taskqueue_drain(sc->hn_tx_taskq, &sc->hn_txeof_task);
	if (sc->hn_tx_taskq != hn_tx_taskq)
		taskqueue_free(sc->hn_tx_taskq);

	ifmedia_removeall(&sc->hn_media);
	hn_destroy_rx_data(sc);
	hn_destroy_tx_ring(sc);

	return (0);
}
/*
 * Standard shutdown entry point
 */
static int
netvsc_shutdown(device_t dev)
{
	return (0);
}
static int
hn_txdesc_dmamap_load(struct hn_softc *sc, struct hn_txdesc *txd,
    struct mbuf **m_head, bus_dma_segment_t *segs, int *nsegs)
{
	struct mbuf *m = *m_head;
	int error;

	error = bus_dmamap_load_mbuf_sg(sc->hn_tx_data_dtag, txd->data_dmap,
	    m, segs, nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		struct mbuf *m_new;

		m_new = m_collapse(m, M_NOWAIT, HN_TX_DATA_SEGCNT_MAX);
		if (m_new == NULL)
			return ENOBUFS;
		m = *m_head = m_new;
		sc->hn_tx_collapsed++;

		error = bus_dmamap_load_mbuf_sg(sc->hn_tx_data_dtag,
		    txd->data_dmap, m, segs, nsegs, BUS_DMA_NOWAIT);
	}
	if (!error) {
		bus_dmamap_sync(sc->hn_tx_data_dtag, txd->data_dmap,
		    BUS_DMASYNC_PREWRITE);
		txd->flags |= HN_TXD_FLAG_DMAMAP;
	}
	return error;
}
static __inline void
hn_txdesc_dmamap_unload(struct hn_softc *sc, struct hn_txdesc *txd)
{

	if (txd->flags & HN_TXD_FLAG_DMAMAP) {
		bus_dmamap_sync(sc->hn_tx_data_dtag,
		    txd->data_dmap, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->hn_tx_data_dtag,
		    txd->data_dmap);
		txd->flags &= ~HN_TXD_FLAG_DMAMAP;
	}
}
static __inline int
hn_txdesc_put(struct hn_softc *sc, struct hn_txdesc *txd)
{

	KASSERT((txd->flags & HN_TXD_FLAG_ONLIST) == 0,
	    ("put an onlist txd %#x", txd->flags));

	KASSERT(txd->refs > 0, ("invalid txd refs %d", txd->refs));
	if (atomic_fetchadd_int(&txd->refs, -1) != 1)
		return 0;

	hn_txdesc_dmamap_unload(sc, txd);
	if (txd->m != NULL) {
		m_freem(txd->m);
		txd->m = NULL;
	}

	txd->flags |= HN_TXD_FLAG_ONLIST;

	mtx_lock_spin(&sc->hn_txlist_spin);
	KASSERT(sc->hn_txdesc_avail >= 0 &&
	    sc->hn_txdesc_avail < sc->hn_txdesc_cnt,
	    ("txdesc_put: invalid txd avail %d", sc->hn_txdesc_avail));
	sc->hn_txdesc_avail++;
	SLIST_INSERT_HEAD(&sc->hn_txlist, txd, link);
	mtx_unlock_spin(&sc->hn_txlist_spin);

	return 1;
}
static __inline struct hn_txdesc *
hn_txdesc_get(struct hn_softc *sc)
{
	struct hn_txdesc *txd;

	mtx_lock_spin(&sc->hn_txlist_spin);
	txd = SLIST_FIRST(&sc->hn_txlist);
	if (txd != NULL) {
		KASSERT(sc->hn_txdesc_avail > 0,
		    ("txdesc_get: invalid txd avail %d", sc->hn_txdesc_avail));
		sc->hn_txdesc_avail--;
		SLIST_REMOVE_HEAD(&sc->hn_txlist, link);
	}
	mtx_unlock_spin(&sc->hn_txlist_spin);

	if (txd != NULL) {
		KASSERT(txd->m == NULL && txd->refs == 0 &&
		    (txd->flags & HN_TXD_FLAG_ONLIST), ("invalid txd"));
		txd->flags &= ~HN_TXD_FLAG_ONLIST;
		txd->refs = 1;
	}
	return txd;
}
static __inline void
hn_txdesc_hold(struct hn_txdesc *txd)
{

	/* 0->1 transition will never work */
	KASSERT(txd->refs > 0, ("invalid refs %d", txd->refs));
	atomic_add_int(&txd->refs, 1);
}
/*
 * Send completion processing
 *
 * Note: It looks like offset 0 of buf is reserved to hold the softc
 * pointer.  The sc pointer is not currently needed in this function, and
 * it is not presently populated by the TX function.
 */
void
netvsc_xmit_completion(void *context)
{
	netvsc_packet *packet = context;
	struct hn_txdesc *txd;
	struct hn_softc *sc;

	txd = (struct hn_txdesc *)(uintptr_t)
	    packet->compl.send.send_completion_tid;

	sc = txd->sc;
	sc->hn_txeof = 1;
	hn_txdesc_put(sc, txd);
}
void
netvsc_channel_rollup(struct hv_device *device_ctx)
{
	struct hn_softc *sc = device_get_softc(device_ctx->device);
#if defined(INET) || defined(INET6)
	struct hn_rx_ring *rxr = &sc->hn_rx_ring[0]; /* TODO: vRSS */
	struct lro_ctrl *lro = &rxr->hn_lro;
	struct lro_entry *queued;

	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}
#endif

	if (!sc->hn_txeof)
		return;

	sc->hn_txeof = 0;
	hn_start_txeof(sc->hn_ifp);
}
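/*
 * netvsc_channel_rollup() runs at channel batch boundaries: flushing the
 * active LRO entries here bounds aggregation latency to one RX batch, and
 * the hn_txeof handoff above kicks the transmit path at most once per
 * batch instead of once per completed packet.
 */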
/*
 * NOTE:
 * If this function fails, then both txd and m_head0 will be freed.
 */
static int
hn_encap(struct hn_softc *sc, struct hn_txdesc *txd, struct mbuf **m_head0)
{
	bus_dma_segment_t segs[HN_TX_DATA_SEGCNT_MAX];
	int error, nsegs, i;
	struct mbuf *m_head = *m_head0;
	netvsc_packet *packet;
	rndis_msg *rndis_mesg;
	rndis_packet *rndis_pkt;
	rndis_per_packet_info *rppi;
	uint32_t rndis_msg_size;

	packet = &txd->netvsc_pkt;
	packet->is_data_pkt = TRUE;
	packet->tot_data_buf_len = m_head->m_pkthdr.len;

	/*
	 * extension points to the area reserved for the
	 * rndis_filter_packet, which is placed just after
	 * the netvsc_packet (and rppi struct, if present;
	 * length is updated later).
	 */
	rndis_mesg = txd->rndis_msg;

	/* XXX not necessary */
	memset(rndis_mesg, 0, HN_RNDIS_MSG_LEN);

	rndis_mesg->ndis_msg_type = REMOTE_NDIS_PACKET_MSG;

	rndis_pkt = &rndis_mesg->msg.packet;
	rndis_pkt->data_offset = sizeof(rndis_packet);
	rndis_pkt->data_length = packet->tot_data_buf_len;
	rndis_pkt->per_pkt_info_offset = sizeof(rndis_packet);

	rndis_msg_size = RNDIS_MESSAGE_SIZE(rndis_packet);

	if (m_head->m_flags & M_VLANTAG) {
		ndis_8021q_info *rppi_vlan_info;

		rndis_msg_size += RNDIS_VLAN_PPI_SIZE;
		rppi = hv_set_rppi_data(rndis_mesg, RNDIS_VLAN_PPI_SIZE,
		    ieee_8021q_info);

		rppi_vlan_info = (ndis_8021q_info *)((uint8_t *)rppi +
		    rppi->per_packet_info_offset);
		rppi_vlan_info->u1.s1.vlan_id =
		    m_head->m_pkthdr.ether_vtag & 0xfff;
	}

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		rndis_tcp_tso_info *tso_info;
		struct ether_vlan_header *eh;
		int ether_len;

		/*
		 * XXX need m_pullup and use mtodo
		 */
		eh = mtod(m_head, struct ether_vlan_header*);
		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
			ether_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		else
			ether_len = ETHER_HDR_LEN;

		rndis_msg_size += RNDIS_TSO_PPI_SIZE;
		rppi = hv_set_rppi_data(rndis_mesg, RNDIS_TSO_PPI_SIZE,
		    tcp_large_send_info);

		tso_info = (rndis_tcp_tso_info *)((uint8_t *)rppi +
		    rppi->per_packet_info_offset);
		tso_info->lso_v2_xmit.type =
		    RNDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;

#ifdef INET
		if (m_head->m_pkthdr.csum_flags & CSUM_IP_TSO) {
			struct ip *ip =
			    (struct ip *)(m_head->m_data + ether_len);
			unsigned long iph_len = ip->ip_hl << 2;
			struct tcphdr *th =
			    (struct tcphdr *)((caddr_t)ip + iph_len);

			tso_info->lso_v2_xmit.ip_version =
			    RNDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
			ip->ip_len = 0;
			ip->ip_sum = 0;

			th->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		}
#endif
#if defined(INET6) && defined(INET)
		else
#endif
#ifdef INET6
		{
			struct ip6_hdr *ip6 = (struct ip6_hdr *)
			    (m_head->m_data + ether_len);
			struct tcphdr *th = (struct tcphdr *)(ip6 + 1);

			tso_info->lso_v2_xmit.ip_version =
			    RNDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
			ip6->ip6_plen = 0;
			th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
		}
#endif
		tso_info->lso_v2_xmit.tcp_header_offset = 0;
		tso_info->lso_v2_xmit.mss = m_head->m_pkthdr.tso_segsz;
	} else if (m_head->m_pkthdr.csum_flags & sc->hn_csum_assist) {
		rndis_tcp_ip_csum_info *csum_info;

		rndis_msg_size += RNDIS_CSUM_PPI_SIZE;
		rppi = hv_set_rppi_data(rndis_mesg, RNDIS_CSUM_PPI_SIZE,
		    tcpip_chksum_info);
		csum_info = (rndis_tcp_ip_csum_info *)((uint8_t *)rppi +
		    rppi->per_packet_info_offset);

		csum_info->xmit.is_ipv4 = 1;
		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
			csum_info->xmit.ip_header_csum = 1;

		if (m_head->m_pkthdr.csum_flags & CSUM_TCP) {
			csum_info->xmit.tcp_csum = 1;
			csum_info->xmit.tcp_header_offset = 0;
		} else if (m_head->m_pkthdr.csum_flags & CSUM_UDP) {
			csum_info->xmit.udp_csum = 1;
		}
	}

	rndis_mesg->msg_len = packet->tot_data_buf_len + rndis_msg_size;
	packet->tot_data_buf_len = rndis_mesg->msg_len;

	/*
	 * Chimney send, if the packet could fit into one chimney buffer.
	 */
	if (packet->tot_data_buf_len < sc->hn_tx_chimney_size) {
		netvsc_dev *net_dev = sc->net_dev;
		uint32_t send_buf_section_idx;

		send_buf_section_idx =
		    hv_nv_get_next_send_section(net_dev);
		if (send_buf_section_idx !=
		    NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX) {
			uint8_t *dest = ((uint8_t *)net_dev->send_buf +
			    (send_buf_section_idx *
			     net_dev->send_section_size));

			memcpy(dest, rndis_mesg, rndis_msg_size);
			dest += rndis_msg_size;
			m_copydata(m_head, 0, m_head->m_pkthdr.len, dest);

			packet->send_buf_section_idx = send_buf_section_idx;
			packet->send_buf_section_size =
			    packet->tot_data_buf_len;
			packet->page_buf_count = 0;
			sc->hn_tx_chimney++;
			goto done;
		}
	}

	error = hn_txdesc_dmamap_load(sc, txd, &m_head, segs, &nsegs);
	if (error) {
		int freed;

		/*
		 * This mbuf is not linked w/ the txd yet, so free it now.
		 */
		m_freem(m_head);
		*m_head0 = NULL;

		freed = hn_txdesc_put(sc, txd);
		KASSERT(freed != 0,
		    ("fail to free txd upon txdma error"));

		sc->hn_txdma_failed++;
		if_inc_counter(sc->hn_ifp, IFCOUNTER_OERRORS, 1);
		return error;
	}
	*m_head0 = m_head;

	packet->page_buf_count = nsegs + HV_RF_NUM_TX_RESERVED_PAGE_BUFS;

	/* Send packet with page buffers. */
	packet->page_buffers[0].pfn = atop(txd->rndis_msg_paddr);
	packet->page_buffers[0].offset = txd->rndis_msg_paddr & PAGE_MASK;
	packet->page_buffers[0].length = rndis_msg_size;

	/*
	 * Fill the page buffers with mbuf info starting at index
	 * HV_RF_NUM_TX_RESERVED_PAGE_BUFS.
	 */
	for (i = 0; i < nsegs; ++i) {
		hv_vmbus_page_buffer *pb = &packet->page_buffers[
		    i + HV_RF_NUM_TX_RESERVED_PAGE_BUFS];

		pb->pfn = atop(segs[i].ds_addr);
		pb->offset = segs[i].ds_addr & PAGE_MASK;
		pb->length = segs[i].ds_len;
	}

	packet->send_buf_section_idx =
	    NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX;
	packet->send_buf_section_size = 0;
done:
	txd->m = m_head;

	/* Set the completion routine */
	packet->compl.send.on_send_completion = netvsc_xmit_completion;
	packet->compl.send.send_completion_context = packet;
	packet->compl.send.send_completion_tid = (uint64_t)(uintptr_t)txd;

	return 0;
}
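/*
 * To summarize, hn_encap() picks one of two transmit layouts:
 *
 *	chimney (copy) send:		page buffer (zero-copy) send:
 *	  send_buf_section_idx = N	  page_buffers[0]   = RNDIS msg
 *	  page_buf_count       = 0	  page_buffers[1..] = mbuf segments
 *
 * Small packets win by being copied into the pre-posted send buffer;
 * larger ones are DMA-mapped and handed to the host by pfn/offset/length.
 */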
/*
 * Start a transmit of one or more packets
 */
static int
hn_start_locked(struct ifnet *ifp, int len)
{
	struct hn_softc *sc = ifp->if_softc;
	struct hv_device *device_ctx = vmbus_get_devctx(sc->hn_dev);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return 0;

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		int error, send_failed = 0;
		struct hn_txdesc *txd;
		struct mbuf *m_head;

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (len > 0 && m_head->m_pkthdr.len > len) {
			/*
			 * This sending could be time consuming; let callers
			 * dispatch this packet sending (and sending of any
			 * follow-up packets) to the TX taskqueue.
			 */
			IF_PREPEND(&ifp->if_snd, m_head);
			return 1;
		}

		txd = hn_txdesc_get(sc);
		if (txd == NULL) {
			sc->hn_no_txdescs++;
			IF_PREPEND(&ifp->if_snd, m_head);
			atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
			break;
		}

		error = hn_encap(sc, txd, &m_head);
		if (error) {
			/* Both txd and m_head are freed */
			continue;
		}
again:
		/*
		 * Make sure that txd is not freed before ETHER_BPF_MTAP.
		 */
		hn_txdesc_hold(txd);
		error = hv_nv_on_send(device_ctx, &txd->netvsc_pkt);
		if (!error) {
			ETHER_BPF_MTAP(ifp, m_head);
			if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		}
		hn_txdesc_put(sc, txd);

		if (__predict_false(error)) {
			int freed;

			/*
			 * This should "really rarely" happen.
			 *
			 * XXX Too many RX to be acked or too many sideband
			 * commands to run?  Ask netvsc_channel_rollup()
			 * to kick start later.
			 */
			sc->hn_txeof = 1;
			if (!send_failed) {
				sc->hn_send_failed++;
				send_failed = 1;
				/*
				 * Try sending again after setting hn_txeof,
				 * in case we missed the last
				 * netvsc_channel_rollup().
				 */
				goto again;
			}
			if_printf(ifp, "send failed\n");

			/*
			 * This mbuf will be prepended; don't free it
			 * in hn_txdesc_put().  Only unload it from the
			 * DMA map in hn_txdesc_put(), if it was loaded.
			 */
			txd->m = NULL;
			freed = hn_txdesc_put(sc, txd);
			KASSERT(freed != 0,
			    ("fail to free txd upon send error"));

			sc->hn_send_failed++;
			IF_PREPEND(&ifp->if_snd, m_head);
			atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
			break;
		}
	}
	return 0;
}
/*
 * Link up/down notification
 */
void
netvsc_linkstatus_callback(struct hv_device *device_obj, uint32_t status)
{
	hn_softc_t *sc = device_get_softc(device_obj->device);

	if (status == 1) {
		sc->hn_carrier = 1;
	} else {
		sc->hn_carrier = 0;
	}
}
/*
 * Append the specified data to the indicated mbuf chain.
 * Extend the mbuf chain if the new data does not fit in
 * existing space.
 *
 * This is a minor rewrite of m_append() from sys/kern/uipc_mbuf.c.
 * There should be an equivalent in the kernel mbuf code,
 * but there does not appear to be one yet.
 *
 * Differs from m_append() in that additional mbufs are
 * allocated with cluster size MJUMPAGESIZE, and filled
 * accordingly.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
static int
hv_m_append(struct mbuf *m0, int len, c_caddr_t cp)
{
	struct mbuf *m, *n;
	int remainder, space;

	for (m = m0; m->m_next != NULL; m = m->m_next)
		;
	remainder = len;
	space = M_TRAILINGSPACE(m);
	if (space > 0) {
		/*
		 * Copy into available space.
		 */
		if (space > remainder)
			space = remainder;
		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
		m->m_len += space;
		cp += space;
		remainder -= space;
	}
	while (remainder > 0) {
		/*
		 * Allocate a new mbuf; could check space
		 * and allocate a cluster instead.
		 */
		n = m_getjcl(M_NOWAIT, m->m_type, 0, MJUMPAGESIZE);
		if (n == NULL)
			break;
		n->m_len = min(MJUMPAGESIZE, remainder);
		bcopy(cp, mtod(n, caddr_t), n->m_len);
		cp += n->m_len;
		remainder -= n->m_len;
		m->m_next = n;
		m = n;
	}
	if (m0->m_flags & M_PKTHDR)
		m0->m_pkthdr.len += len - remainder;

	return (remainder == 0);
}
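/*
 * e.g. the RX path below copies a received packet into a freshly
 * allocated jumbo-cluster mbuf chain:
 *
 *	hv_m_append(m_new, packet->tot_data_buf_len, packet->data);
 *
 * A 0 return means an allocation failed mid-copy and the chain holds a
 * truncated packet; netvsc_recv() currently does not check for this.
 */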
/*
 * Called when we receive a data packet from the "wire" on the
 * specified device
 *
 * Note:  This is no longer used as a callback
 */
int
netvsc_recv(struct hv_device *device_ctx, netvsc_packet *packet,
    rndis_tcp_ip_csum_info *csum_info)
{
	struct hn_softc *sc = device_get_softc(device_ctx->device);
	struct hn_rx_ring *rxr = &sc->hn_rx_ring[0]; /* TODO: vRSS */
	struct ifnet *ifp = sc->hn_ifp;
	struct mbuf *m_new;
	int size, do_lro = 0, do_csum = 1;

	if (packet->tot_data_buf_len == 0)
		return (0); /* TODO: KYS how can this be! */

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		return (0);
	}

	/*
	 * Bail out if packet contains more data than configured MTU.
	 */
	if (packet->tot_data_buf_len > (ifp->if_mtu + ETHER_HDR_LEN)) {
		return (0);
	} else if (packet->tot_data_buf_len <= MHLEN) {
		m_new = m_gethdr(M_NOWAIT, MT_DATA);
		if (m_new == NULL)
			return (0);
		memcpy(mtod(m_new, void *), packet->data,
		    packet->tot_data_buf_len);
		m_new->m_pkthdr.len = m_new->m_len = packet->tot_data_buf_len;
		rxr->hn_small_pkts++;
	} else {
		/*
		 * Get an mbuf with a cluster.  For packets 2K or less,
		 * get a standard 2K cluster.  For anything larger, get a
		 * 4K cluster.  Any buffers larger than 4K can cause problems
		 * if looped around to the Hyper-V TX channel, so avoid them.
		 */
		size = MCLBYTES;
		if (packet->tot_data_buf_len > MCLBYTES) {
			/* 4096 */
			size = MJUMPAGESIZE;
		}

		m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, size);
		if (m_new == NULL) {
			if_printf(ifp, "alloc mbuf failed.\n");
			return (0);
		}

		hv_m_append(m_new, packet->tot_data_buf_len, packet->data);
	}
	m_new->m_pkthdr.rcvif = ifp;

	if (__predict_false((ifp->if_capenable & IFCAP_RXCSUM) == 0))
		do_csum = 0;

	/* receive side checksum offload */
	if (csum_info != NULL) {
		/* IP csum offload */
		if (csum_info->receive.ip_csum_succeeded && do_csum) {
			m_new->m_pkthdr.csum_flags |=
			    (CSUM_IP_CHECKED | CSUM_IP_VALID);
			rxr->hn_csum_ip++;
		}

		/* TCP/UDP csum offload */
		if ((csum_info->receive.tcp_csum_succeeded ||
		     csum_info->receive.udp_csum_succeeded) && do_csum) {
			m_new->m_pkthdr.csum_flags |=
			    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
			m_new->m_pkthdr.csum_data = 0xffff;
			if (csum_info->receive.tcp_csum_succeeded)
				rxr->hn_csum_tcp++;
			else
				rxr->hn_csum_udp++;
		}

		if (csum_info->receive.ip_csum_succeeded &&
		    csum_info->receive.tcp_csum_succeeded)
			do_lro = 1;
	} else {
		const struct ether_header *eh;
		uint16_t etype;
		int hoff;

		hoff = sizeof(*eh);
		if (m_new->m_len < hoff)
			goto skip;
		eh = mtod(m_new, struct ether_header *);
		etype = ntohs(eh->ether_type);
		if (etype == ETHERTYPE_VLAN) {
			const struct ether_vlan_header *evl;

			hoff = sizeof(*evl);
			if (m_new->m_len < hoff)
				goto skip;
			evl = mtod(m_new, struct ether_vlan_header *);
			etype = ntohs(evl->evl_proto);
		}

		if (etype == ETHERTYPE_IP) {
			int pr;

			pr = hn_check_iplen(m_new, hoff);
			if (pr == IPPROTO_TCP) {
				if (do_csum &&
				    (rxr->hn_trust_hcsum &
				     HN_TRUST_HCSUM_TCP)) {
					rxr->hn_csum_trusted++;
					m_new->m_pkthdr.csum_flags |=
					   (CSUM_IP_CHECKED | CSUM_IP_VALID |
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
					m_new->m_pkthdr.csum_data = 0xffff;
				}
				/* Rely on SW csum verification though... */
				do_lro = 1;
			} else if (pr == IPPROTO_UDP) {
				if (do_csum &&
				    (rxr->hn_trust_hcsum &
				     HN_TRUST_HCSUM_UDP)) {
					rxr->hn_csum_trusted++;
					m_new->m_pkthdr.csum_flags |=
					   (CSUM_IP_CHECKED | CSUM_IP_VALID |
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
					m_new->m_pkthdr.csum_data = 0xffff;
				}
			} else if (pr != IPPROTO_DONE && do_csum &&
			    (rxr->hn_trust_hcsum & HN_TRUST_HCSUM_IP)) {
				rxr->hn_csum_trusted++;
				m_new->m_pkthdr.csum_flags |=
				    (CSUM_IP_CHECKED | CSUM_IP_VALID);
			}
		}
	}
skip:
	if ((packet->vlan_tci != 0) &&
	    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
		m_new->m_pkthdr.ether_vtag = packet->vlan_tci;
		m_new->m_flags |= M_VLANTAG;
	}

	/*
	 * Note:  Moved RX completion back to hv_nv_on_receive() so all
	 * messages (not just data messages) will trigger a response.
	 */

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	if ((ifp->if_capenable & IFCAP_LRO) && do_lro) {
#if defined(INET) || defined(INET6)
		struct lro_ctrl *lro = &rxr->hn_lro;

		if (lro->lro_cnt) {
			rxr->hn_lro_tried++;
			if (tcp_lro_rx(lro, m_new, 0) == 0) {
				/* DONE! */
				return 0;
			}
		}
#endif
	}

	/* We're not holding the lock here, so don't release it */
	(*ifp->if_input)(ifp, m_new);

	return (0);
}
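/*
 * In short: do_lro is set only for TCP packets, either because the host
 * verified both the IP and TCP checksums, or because hn_check_iplen()
 * found a well-formed, unfragmented TCP/IPv4 packet; everything else
 * bypasses tcp_lro_rx() and goes straight to if_input.
 */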
void
netvsc_recv_rollup(struct hv_device *device_ctx __unused)
{
}
/*
 * Rules for using sc->temp_unusable:
 * 1.  sc->temp_unusable can only be read or written while holding NV_LOCK()
 * 2.  code reading sc->temp_unusable under NV_LOCK(), and finding
 *     sc->temp_unusable set, must release NV_LOCK() and exit
 * 3.  to retain exclusive control of the interface,
 *     sc->temp_unusable must be set by code before releasing NV_LOCK()
 * 4.  only code setting sc->temp_unusable can clear sc->temp_unusable
 * 5.  code setting sc->temp_unusable must eventually clear sc->temp_unusable
 */
/*
 * Standard ioctl entry point.  Called when the user wants to configure
 * the interface.
 */
static int
hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	hn_softc_t *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
#ifdef INET
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	netvsc_device_info device_info;
	struct hv_device *hn_dev;
	int mask, error = 0;
	int retry_cnt = 500;

	switch (cmd) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				hn_ifinit(sc);
			arp_ifinit(ifp, ifa);
		} else
#endif
			error = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCSIFMTU:
		hn_dev = vmbus_get_devctx(sc->hn_dev);

		/* Check MTU value change */
		if (ifp->if_mtu == ifr->ifr_mtu)
			break;

		if (ifr->ifr_mtu > NETVSC_MAX_CONFIGURABLE_MTU) {
			error = EINVAL;
			break;
		}

		/* Obtain and record requested MTU */
		ifp->if_mtu = ifr->ifr_mtu;

		/*
		 * Make sure that LRO aggregation length limit is still
		 * valid, after the MTU change.
		 */
		if (sc->hn_rx_ring[0].hn_lro.lro_length_lim <
		    HN_LRO_LENLIM_MIN(ifp)) {
			int i;

			for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
				sc->hn_rx_ring[i].hn_lro.lro_length_lim =
				    HN_LRO_LENLIM_MIN(ifp);
			}
		}

		do {
			NV_LOCK(sc);
			if (!sc->temp_unusable) {
				sc->temp_unusable = TRUE;
				retry_cnt = -1;
			}
			NV_UNLOCK(sc);
			if (retry_cnt > 0) {
				retry_cnt--;
				DELAY(5 * 1000);
			}
		} while (retry_cnt > 0);

		if (retry_cnt == 0) {
			error = EBUSY;
			break;
		}

		/* We must remove and add back the device to cause the new
		 * MTU to take effect.  This includes tearing down, but not
		 * deleting the channel, then bringing it back up.
		 */
		error = hv_rf_on_device_remove(hn_dev, HV_RF_NV_RETAIN_CHANNEL);
		if (error) {
			NV_LOCK(sc);
			sc->temp_unusable = FALSE;
			NV_UNLOCK(sc);
			break;
		}
		error = hv_rf_on_device_add(hn_dev, &device_info);
		if (error) {
			NV_LOCK(sc);
			sc->temp_unusable = FALSE;
			NV_UNLOCK(sc);
			break;
		}

		sc->hn_tx_chimney_max = sc->net_dev->send_section_size;
		if (sc->hn_tx_chimney_size > sc->hn_tx_chimney_max)
			sc->hn_tx_chimney_size = sc->hn_tx_chimney_max;
		hn_ifinit_locked(sc);

		NV_LOCK(sc);
		sc->temp_unusable = FALSE;
		NV_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		do {
			NV_LOCK(sc);
			if (!sc->temp_unusable) {
				sc->temp_unusable = TRUE;
				retry_cnt = -1;
			}
			NV_UNLOCK(sc);
			if (retry_cnt > 0) {
				retry_cnt--;
				DELAY(5 * 1000);
			}
		} while (retry_cnt > 0);

		if (retry_cnt == 0) {
			error = EBUSY;
			break;
		}

		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC.  Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or so.
			 */
			/* Fixme:  Promiscuous mode? */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->hn_if_flags & IFF_PROMISC)) {
				/* do something here for Hyper-V */
			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->hn_if_flags & IFF_PROMISC) {
				/* do something here for Hyper-V */
			}

			hn_ifinit_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				hn_stop(sc);
			}
		}
		NV_LOCK(sc);
		sc->temp_unusable = FALSE;
		NV_UNLOCK(sc);
		sc->hn_if_flags = ifp->if_flags;
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM)
				ifp->if_hwassist |= sc->hn_csum_assist;
			else
				ifp->if_hwassist &= ~sc->hn_csum_assist;
		}

		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;

		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;

		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if (ifp->if_capenable & IFCAP_TSO4)
				ifp->if_hwassist |= CSUM_IP_TSO;
			else
				ifp->if_hwassist &= ~CSUM_IP_TSO;
		}

		if (mask & IFCAP_TSO6) {
			ifp->if_capenable ^= IFCAP_TSO6;
			if (ifp->if_capenable & IFCAP_TSO6)
				ifp->if_hwassist |= CSUM_IP6_TSO;
			else
				ifp->if_hwassist &= ~CSUM_IP6_TSO;
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Fixme:  Multicast mode? */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			NV_LOCK(sc);
			netvsc_setmulti(sc);
			NV_UNLOCK(sc);
		}
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->hn_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
static void
hn_stop(hn_softc_t *sc)
{
	struct ifnet *ifp;
	int ret;
	struct hv_device *device_ctx = vmbus_get_devctx(sc->hn_dev);

	ifp = sc->hn_ifp;

	if (bootverbose)
		printf(" Closing Device ...\n");

	atomic_clear_int(&ifp->if_drv_flags,
	    (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	if_link_state_change(ifp, LINK_STATE_DOWN);
	sc->hn_initdone = 0;

	ret = hv_rf_on_close(device_ctx);
}
/*
 * FreeBSD transmit entry point
 */
static void
hn_start(struct ifnet *ifp)
{
	struct hn_softc *sc = ifp->if_softc;

	if (sc->hn_sched_tx)
		goto do_sched;

	if (NV_TRYLOCK(sc)) {
		int sched;

		sched = hn_start_locked(ifp, sc->hn_direct_tx_size);
		NV_UNLOCK(sc);
		if (!sched)
			return;
	}
do_sched:
	taskqueue_enqueue(sc->hn_tx_taskq, &sc->hn_start_task);
}

static void
hn_start_txeof(struct ifnet *ifp)
{
	struct hn_softc *sc = ifp->if_softc;

	if (sc->hn_sched_tx)
		goto do_sched;

	if (NV_TRYLOCK(sc)) {
		int sched;

		atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
		sched = hn_start_locked(ifp, sc->hn_direct_tx_size);
		NV_UNLOCK(sc);
		if (sched)
			taskqueue_enqueue(sc->hn_tx_taskq, &sc->hn_start_task);
	} else {
do_sched:
		/*
		 * Release OACTIVE earlier, in the hope that others
		 * can catch up.  The task will clear the flag again
		 * under the NV_LOCK to avoid possible races.
		 */
		atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
		taskqueue_enqueue(sc->hn_tx_taskq, &sc->hn_txeof_task);
	}
}
static void
hn_ifinit_locked(hn_softc_t *sc)
{
	struct ifnet *ifp;
	struct hv_device *device_ctx = vmbus_get_devctx(sc->hn_dev);
	int ret;

	ifp = sc->hn_ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		return;
	}

	hv_promisc_mode = 1;

	ret = hv_rf_on_open(device_ctx);
	if (ret != 0) {
		return;
	} else {
		sc->hn_initdone = 1;
	}
	atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
	atomic_set_int(&ifp->if_drv_flags, IFF_DRV_RUNNING);
	if_link_state_change(ifp, LINK_STATE_UP);
}

static void
hn_ifinit(void *xsc)
{
	hn_softc_t *sc = xsc;

	NV_LOCK(sc);
	if (sc->temp_unusable) {
		NV_UNLOCK(sc);
		return;
	}
	sc->temp_unusable = TRUE;
	NV_UNLOCK(sc);

	hn_ifinit_locked(sc);

	NV_LOCK(sc);
	sc->temp_unusable = FALSE;
	NV_UNLOCK(sc);
}
#ifdef LATER
static void
hn_watchdog(struct ifnet *ifp)
{
	hn_softc_t *sc;

	sc = ifp->if_softc;
	printf("hn%d: watchdog timeout -- resetting\n", sc->hn_unit);
	hn_ifinit(sc);    /*???*/
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
}
#endif
static int
hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	unsigned int lenlim;
	int error, i;

	lenlim = sc->hn_rx_ring[0].hn_lro.lro_length_lim;
	error = sysctl_handle_int(oidp, &lenlim, 0, req);
	if (error || req->newptr == NULL)
		return error;

	if (lenlim < HN_LRO_LENLIM_MIN(sc->hn_ifp) ||
	    lenlim > TCP_LRO_LENGTH_MAX)
		return EINVAL;

	for (i = 0; i < sc->hn_rx_ring_cnt; ++i)
		sc->hn_rx_ring[i].hn_lro.lro_length_lim = lenlim;
	return 0;
}
static int
hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int ackcnt, error, i;

	/*
	 * lro_ackcnt_lim is the append count limit;
	 * +1 turns it into the aggregation limit.
	 */
	ackcnt = sc->hn_rx_ring[0].hn_lro.lro_ackcnt_lim + 1;
	error = sysctl_handle_int(oidp, &ackcnt, 0, req);
	if (error || req->newptr == NULL)
		return error;

	if (ackcnt < 2 || ackcnt > (TCP_LRO_ACKCNT_MAX + 1))
		return EINVAL;

	/*
	 * Convert the aggregation limit back to the append
	 * count limit.
	 */
	--ackcnt;
	for (i = 0; i < sc->hn_rx_ring_cnt; ++i)
		sc->hn_rx_ring[i].hn_lro.lro_ackcnt_lim = ackcnt;
	return 0;
}
static int
hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int hcsum = arg2;
	int on, error, i;

	on = 0;
	if (sc->hn_rx_ring[0].hn_trust_hcsum & hcsum)
		on = 1;

	error = sysctl_handle_int(oidp, &on, 0, req);
	if (error || req->newptr == NULL)
		return error;

	for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
		struct hn_rx_ring *rxr = &sc->hn_rx_ring[i];

		if (on)
			rxr->hn_trust_hcsum |= hcsum;
		else
			rxr->hn_trust_hcsum &= ~hcsum;
	}
	return 0;
}
static int
hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int chimney_size, error;

	chimney_size = sc->hn_tx_chimney_size;
	error = sysctl_handle_int(oidp, &chimney_size, 0, req);
	if (error || req->newptr == NULL)
		return error;

	if (chimney_size > sc->hn_tx_chimney_max || chimney_size <= 0)
		return EINVAL;

	if (sc->hn_tx_chimney_size != chimney_size)
		sc->hn_tx_chimney_size = chimney_size;
	return 0;
}
static int
hn_rx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int ofs = arg2, i, error;
	struct hn_rx_ring *rxr;
	u_long stat;

	stat = 0;
	for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
		rxr = &sc->hn_rx_ring[i];
		stat += *((u_long *)((uint8_t *)rxr + ofs));
	}

	error = sysctl_handle_long(oidp, &stat, 0, req);
	if (error || req->newptr == NULL)
		return error;

	/* Zero out this stat. */
	for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
		rxr = &sc->hn_rx_ring[i];
		*((u_long *)((uint8_t *)rxr + ofs)) = 0;
	}
	return 0;
}

static int
hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int ofs = arg2, i, error;
	struct hn_rx_ring *rxr;
	uint64_t stat;

	stat = 0;
	for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
		rxr = &sc->hn_rx_ring[i];
		stat += *((uint64_t *)((uint8_t *)rxr + ofs));
	}

	error = sysctl_handle_64(oidp, &stat, 0, req);
	if (error || req->newptr == NULL)
		return error;

	/* Zero out this stat. */
	for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
		rxr = &sc->hn_rx_ring[i];
		*((uint64_t *)((uint8_t *)rxr + ofs)) = 0;
	}
	return 0;
}
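/*
 * hn_check_iplen() sanity-checks an IPv4 packet starting at byte offset
 * 'hoff' in the mbuf and returns the IP protocol (e.g. IPPROTO_TCP) when
 * the full L3/L4 headers are present and the packet is not a fragment;
 * IPPROTO_DONE means "do not trust / do not LRO this packet".
 */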
static int
hn_check_iplen(const struct mbuf *m, int hoff)
{
	const struct ip *ip;
	int len, iphlen, iplen;
	const struct tcphdr *th;
	int thoff;				/* TCP data offset */

	len = hoff + sizeof(struct ip);

	/* The packet must be at least the size of an IP header. */
	if (m->m_pkthdr.len < len)
		return IPPROTO_DONE;

	/* The fixed IP header must reside completely in the first mbuf. */
	if (m->m_len < len)
		return IPPROTO_DONE;

	ip = mtodo(m, hoff);

	/* Bound check the packet's stated IP header length. */
	iphlen = ip->ip_hl << 2;
	if (iphlen < sizeof(struct ip))		/* minimum header length */
		return IPPROTO_DONE;

	/* The full IP header must reside completely in the one mbuf. */
	if (m->m_len < hoff + iphlen)
		return IPPROTO_DONE;

	iplen = ntohs(ip->ip_len);

	/*
	 * Check that the amount of data in the buffers is at least
	 * as much as the IP header would have us expect.
	 */
	if (m->m_pkthdr.len < hoff + iplen)
		return IPPROTO_DONE;

	/*
	 * Ignore IP fragments.
	 */
	if (ntohs(ip->ip_off) & (IP_OFFMASK | IP_MF))
		return IPPROTO_DONE;

	/*
	 * The TCP/IP or UDP/IP header must be entirely contained within
	 * the first fragment of a packet.
	 */
	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (iplen < iphlen + sizeof(struct tcphdr))
			return IPPROTO_DONE;
		if (m->m_len < hoff + iphlen + sizeof(struct tcphdr))
			return IPPROTO_DONE;
		th = (const struct tcphdr *)((const uint8_t *)ip + iphlen);
		thoff = th->th_off << 2;
		if (thoff < sizeof(struct tcphdr) || thoff + iphlen > iplen)
			return IPPROTO_DONE;
		if (m->m_len < hoff + iphlen + thoff)
			return IPPROTO_DONE;
		break;
	case IPPROTO_UDP:
		if (iplen < iphlen + sizeof(struct udphdr))
			return IPPROTO_DONE;
		if (m->m_len < hoff + iphlen + sizeof(struct udphdr))
			return IPPROTO_DONE;
		break;
	default:
		if (m->m_len < hoff + iphlen)
			return IPPROTO_DONE;
		break;
	}
	return ip->ip_p;
}
static void
hn_dma_map_paddr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr = arg;

	if (error)
		return;

	KASSERT(nseg == 1, ("too many segments %d!", nseg));
	*paddr = segs->ds_addr;
}
static void
hn_create_rx_data(struct hn_softc *sc)
{
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx;
	device_t dev = sc->hn_dev;
#if defined(INET) || defined(INET6)
#if __FreeBSD_version >= 1100095
	int lroent_cnt;
#endif
#endif
	int i;

	sc->hn_rx_ring_cnt = 1; /* TODO: vRSS */
	sc->hn_rx_ring = malloc(sizeof(struct hn_rx_ring) * sc->hn_rx_ring_cnt,
	    M_NETVSC, M_WAITOK | M_ZERO);

#if defined(INET) || defined(INET6)
#if __FreeBSD_version >= 1100095
	lroent_cnt = hn_lro_entry_count;
	if (lroent_cnt < TCP_LRO_ENTRIES)
		lroent_cnt = TCP_LRO_ENTRIES;
	device_printf(dev, "LRO: entry count %d\n", lroent_cnt);
#endif
#endif	/* INET || INET6 */

	for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
		struct hn_rx_ring *rxr = &sc->hn_rx_ring[i];

		if (hn_trust_hosttcp)
			rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_TCP;
		if (hn_trust_hostudp)
			rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_UDP;
		if (hn_trust_hostip)
			rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_IP;

#if defined(INET) || defined(INET6)
#if __FreeBSD_version >= 1100095
		tcp_lro_init_args(&rxr->hn_lro, sc->hn_ifp, lroent_cnt, 0);
#else
		tcp_lro_init(&rxr->hn_lro);
		rxr->hn_lro.ifp = sc->hn_ifp;
#endif
		rxr->hn_lro.lro_length_lim = HN_LRO_LENLIM_DEF;
		rxr->hn_lro.lro_ackcnt_lim = HN_LRO_ACKCNT_DEF;
#endif	/* INET || INET6 */
	}

	ctx = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_queued",
	    CTLTYPE_U64 | CTLFLAG_RW, sc,
	    __offsetof(struct hn_rx_ring, hn_lro.lro_queued),
	    hn_rx_stat_u64_sysctl, "LU", "LRO queued");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_flushed",
	    CTLTYPE_U64 | CTLFLAG_RW, sc,
	    __offsetof(struct hn_rx_ring, hn_lro.lro_flushed),
	    hn_rx_stat_u64_sysctl, "LU", "LRO flushed");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_tried",
	    CTLTYPE_ULONG | CTLFLAG_RW, sc,
	    __offsetof(struct hn_rx_ring, hn_lro_tried),
	    hn_rx_stat_ulong_sysctl, "LU", "# of LRO tries");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_length_lim",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, 0, hn_lro_lenlim_sysctl, "IU",
	    "Max # of data bytes to be aggregated by LRO");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_ackcnt_lim",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, hn_lro_ackcnt_sysctl, "I",
	    "Max # of ACKs to be aggregated by LRO");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hosttcp",
	    CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_TCP,
	    hn_trust_hcsum_sysctl, "I",
	    "Trust TCP segment verification on host side, "
	    "when csum info is missing");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostudp",
	    CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_UDP,
	    hn_trust_hcsum_sysctl, "I",
	    "Trust UDP datagram verification on host side, "
	    "when csum info is missing");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostip",
	    CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_IP,
	    hn_trust_hcsum_sysctl, "I",
	    "Trust IP packet verification on host side, "
	    "when csum info is missing");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_ip",
	    CTLTYPE_ULONG | CTLFLAG_RW, sc,
	    __offsetof(struct hn_rx_ring, hn_csum_ip),
	    hn_rx_stat_ulong_sysctl, "LU", "RXCSUM IP");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_tcp",
	    CTLTYPE_ULONG | CTLFLAG_RW, sc,
	    __offsetof(struct hn_rx_ring, hn_csum_tcp),
	    hn_rx_stat_ulong_sysctl, "LU", "RXCSUM TCP");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_udp",
	    CTLTYPE_ULONG | CTLFLAG_RW, sc,
	    __offsetof(struct hn_rx_ring, hn_csum_udp),
	    hn_rx_stat_ulong_sysctl, "LU", "RXCSUM UDP");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_trusted",
	    CTLTYPE_ULONG | CTLFLAG_RW, sc,
	    __offsetof(struct hn_rx_ring, hn_csum_trusted),
	    hn_rx_stat_ulong_sysctl, "LU",
	    "# of packets for which we trusted the host's csum verification");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "small_pkts",
	    CTLTYPE_ULONG | CTLFLAG_RW, sc,
	    __offsetof(struct hn_rx_ring, hn_small_pkts),
	    hn_rx_stat_ulong_sysctl, "LU", "# of small packets received");
}
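/*
 * The per-ring statistics above are exported through one sysctl each:
 * hn_rx_stat_ulong_sysctl()/hn_rx_stat_u64_sysctl() take the field
 * offset in arg2 and sum that field across all RX rings, so this layout
 * keeps working once more than one ring exists (vRSS).
 */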
static void
hn_destroy_rx_data(struct hn_softc *sc)
{
#if defined(INET) || defined(INET6)
	int i;
#endif

	if (sc->hn_rx_ring_cnt == 0)
		return;

#if defined(INET) || defined(INET6)
	for (i = 0; i < sc->hn_rx_ring_cnt; ++i)
		tcp_lro_free(&sc->hn_rx_ring[i].hn_lro);
#endif
	free(sc->hn_rx_ring, M_NETVSC);
	sc->hn_rx_ring = NULL;

	sc->hn_rx_ring_cnt = 0;
}
static int
hn_create_tx_ring(struct hn_softc *sc)
{
	bus_dma_tag_t parent_dtag;
	int error, i;

	sc->hn_txdesc_cnt = HN_TX_DESC_CNT;
	sc->hn_txdesc = malloc(sizeof(struct hn_txdesc) * sc->hn_txdesc_cnt,
	    M_NETVSC, M_WAITOK | M_ZERO);
	SLIST_INIT(&sc->hn_txlist);
	mtx_init(&sc->hn_txlist_spin, "hn txlist", NULL, MTX_SPIN);

	parent_dtag = bus_get_dma_tag(sc->hn_dev);

	/* DMA tag for RNDIS messages. */
	error = bus_dma_tag_create(parent_dtag, /* parent */
	    HN_RNDIS_MSG_ALIGN,		/* alignment */
	    HN_RNDIS_MSG_BOUNDARY,	/* boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    HN_RNDIS_MSG_LEN,		/* maxsize */
	    1,				/* nsegments */
	    HN_RNDIS_MSG_LEN,		/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockfuncarg */
	    &sc->hn_tx_rndis_dtag);
	if (error) {
		device_printf(sc->hn_dev, "failed to create rndis dmatag\n");
		return error;
	}

	/* DMA tag for data. */
	error = bus_dma_tag_create(parent_dtag, /* parent */
	    1,				/* alignment */
	    HN_TX_DATA_BOUNDARY,	/* boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    HN_TX_DATA_MAXSIZE,		/* maxsize */
	    HN_TX_DATA_SEGCNT_MAX,	/* nsegments */
	    HN_TX_DATA_SEGSIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockfuncarg */
	    &sc->hn_tx_data_dtag);
	if (error) {
		device_printf(sc->hn_dev, "failed to create data dmatag\n");
		return error;
	}

	for (i = 0; i < sc->hn_txdesc_cnt; ++i) {
		struct hn_txdesc *txd = &sc->hn_txdesc[i];

		txd->sc = sc;

		/*
		 * Allocate and load RNDIS messages.
		 */
		error = bus_dmamem_alloc(sc->hn_tx_rndis_dtag,
		    (void **)&txd->rndis_msg,
		    BUS_DMA_WAITOK | BUS_DMA_COHERENT,
		    &txd->rndis_msg_dmap);
		if (error) {
			device_printf(sc->hn_dev,
			    "failed to allocate rndis_msg, %d\n", i);
			return error;
		}

		error = bus_dmamap_load(sc->hn_tx_rndis_dtag,
		    txd->rndis_msg_dmap,
		    txd->rndis_msg, HN_RNDIS_MSG_LEN,
		    hn_dma_map_paddr, &txd->rndis_msg_paddr,
		    BUS_DMA_NOWAIT);
		if (error) {
			device_printf(sc->hn_dev,
			    "failed to load rndis_msg, %d\n", i);
			bus_dmamem_free(sc->hn_tx_rndis_dtag,
			    txd->rndis_msg, txd->rndis_msg_dmap);
			return error;
		}

		/* DMA map for TX data. */
		error = bus_dmamap_create(sc->hn_tx_data_dtag, 0,
		    &txd->data_dmap);
		if (error) {
			device_printf(sc->hn_dev,
			    "failed to allocate tx data dmamap\n");
			bus_dmamap_unload(sc->hn_tx_rndis_dtag,
			    txd->rndis_msg_dmap);
			bus_dmamem_free(sc->hn_tx_rndis_dtag,
			    txd->rndis_msg, txd->rndis_msg_dmap);
			return error;
		}

		/* All set, put it on the list */
		txd->flags |= HN_TXD_FLAG_ONLIST;
		SLIST_INSERT_HEAD(&sc->hn_txlist, txd, link);
	}
	sc->hn_txdesc_avail = sc->hn_txdesc_cnt;

	return 0;
}
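/*
 * Note the split tag setup above: RNDIS messages use a single-segment,
 * cache-line aligned tag (a message must not cross a page boundary),
 * while TX data uses a multi-segment tag bounded by HN_TX_DATA_SEGCNT_MAX
 * page-sized segments, matching the page buffer layout hn_encap() builds.
 */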
static void
hn_destroy_tx_ring(struct hn_softc *sc)
{
	struct hn_txdesc *txd;

	while ((txd = SLIST_FIRST(&sc->hn_txlist)) != NULL) {
		KASSERT(txd->m == NULL, ("still has mbuf installed"));
		KASSERT((txd->flags & HN_TXD_FLAG_DMAMAP) == 0,
		    ("still dma mapped"));
		SLIST_REMOVE_HEAD(&sc->hn_txlist, link);

		bus_dmamap_unload(sc->hn_tx_rndis_dtag,
		    txd->rndis_msg_dmap);
		bus_dmamem_free(sc->hn_tx_rndis_dtag,
		    txd->rndis_msg, txd->rndis_msg_dmap);

		bus_dmamap_destroy(sc->hn_tx_data_dtag, txd->data_dmap);
	}

	if (sc->hn_tx_data_dtag != NULL)
		bus_dma_tag_destroy(sc->hn_tx_data_dtag);
	if (sc->hn_tx_rndis_dtag != NULL)
		bus_dma_tag_destroy(sc->hn_tx_rndis_dtag);
	free(sc->hn_txdesc, M_NETVSC);
	mtx_destroy(&sc->hn_txlist_spin);
}
static void
hn_start_taskfunc(void *xsc, int pending __unused)
{
	struct hn_softc *sc = xsc;

	NV_LOCK(sc);
	hn_start_locked(sc->hn_ifp, 0);
	NV_UNLOCK(sc);
}

static void
hn_txeof_taskfunc(void *xsc, int pending __unused)
{
	struct hn_softc *sc = xsc;
	struct ifnet *ifp = sc->hn_ifp;

	NV_LOCK(sc);
	atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
	hn_start_locked(ifp, 0);
	NV_UNLOCK(sc);
}
static void
hn_tx_taskq_create(void *arg __unused)
{
	if (!hn_share_tx_taskq)
		return;

	hn_tx_taskq = taskqueue_create_fast("hn_tx", M_WAITOK,
	    taskqueue_thread_enqueue, &hn_tx_taskq);
	taskqueue_start_threads(&hn_tx_taskq, 1, PI_NET, "hn tx");
}
SYSINIT(hn_txtq_create, SI_SUB_DRIVERS, SI_ORDER_FIRST,
    hn_tx_taskq_create, NULL);
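/*
 * When hw.hn.share_tx_taskq is set, the single taskqueue created above at
 * SI_SUB_DRIVERS time is shared by all hn(4) instances; otherwise each
 * attach creates its own (see netvsc_attach()).
 */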
static void
hn_tx_taskq_destroy(void *arg __unused)
{
	if (hn_tx_taskq != NULL)
		taskqueue_free(hn_tx_taskq);
}
SYSUNINIT(hn_txtq_destroy, SI_SUB_DRIVERS, SI_ORDER_FIRST,
    hn_tx_taskq_destroy, NULL);
static device_method_t netvsc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		netvsc_probe),
	DEVMETHOD(device_attach,	netvsc_attach),
	DEVMETHOD(device_detach,	netvsc_detach),
	DEVMETHOD(device_shutdown,	netvsc_shutdown),

	DEVMETHOD_END
};

static driver_t netvsc_driver = {
	NETVSC_DEVNAME,
	netvsc_methods,
	sizeof(hn_softc_t)
};

static devclass_t netvsc_devclass;

DRIVER_MODULE(hn, vmbus, netvsc_driver, netvsc_devclass, 0, 0);
MODULE_VERSION(hn, 1);
MODULE_DEPEND(hn, vmbus, 1, 1, 1);