/*-
 * Copyright (c) 2010-2012 Citrix Inc.
 * Copyright (c) 2009-2012 Microsoft Corp.
 * Copyright (c) 2012 NetApp Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2004-2006 Kip Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"
#include "opt_inet.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/buf_ring.h>

#include <net/if.h>
#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <netinet/ip6.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/frame.h>

#include <sys/rman.h>
#include <sys/mutex.h>
#include <sys/errno.h>
#include <sys/types.h>
#include <machine/atomic.h>

#include <machine/intr_machdep.h>

#include <machine/in_cksum.h>

#include <dev/hyperv/include/hyperv.h>
#include "hv_net_vsc.h"
#include "hv_rndis.h"
#include "hv_rndis_filter.h"
#define hv_chan_rxr	hv_chan_priv1
#define hv_chan_txr	hv_chan_priv2
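/*
 * NOTE (informational): the two macros above simply alias the VMBus
 * channel's generic private-data slots (hv_chan_priv1/priv2); each
 * channel carries its RX ring in slot 1 and its TX ring in slot 2,
 * which hn_channel_attach() is presumably expected to populate before
 * the channel is used.
 */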
/* Short for Hyper-V network interface */
#define NETVSC_DEVNAME	"hn"
/*
 * It looks like offset 0 of buf is reserved to hold the softc pointer.
 * The sc pointer is evidently not needed, and is not presently populated.
 * The packet offset is where the netvsc_packet starts in the buffer.
 */
#define HV_NV_SC_PTR_OFFSET_IN_BUF	0
#define HV_NV_PACKET_OFFSET_IN_BUF	16
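/*
 * Rough buffer layout implied by the two offsets above (a sketch,
 * not authoritative):
 *
 *	offset  0:	reserved for the softc pointer (unused)
 *	offset 16:	netvsc_packet
 */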
/* YYY should get it from the underlying channel */
#define HN_TX_DESC_CNT			512

#define HN_LROENT_CNT_DEF		128

#define HN_RING_CNT_DEF_MAX		8
#define HN_RNDIS_MSG_LEN		\
    (sizeof(rndis_msg) +		\
     RNDIS_HASH_PPI_SIZE +		\
     RNDIS_VLAN_PPI_SIZE +		\
     RNDIS_TSO_PPI_SIZE +		\
     RNDIS_CSUM_PPI_SIZE)
#define HN_RNDIS_MSG_BOUNDARY		PAGE_SIZE
#define HN_RNDIS_MSG_ALIGN		CACHE_LINE_SIZE

#define HN_TX_DATA_BOUNDARY		PAGE_SIZE
#define HN_TX_DATA_MAXSIZE		IP_MAXPACKET
#define HN_TX_DATA_SEGSIZE		PAGE_SIZE
#define HN_TX_DATA_SEGCNT_MAX		\
    (NETVSC_PACKET_MAXPAGE - HV_RF_NUM_TX_RESERVED_PAGE_BUFS)
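/*
 * NOTE (informational): the reserved page buffers
 * (HV_RF_NUM_TX_RESERVED_PAGE_BUFS) are not available for mbuf data;
 * hn_encap() below uses page buffer 0 to carry the RNDIS message
 * itself, so the DMA segment count is reduced accordingly.
 */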
#define HN_DIRECT_TX_SIZE_DEF		128
struct hn_txdesc {
#ifndef HN_USE_TXDESC_BUFRING
	SLIST_ENTRY(hn_txdesc) link;
#endif
	struct mbuf	*m;
	struct hn_tx_ring *txr;
	int		refs;
	uint32_t	flags;		/* HN_TXD_FLAG_ */
	netvsc_packet	netvsc_pkt;	/* XXX to be removed */

	bus_dmamap_t	data_dmap;

	bus_addr_t	rndis_msg_paddr;
	rndis_msg	*rndis_msg;
	bus_dmamap_t	rndis_msg_dmap;
};

#define HN_TXD_FLAG_ONLIST	0x1
#define HN_TXD_FLAG_DMAMAP	0x2
/*
 * Only enable UDP checksum offloading when it is on 2012R2 or
 * later.  UDP checksum offloading doesn't work on earlier
 * Windows releases.
 */
#define HN_CSUM_ASSIST_WIN8	(CSUM_IP | CSUM_TCP)
#define HN_CSUM_ASSIST		(CSUM_IP | CSUM_UDP | CSUM_TCP)
#define HN_LRO_LENLIM_MULTIRX_DEF	(12 * ETHERMTU)
#define HN_LRO_LENLIM_DEF		(25 * ETHERMTU)
/* YYY 2*MTU is a bit rough, but should be good enough. */
#define HN_LRO_LENLIM_MIN(ifp)		(2 * (ifp)->if_mtu)

#define HN_LRO_ACKCNT_DEF		1
/*
 * Be aware that this sleepable mutex will exhibit WITNESS errors when
 * certain TCP and ARP code paths are taken.  This appears to be a
 * well-known condition, as all other drivers checked use a sleeping
 * mutex to protect their transmit paths.
 * Also be aware that mutexes do not play well with semaphores, and there
 * is a conflicting semaphore in a certain channel code path.
 */
#define NV_LOCK_INIT(_sc, _name) \
	    mtx_init(&(_sc)->hn_lock, _name, MTX_NETWORK_LOCK, MTX_DEF)
#define NV_LOCK(_sc)		mtx_lock(&(_sc)->hn_lock)
#define NV_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->hn_lock, MA_OWNED)
#define NV_UNLOCK(_sc)		mtx_unlock(&(_sc)->hn_lock)
#define NV_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->hn_lock)
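/*
 * Typical usage of the softc lock (a sketch only; see hn_ifinit() and
 * hn_ioctl() below for the real call sites):
 *
 *	NV_LOCK(sc);
 *	... inspect/update short-lived softc state ...
 *	NV_UNLOCK(sc);
 */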
int hv_promisc_mode = 0;    /* normal mode by default */

SYSCTL_NODE(_hw, OID_AUTO, hn, CTLFLAG_RD, NULL, "Hyper-V network interface");
/* Trust tcp segment verification on host side. */
static int hn_trust_hosttcp = 1;
SYSCTL_INT(_hw_hn, OID_AUTO, trust_hosttcp, CTLFLAG_RDTUN,
    &hn_trust_hosttcp, 0,
    "Trust tcp segment verification on host side, "
    "when csum info is missing (global setting)");

/* Trust udp datagram verification on host side. */
static int hn_trust_hostudp = 1;
SYSCTL_INT(_hw_hn, OID_AUTO, trust_hostudp, CTLFLAG_RDTUN,
    &hn_trust_hostudp, 0,
    "Trust udp datagram verification on host side, "
    "when csum info is missing (global setting)");

/* Trust ip packet verification on host side. */
static int hn_trust_hostip = 1;
SYSCTL_INT(_hw_hn, OID_AUTO, trust_hostip, CTLFLAG_RDTUN,
    &hn_trust_hostip, 0,
    "Trust ip packet verification on host side, "
    "when csum info is missing (global setting)");
#if __FreeBSD_version >= 1100045
/* Limit TSO burst size */
static int hn_tso_maxlen = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, tso_maxlen, CTLFLAG_RDTUN,
    &hn_tso_maxlen, 0, "TSO burst limit");
#endif

/* Limit chimney send size */
static int hn_tx_chimney_size = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, tx_chimney_size, CTLFLAG_RDTUN,
    &hn_tx_chimney_size, 0, "Chimney send packet size limit");

/* Limit the size of packet for direct transmission */
static int hn_direct_tx_size = HN_DIRECT_TX_SIZE_DEF;
SYSCTL_INT(_hw_hn, OID_AUTO, direct_tx_size, CTLFLAG_RDTUN,
    &hn_direct_tx_size, 0, "Size of the packet for direct transmission");
#if defined(INET) || defined(INET6)
#if __FreeBSD_version >= 1100095
static int hn_lro_entry_count = HN_LROENT_CNT_DEF;
SYSCTL_INT(_hw_hn, OID_AUTO, lro_entry_count, CTLFLAG_RDTUN,
    &hn_lro_entry_count, 0, "LRO entry count");
#endif
#endif
static int hn_share_tx_taskq = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, share_tx_taskq, CTLFLAG_RDTUN,
    &hn_share_tx_taskq, 0, "Enable shared TX taskqueue");

static struct taskqueue	*hn_tx_taskq;

#ifndef HN_USE_TXDESC_BUFRING
static int hn_use_txdesc_bufring = 0;
#else
static int hn_use_txdesc_bufring = 1;
#endif
SYSCTL_INT(_hw_hn, OID_AUTO, use_txdesc_bufring, CTLFLAG_RD,
    &hn_use_txdesc_bufring, 0, "Use buf_ring for TX descriptors");
static int hn_bind_tx_taskq = -1;
SYSCTL_INT(_hw_hn, OID_AUTO, bind_tx_taskq, CTLFLAG_RDTUN,
    &hn_bind_tx_taskq, 0, "Bind TX taskqueue to the specified cpu");

static int hn_use_if_start = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, use_if_start, CTLFLAG_RDTUN,
    &hn_use_if_start, 0, "Use if_start TX method");

static int hn_chan_cnt = 1;
SYSCTL_INT(_hw_hn, OID_AUTO, chan_cnt, CTLFLAG_RDTUN,
    &hn_chan_cnt, 0,
    "# of channels to use; each channel has one RX ring and one TX ring");

static int hn_tx_ring_cnt = 1;
SYSCTL_INT(_hw_hn, OID_AUTO, tx_ring_cnt, CTLFLAG_RDTUN,
    &hn_tx_ring_cnt, 0, "# of TX rings to use");

static u_int hn_cpu_index;
/*
 * Forward declarations
 */
static void hn_stop(hn_softc_t *sc);
static void hn_ifinit_locked(hn_softc_t *sc);
static void hn_ifinit(void *xsc);
static int hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int hn_start_locked(struct hn_tx_ring *txr, int len);
static void hn_start(struct ifnet *ifp);
static void hn_start_txeof(struct hn_tx_ring *);
static int hn_ifmedia_upd(struct ifnet *ifp);
static void hn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
#if __FreeBSD_version >= 1100099
static int hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS);
#endif
static int hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_rx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_tx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_tx_conf_int_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_check_iplen(const struct mbuf *, int);
static int hn_create_tx_ring(struct hn_softc *, int);
static void hn_destroy_tx_ring(struct hn_tx_ring *);
static int hn_create_tx_data(struct hn_softc *, int);
static void hn_destroy_tx_data(struct hn_softc *);
static void hn_start_taskfunc(void *, int);
static void hn_start_txeof_taskfunc(void *, int);
static void hn_stop_tx_tasks(struct hn_softc *);
static int hn_encap(struct hn_tx_ring *, struct hn_txdesc *, struct mbuf **);
static void hn_create_rx_data(struct hn_softc *sc, int);
static void hn_destroy_rx_data(struct hn_softc *sc);
static void hn_set_tx_chimney_size(struct hn_softc *, int);
static void hn_channel_attach(struct hn_softc *, struct hv_vmbus_channel *);

static int hn_transmit(struct ifnet *, struct mbuf *);
static void hn_xmit_qflush(struct ifnet *);
static int hn_xmit(struct hn_tx_ring *, int);
static void hn_xmit_txeof(struct hn_tx_ring *);
static void hn_xmit_taskfunc(void *, int);
static void hn_xmit_txeof_taskfunc(void *, int);
#if __FreeBSD_version >= 1100099
static void
hn_set_lro_lenlim(struct hn_softc *sc, int lenlim)
{
	int i;

	for (i = 0; i < sc->hn_rx_ring_inuse; ++i)
		sc->hn_rx_ring[i].hn_lro.lro_length_lim = lenlim;
}
#endif
static int
hn_ifmedia_upd(struct ifnet *ifp __unused)
{

	return EOPNOTSUPP;
}
static void
hn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct hn_softc *sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->hn_carrier) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}
	ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
}
/* {F8615163-DF3E-46c5-913F-F2D2F965ED0E} */
static const hv_guid g_net_vsc_device_type = {
	.data = {0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
		0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E}
};
/*
 * Standard probe entry point.
 */
static int
netvsc_probe(device_t dev)
{
	const char *p;

	p = vmbus_get_type(dev);
	if (!memcmp(p, &g_net_vsc_device_type.data, sizeof(hv_guid))) {
		device_set_desc(dev, "Synthetic Network Interface");
		if (bootverbose)
			printf("Netvsc probe... DONE\n");
		return (BUS_PROBE_DEFAULT);
	}
	return (ENXIO);
}
/*
 * Standard attach entry point.
 *
 * Called when the driver is loaded.  It allocates needed resources,
 * and initializes the "hardware" and software.
 */
static int
netvsc_attach(device_t dev)
{
	struct hv_device *device_ctx = vmbus_get_devctx(dev);
	struct hv_vmbus_channel *chan;
	netvsc_device_info device_info;
	hn_softc_t *sc;
	int unit = device_get_unit(dev);
	struct ifnet *ifp = NULL;
	int error, ring_cnt, tx_ring_cnt;
#if __FreeBSD_version >= 1100045
	int tso_maxlen;
#endif

	sc = device_get_softc(dev);

	bzero(sc, sizeof(hn_softc_t));
	sc->hn_unit = unit;
	sc->hn_dev = dev;
	if (hn_tx_taskq == NULL) {
		sc->hn_tx_taskq = taskqueue_create("hn_tx", M_WAITOK,
		    taskqueue_thread_enqueue, &sc->hn_tx_taskq);
		if (hn_bind_tx_taskq >= 0) {
			int cpu = hn_bind_tx_taskq;
			cpuset_t cpu_set;

			if (cpu > mp_ncpus - 1)
				cpu = mp_ncpus - 1;
			CPU_SETOF(cpu, &cpu_set);
			taskqueue_start_threads_cpuset(&sc->hn_tx_taskq, 1,
			    PI_NET, &cpu_set, "%s tx",
			    device_get_nameunit(dev));
		} else {
			taskqueue_start_threads(&sc->hn_tx_taskq, 1, PI_NET,
			    "%s tx", device_get_nameunit(dev));
		}
	} else {
		sc->hn_tx_taskq = hn_tx_taskq;
	}

	NV_LOCK_INIT(sc, "NetVSCLock");

	sc->hn_dev_obj = device_ctx;
	ifp = sc->hn_ifp = if_alloc(IFT_ETHER);
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	/*
	 * Figure out the # of RX rings (ring_cnt) and the # of TX rings
	 * to use (tx_ring_cnt).
	 *
	 * NOTE:
	 * The # of RX rings to use is same as the # of channels to use.
	 */
	ring_cnt = hn_chan_cnt;
	if (ring_cnt <= 0) {
		ring_cnt = mp_ncpus;
		if (ring_cnt > HN_RING_CNT_DEF_MAX)
			ring_cnt = HN_RING_CNT_DEF_MAX;
	} else if (ring_cnt > mp_ncpus) {
		ring_cnt = mp_ncpus;
	}

	tx_ring_cnt = hn_tx_ring_cnt;
	if (tx_ring_cnt <= 0 || tx_ring_cnt > ring_cnt)
		tx_ring_cnt = ring_cnt;
	if (hn_use_if_start) {
		/* ifnet.if_start only needs one TX ring. */
		tx_ring_cnt = 1;
	}

	/*
	 * Set the leader CPU for channels.
	 */
	sc->hn_cpu = atomic_fetchadd_int(&hn_cpu_index, ring_cnt) % mp_ncpus;
	error = hn_create_tx_data(sc, tx_ring_cnt);
	if (error)
		goto failed;
	hn_create_rx_data(sc, ring_cnt);

	/*
	 * Associate the first TX/RX ring w/ the primary channel.
	 */
	chan = device_ctx->channel;
	KASSERT(HV_VMBUS_CHAN_ISPRIMARY(chan), ("not primary channel"));
	KASSERT(chan->offer_msg.offer.sub_channel_index == 0,
	    ("primary channel subidx %u",
	     chan->offer_msg.offer.sub_channel_index));
	hn_channel_attach(sc, chan);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = hn_ioctl;
	ifp->if_init = hn_ifinit;
	/* needed by hv_rf_on_device_add() code */
	ifp->if_mtu = ETHERMTU;
	if (hn_use_if_start) {
		ifp->if_start = hn_start;
		IFQ_SET_MAXLEN(&ifp->if_snd, 512);
		ifp->if_snd.ifq_drv_maxlen = 511;
		IFQ_SET_READY(&ifp->if_snd);
	} else {
		ifp->if_transmit = hn_transmit;
		ifp->if_qflush = hn_xmit_qflush;
	}

	ifmedia_init(&sc->hn_media, 0, hn_ifmedia_upd, hn_ifmedia_sts);
	ifmedia_add(&sc->hn_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->hn_media, IFM_ETHER | IFM_AUTO);
	/* XXX ifmedia_set really should do this for us */
	sc->hn_media.ifm_media = sc->hn_media.ifm_cur->ifm_media;
	/*
	 * Tell upper layers that we support full VLAN capability.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |=
	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_TSO |
	    IFCAP_LRO;
	ifp->if_capenable |=
	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_TSO |
	    IFCAP_LRO;
	ifp->if_hwassist = sc->hn_tx_ring[0].hn_csum_assist | CSUM_TSO;

	error = hv_rf_on_device_add(device_ctx, &device_info, ring_cnt);
	if (error)
		goto failed;
	KASSERT(sc->net_dev->num_channel > 0 &&
	    sc->net_dev->num_channel <= sc->hn_rx_ring_inuse,
	    ("invalid channel count %u, should be at most %d",
	     sc->net_dev->num_channel, sc->hn_rx_ring_inuse));

	/*
	 * Set the # of TX/RX rings that could be used according to
	 * the # of channels that host offered.
	 */
	if (sc->hn_tx_ring_inuse > sc->net_dev->num_channel)
		sc->hn_tx_ring_inuse = sc->net_dev->num_channel;
	sc->hn_rx_ring_inuse = sc->net_dev->num_channel;
	device_printf(dev, "%d TX ring, %d RX ring\n",
	    sc->hn_tx_ring_inuse, sc->hn_rx_ring_inuse);
#if __FreeBSD_version >= 1100099
	if (sc->hn_rx_ring_inuse > 1) {
		/*
		 * Reduce TCP segment aggregation limit for multiple
		 * RX rings to increase ACK timeliness.
		 */
		hn_set_lro_lenlim(sc, HN_LRO_LENLIM_MULTIRX_DEF);
	}
#endif

	if (device_info.link_state == 0) {
		sc->hn_carrier = 1;
	}
#if __FreeBSD_version >= 1100045
	tso_maxlen = hn_tso_maxlen;
	if (tso_maxlen <= 0 || tso_maxlen > IP_MAXPACKET)
		tso_maxlen = IP_MAXPACKET;

	ifp->if_hw_tsomaxsegcount = HN_TX_DATA_SEGCNT_MAX;
	ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
	ifp->if_hw_tsomax = tso_maxlen -
	    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
#endif

	ether_ifattach(ifp, device_info.mac_addr);

#if __FreeBSD_version >= 1100045
	if_printf(ifp, "TSO: %u/%u/%u\n", ifp->if_hw_tsomax,
	    ifp->if_hw_tsomaxsegcount, ifp->if_hw_tsomaxsegsize);
#endif

	sc->hn_tx_chimney_max = sc->net_dev->send_section_size;
	hn_set_tx_chimney_size(sc, sc->hn_tx_chimney_max);
	if (hn_tx_chimney_size > 0 &&
	    hn_tx_chimney_size < sc->hn_tx_chimney_max)
		hn_set_tx_chimney_size(sc, hn_tx_chimney_size);

	return (0);
failed:
	hn_destroy_tx_data(sc);
	if (ifp != NULL)
		if_free(ifp);
	return (error);
}
/*
 * Standard detach entry point
 */
static int
netvsc_detach(device_t dev)
{
	struct hn_softc *sc = device_get_softc(dev);
	struct hv_device *hv_device = vmbus_get_devctx(dev);

	if (bootverbose)
		printf("netvsc_detach\n");

	/*
	 * XXXKYS:  Need to clean up all our
	 * driver state; this is the driver
	 * unloading.
	 */

	/*
	 * XXXKYS:  Need to stop outgoing traffic and unregister
	 * the netdevice.
	 */

	hv_rf_on_device_remove(hv_device, HV_RF_NV_DESTROY_CHANNEL);

	hn_stop_tx_tasks(sc);

	ifmedia_removeall(&sc->hn_media);
	hn_destroy_rx_data(sc);
	hn_destroy_tx_data(sc);

	if (sc->hn_tx_taskq != hn_tx_taskq)
		taskqueue_free(sc->hn_tx_taskq);

	return (0);
}
/*
 * Standard shutdown entry point
 */
static int
netvsc_shutdown(device_t dev)
{
	return (0);
}
static __inline int
hn_txdesc_dmamap_load(struct hn_tx_ring *txr, struct hn_txdesc *txd,
    struct mbuf **m_head, bus_dma_segment_t *segs, int *nsegs)
{
	struct mbuf *m = *m_head;
	int error;

	error = bus_dmamap_load_mbuf_sg(txr->hn_tx_data_dtag, txd->data_dmap,
	    m, segs, nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		struct mbuf *m_new;

		m_new = m_collapse(m, M_NOWAIT, HN_TX_DATA_SEGCNT_MAX);
		if (m_new == NULL)
			return ENOBUFS;
		else
			*m_head = m = m_new;
		txr->hn_tx_collapsed++;

		error = bus_dmamap_load_mbuf_sg(txr->hn_tx_data_dtag,
		    txd->data_dmap, m, segs, nsegs, BUS_DMA_NOWAIT);
	}
	if (!error) {
		bus_dmamap_sync(txr->hn_tx_data_dtag, txd->data_dmap,
		    BUS_DMASYNC_PREWRITE);
		txd->flags |= HN_TXD_FLAG_DMAMAP;
	}
	return error;
}
static __inline void
hn_txdesc_dmamap_unload(struct hn_tx_ring *txr, struct hn_txdesc *txd)
{

	if (txd->flags & HN_TXD_FLAG_DMAMAP) {
		bus_dmamap_sync(txr->hn_tx_data_dtag,
		    txd->data_dmap, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(txr->hn_tx_data_dtag,
		    txd->data_dmap);
		txd->flags &= ~HN_TXD_FLAG_DMAMAP;
	}
}
static __inline int
hn_txdesc_put(struct hn_tx_ring *txr, struct hn_txdesc *txd)
{

	KASSERT((txd->flags & HN_TXD_FLAG_ONLIST) == 0,
	    ("put an onlist txd %#x", txd->flags));

	KASSERT(txd->refs > 0, ("invalid txd refs %d", txd->refs));
	if (atomic_fetchadd_int(&txd->refs, -1) != 1)
		return 0;

	hn_txdesc_dmamap_unload(txr, txd);
	if (txd->m != NULL) {
		m_freem(txd->m);
		txd->m = NULL;
	}

	txd->flags |= HN_TXD_FLAG_ONLIST;

#ifndef HN_USE_TXDESC_BUFRING
	mtx_lock_spin(&txr->hn_txlist_spin);
	KASSERT(txr->hn_txdesc_avail >= 0 &&
	    txr->hn_txdesc_avail < txr->hn_txdesc_cnt,
	    ("txdesc_put: invalid txd avail %d", txr->hn_txdesc_avail));
	txr->hn_txdesc_avail++;
	SLIST_INSERT_HEAD(&txr->hn_txlist, txd, link);
	mtx_unlock_spin(&txr->hn_txlist_spin);
#else
	atomic_add_int(&txr->hn_txdesc_avail, 1);
	buf_ring_enqueue(txr->hn_txdesc_br, txd);
#endif

	return 1;
}
static __inline struct hn_txdesc *
hn_txdesc_get(struct hn_tx_ring *txr)
{
	struct hn_txdesc *txd;

#ifndef HN_USE_TXDESC_BUFRING
	mtx_lock_spin(&txr->hn_txlist_spin);
	txd = SLIST_FIRST(&txr->hn_txlist);
	if (txd != NULL) {
		KASSERT(txr->hn_txdesc_avail > 0,
		    ("txdesc_get: invalid txd avail %d", txr->hn_txdesc_avail));
		txr->hn_txdesc_avail--;
		SLIST_REMOVE_HEAD(&txr->hn_txlist, link);
	}
	mtx_unlock_spin(&txr->hn_txlist_spin);
#else
	txd = buf_ring_dequeue_sc(txr->hn_txdesc_br);
#endif

	if (txd != NULL) {
#ifdef HN_USE_TXDESC_BUFRING
		atomic_subtract_int(&txr->hn_txdesc_avail, 1);
#endif
		KASSERT(txd->m == NULL && txd->refs == 0 &&
		    (txd->flags & HN_TXD_FLAG_ONLIST), ("invalid txd"));
		txd->flags &= ~HN_TXD_FLAG_ONLIST;
		txd->refs = 1;
	}
	return txd;
}
static __inline void
hn_txdesc_hold(struct hn_txdesc *txd)
{

	/* 0->1 transition will never work */
	KASSERT(txd->refs > 0, ("invalid refs %d", txd->refs));
	atomic_add_int(&txd->refs, 1);
}
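/*
 * Sketch of the descriptor reference-count life cycle (informational,
 * derived from the helpers above and hn_send_pkt() below):
 *
 *	txd = hn_txdesc_get(txr);	refs: 0 -> 1 (off the free list)
 *	hn_txdesc_hold(txd);		refs: 1 -> 2 (before VMBus send)
 *	hn_txdesc_put(txr, txd);	refs: 2 -> 1 (send path drop)
 *	hn_txdesc_put(txr, txd);	refs: 1 -> 0 (completion; recycled)
 */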
static void
hn_tx_done(struct hv_vmbus_channel *chan, void *xpkt)
{
	netvsc_packet *packet = xpkt;
	struct hn_txdesc *txd;
	struct hn_tx_ring *txr;

	txd = (struct hn_txdesc *)(uintptr_t)
	    packet->compl.send.send_completion_tid;

	txr = txd->txr;
	KASSERT(txr->hn_chan == chan,
	    ("channel mismatch, on channel%u, should be channel%u",
	     chan->offer_msg.offer.sub_channel_index,
	     txr->hn_chan->offer_msg.offer.sub_channel_index));

	txr->hn_has_txeof = 1;
	hn_txdesc_put(txr, txd);
}
void
netvsc_channel_rollup(struct hv_vmbus_channel *chan)
{
	struct hn_tx_ring *txr = chan->hv_chan_txr;
#if defined(INET) || defined(INET6)
	struct hn_rx_ring *rxr = chan->hv_chan_rxr;

	tcp_lro_flush_all(&rxr->hn_lro);
#endif

	/*
	 * NOTE:
	 * 'txr' could be NULL, if multiple channels and
	 * ifnet.if_start method are enabled.
	 */
	if (txr == NULL || !txr->hn_has_txeof)
		return;

	txr->hn_has_txeof = 0;
	txr->hn_txeof(txr);
}
/*
 * NOTE:
 * If this function fails, then both txd and m_head0 will be freed.
 */
static int
hn_encap(struct hn_tx_ring *txr, struct hn_txdesc *txd, struct mbuf **m_head0)
{
	bus_dma_segment_t segs[HN_TX_DATA_SEGCNT_MAX];
	int error, nsegs, i;
	struct mbuf *m_head = *m_head0;
	netvsc_packet *packet;
	rndis_msg *rndis_mesg;
	rndis_packet *rndis_pkt;
	rndis_per_packet_info *rppi;
	struct ndis_hash_info *hash_info;
	uint32_t rndis_msg_size;

	packet = &txd->netvsc_pkt;
	packet->is_data_pkt = TRUE;
	packet->tot_data_buf_len = m_head->m_pkthdr.len;

	/*
	 * extension points to the area reserved for the
	 * rndis_filter_packet, which is placed just after
	 * the netvsc_packet (and rppi struct, if present;
	 * length is updated later).
	 */
	rndis_mesg = txd->rndis_msg;
	/* XXX not necessary */
	memset(rndis_mesg, 0, HN_RNDIS_MSG_LEN);
	rndis_mesg->ndis_msg_type = REMOTE_NDIS_PACKET_MSG;

	rndis_pkt = &rndis_mesg->msg.packet;
	rndis_pkt->data_offset = sizeof(rndis_packet);
	rndis_pkt->data_length = packet->tot_data_buf_len;
	rndis_pkt->per_pkt_info_offset = sizeof(rndis_packet);

	rndis_msg_size = RNDIS_MESSAGE_SIZE(rndis_packet);

	/*
	 * Set the hash info for this packet, so that the host could
	 * dispatch the TX done event for this packet back to this TX
	 * ring's channel.
	 */
	rndis_msg_size += RNDIS_HASH_PPI_SIZE;
	rppi = hv_set_rppi_data(rndis_mesg, RNDIS_HASH_PPI_SIZE,
	    nbl_hash_value);
	hash_info = (struct ndis_hash_info *)((uint8_t *)rppi +
	    rppi->per_packet_info_offset);
	hash_info->hash = txr->hn_tx_idx;

	if (m_head->m_flags & M_VLANTAG) {
		ndis_8021q_info *rppi_vlan_info;

		rndis_msg_size += RNDIS_VLAN_PPI_SIZE;
		rppi = hv_set_rppi_data(rndis_mesg, RNDIS_VLAN_PPI_SIZE,
		    ieee_8021q_info);

		rppi_vlan_info = (ndis_8021q_info *)((uint8_t *)rppi +
		    rppi->per_packet_info_offset);
		rppi_vlan_info->u1.s1.vlan_id =
		    m_head->m_pkthdr.ether_vtag & 0xfff;
	}

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		rndis_tcp_tso_info *tso_info;
		struct ether_vlan_header *eh;
		int ether_len;

		/*
		 * XXX need m_pullup and use mtodo
		 */
		eh = mtod(m_head, struct ether_vlan_header*);
		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
			ether_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		else
			ether_len = ETHER_HDR_LEN;

		rndis_msg_size += RNDIS_TSO_PPI_SIZE;
		rppi = hv_set_rppi_data(rndis_mesg, RNDIS_TSO_PPI_SIZE,
		    tcp_large_send_info);

		tso_info = (rndis_tcp_tso_info *)((uint8_t *)rppi +
		    rppi->per_packet_info_offset);
		tso_info->lso_v2_xmit.type =
		    RNDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;

#ifdef INET
		if (m_head->m_pkthdr.csum_flags & CSUM_IP_TSO) {
			struct ip *ip =
			    (struct ip *)(m_head->m_data + ether_len);
			unsigned long iph_len = ip->ip_hl << 2;
			struct tcphdr *th =
			    (struct tcphdr *)((caddr_t)ip + iph_len);

			tso_info->lso_v2_xmit.ip_version =
			    RNDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
			ip->ip_len = 0;
			ip->ip_sum = 0;

			th->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		}
#endif
#if defined(INET6) && defined(INET)
		else
#endif
#ifdef INET6
		{
			struct ip6_hdr *ip6 = (struct ip6_hdr *)
			    (m_head->m_data + ether_len);
			struct tcphdr *th = (struct tcphdr *)(ip6 + 1);

			tso_info->lso_v2_xmit.ip_version =
			    RNDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
			ip6->ip6_plen = 0;
			th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
		}
#endif
		tso_info->lso_v2_xmit.tcp_header_offset = 0;
		tso_info->lso_v2_xmit.mss = m_head->m_pkthdr.tso_segsz;
	} else if (m_head->m_pkthdr.csum_flags & txr->hn_csum_assist) {
		rndis_tcp_ip_csum_info *csum_info;

		rndis_msg_size += RNDIS_CSUM_PPI_SIZE;
		rppi = hv_set_rppi_data(rndis_mesg, RNDIS_CSUM_PPI_SIZE,
		    tcpip_chksum_info);
		csum_info = (rndis_tcp_ip_csum_info *)((uint8_t *)rppi +
		    rppi->per_packet_info_offset);

		csum_info->xmit.is_ipv4 = 1;
		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
			csum_info->xmit.ip_header_csum = 1;

		if (m_head->m_pkthdr.csum_flags & CSUM_TCP) {
			csum_info->xmit.tcp_csum = 1;
			csum_info->xmit.tcp_header_offset = 0;
		} else if (m_head->m_pkthdr.csum_flags & CSUM_UDP) {
			csum_info->xmit.udp_csum = 1;
		}
	}

	rndis_mesg->msg_len = packet->tot_data_buf_len + rndis_msg_size;
	packet->tot_data_buf_len = rndis_mesg->msg_len;

	/*
	 * Chimney send, if the packet could fit into one chimney buffer.
	 *
	 * TODO: vRSS, chimney buffer should be per-channel.
	 */
	if (packet->tot_data_buf_len < txr->hn_tx_chimney_size) {
		netvsc_dev *net_dev = txr->hn_sc->net_dev;
		uint32_t send_buf_section_idx;

		send_buf_section_idx =
		    hv_nv_get_next_send_section(net_dev);
		if (send_buf_section_idx !=
		    NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX) {
			uint8_t *dest = ((uint8_t *)net_dev->send_buf +
			    (send_buf_section_idx *
			     net_dev->send_section_size));

			memcpy(dest, rndis_mesg, rndis_msg_size);
			dest += rndis_msg_size;
			m_copydata(m_head, 0, m_head->m_pkthdr.len, dest);

			packet->send_buf_section_idx = send_buf_section_idx;
			packet->send_buf_section_size =
			    packet->tot_data_buf_len;
			packet->page_buf_count = 0;
			txr->hn_tx_chimney++;
			goto done;
		}
	}

	error = hn_txdesc_dmamap_load(txr, txd, &m_head, segs, &nsegs);
	if (error) {
		int freed;

		/*
		 * This mbuf is not linked w/ the txd yet, so free it now.
		 */
		m_freem(m_head);
		*m_head0 = NULL;

		freed = hn_txdesc_put(txr, txd);
		KASSERT(freed != 0,
		    ("fail to free txd upon txdma error"));

		txr->hn_txdma_failed++;
		if_inc_counter(txr->hn_sc->hn_ifp, IFCOUNTER_OERRORS, 1);
		return error;
	}
	*m_head0 = m_head;

	packet->page_buf_count = nsegs + HV_RF_NUM_TX_RESERVED_PAGE_BUFS;

	/* send packet with page buffer */
	packet->page_buffers[0].pfn = atop(txd->rndis_msg_paddr);
	packet->page_buffers[0].offset = txd->rndis_msg_paddr & PAGE_MASK;
	packet->page_buffers[0].length = rndis_msg_size;

	/*
	 * Fill the page buffers with mbuf info starting at index
	 * HV_RF_NUM_TX_RESERVED_PAGE_BUFS.
	 */
	for (i = 0; i < nsegs; ++i) {
		hv_vmbus_page_buffer *pb = &packet->page_buffers[
		    i + HV_RF_NUM_TX_RESERVED_PAGE_BUFS];

		pb->pfn = atop(segs[i].ds_addr);
		pb->offset = segs[i].ds_addr & PAGE_MASK;
		pb->length = segs[i].ds_len;
	}

	packet->send_buf_section_idx =
	    NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX;
	packet->send_buf_section_size = 0;

done:
	txd->m = m_head;

	/* Set the completion routine */
	packet->compl.send.on_send_completion = hn_tx_done;
	packet->compl.send.send_completion_context = packet;
	packet->compl.send.send_completion_tid = (uint64_t)(uintptr_t)txd;

	return 0;
}
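/*
 * NOTE (informational): hn_encap() above picks one of two data paths.
 * Small frames are copied into a pre-posted "chimney" send-buffer
 * section; larger frames are DMA-mapped and described to the host as a
 * scatter/gather list of page buffers, with page buffer 0 always
 * carrying the RNDIS message.  In both cases the txd pointer is
 * round-tripped through the completion as send_completion_tid, which
 * hn_tx_done() casts back to recover the descriptor.
 */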
/*
 * NOTE:
 * If this function fails, then txd will be freed, but the mbuf
 * associated w/ the txd will _not_ be freed.
 */
static __inline int
hn_send_pkt(struct ifnet *ifp, struct hn_tx_ring *txr, struct hn_txdesc *txd)
{
	int error, send_failed = 0;

again:
	/*
	 * Make sure that txd is not freed before ETHER_BPF_MTAP.
	 */
	hn_txdesc_hold(txd);
	error = hv_nv_on_send(txr->hn_chan, &txd->netvsc_pkt);
	if (!error) {
		ETHER_BPF_MTAP(ifp, txd->m);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if (!hn_use_if_start) {
			if_inc_counter(ifp, IFCOUNTER_OBYTES,
			    txd->m->m_pkthdr.len);
			if (txd->m->m_flags & M_MCAST)
				if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
		}
		txr->hn_pkts++;
	}
	hn_txdesc_put(txr, txd);

	if (__predict_false(error)) {
		int freed;

		/*
		 * This should "really rarely" happen.
		 *
		 * XXX Too many RX to be acked or too many sideband
		 * commands to run?  Ask netvsc_channel_rollup()
		 * to kick start later.
		 */
		txr->hn_has_txeof = 1;
		if (!send_failed) {
			txr->hn_send_failed++;
			send_failed = 1;
			/*
			 * Try sending again after set hn_has_txeof;
			 * in case that we missed the last
			 * netvsc_channel_rollup().
			 */
			goto again;
		}
		if_printf(ifp, "send failed\n");

		/*
		 * Caller will perform further processing on the
		 * associated mbuf, so don't free it in hn_txdesc_put();
		 * only unload it from the DMA map in hn_txdesc_put(),
		 * if it was loaded.
		 */
		txd->m = NULL;
		freed = hn_txdesc_put(txr, txd);
		KASSERT(freed != 0,
		    ("fail to free txd upon send error"));

		txr->hn_send_failed++;
	}
	return error;
}
/*
 * Start a transmit of one or more packets
 */
static int
hn_start_locked(struct hn_tx_ring *txr, int len)
{
	struct hn_softc *sc = txr->hn_sc;
	struct ifnet *ifp = sc->hn_ifp;

	KASSERT(hn_use_if_start,
	    ("hn_start_locked is called, when if_start is disabled"));
	KASSERT(txr == &sc->hn_tx_ring[0], ("not the first TX ring"));
	mtx_assert(&txr->hn_tx_lock, MA_OWNED);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return 0;

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		struct hn_txdesc *txd;
		struct mbuf *m_head;
		int error;

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (len > 0 && m_head->m_pkthdr.len > len) {
			/*
			 * This sending could be time consuming; let callers
			 * dispatch this packet sending (and sending of any
			 * following up packets) to tx taskqueue.
			 */
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			return 1;
		}

		txd = hn_txdesc_get(txr);
		if (txd == NULL) {
			txr->hn_no_txdescs++;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
			break;
		}

		error = hn_encap(txr, txd, &m_head);
		if (error) {
			/* Both txd and m_head are freed */
			continue;
		}

		error = hn_send_pkt(ifp, txr, txd);
		if (__predict_false(error)) {
			/* txd is freed, but m_head is not */
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
			break;
		}
	}
	return 0;
}
/*
 * Link up/down notification
 */
void
netvsc_linkstatus_callback(struct hv_device *device_obj, uint32_t status)
{
	hn_softc_t *sc = device_get_softc(device_obj->device);

	if (status == 1) {
		sc->hn_carrier = 1;
	} else {
		sc->hn_carrier = 0;
	}
}
/*
 * Append the specified data to the indicated mbuf chain,
 * Extend the mbuf chain if the new data does not fit in
 * existing space.
 *
 * This is a minor rewrite of m_append() from sys/kern/uipc_mbuf.c.
 * There should be an equivalent in the kernel mbuf code,
 * but there does not appear to be one yet.
 *
 * Differs from m_append() in that additional mbufs are
 * allocated with cluster size MJUMPAGESIZE, and filled
 * accordingly.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
static int
hv_m_append(struct mbuf *m0, int len, c_caddr_t cp)
{
	struct mbuf *m, *n;
	int remainder, space;

	for (m = m0; m->m_next != NULL; m = m->m_next)
		;
	remainder = len;
	space = M_TRAILINGSPACE(m);
	if (space > 0) {
		/*
		 * Copy into available space.
		 */
		if (space > remainder)
			space = remainder;
		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
		m->m_len += space;
		cp += space;
		remainder -= space;
	}
	while (remainder > 0) {
		/*
		 * Allocate a new mbuf; could check space
		 * and allocate a cluster instead.
		 */
		n = m_getjcl(M_NOWAIT, m->m_type, 0, MJUMPAGESIZE);
		if (n == NULL)
			break;
		n->m_len = min(MJUMPAGESIZE, remainder);
		bcopy(cp, mtod(n, caddr_t), n->m_len);
		cp += n->m_len;
		remainder -= n->m_len;
		m->m_next = n;
		m = n;
	}
	if (m0->m_flags & M_PKTHDR)
		m0->m_pkthdr.len += len - remainder;

	return (remainder == 0);
}
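/*
 * Example usage (a sketch of the call in netvsc_recv() below), copying
 * a received frame into a fresh cluster mbuf:
 *
 *	m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, size);
 *	hv_m_append(m_new, packet->tot_data_buf_len, packet->data);
 */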
/*
 * Called when we receive a data packet from the "wire" on the
 * specified device
 *
 * Note:  This is no longer used as a callback
 */
int
netvsc_recv(struct hv_vmbus_channel *chan, netvsc_packet *packet,
    rndis_tcp_ip_csum_info *csum_info)
{
	struct hn_rx_ring *rxr = chan->hv_chan_rxr;
	struct ifnet *ifp = rxr->hn_ifp;
	struct mbuf *m_new;
	int size, do_lro = 0, do_csum = 1;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return (0);

	/*
	 * Bail out if packet contains more data than configured MTU.
	 */
	if (packet->tot_data_buf_len > (ifp->if_mtu + ETHER_HDR_LEN)) {
		return (0);
	} else if (packet->tot_data_buf_len <= MHLEN) {
		m_new = m_gethdr(M_NOWAIT, MT_DATA);
		if (m_new == NULL) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			return (0);
		}
		memcpy(mtod(m_new, void *), packet->data,
		    packet->tot_data_buf_len);
		m_new->m_pkthdr.len = m_new->m_len = packet->tot_data_buf_len;
		rxr->hn_small_pkts++;
	} else {
		/*
		 * Get an mbuf with a cluster.  For packets 2K or less,
		 * get a standard 2K cluster.  For anything larger, get a
		 * 4K cluster.  Any buffers larger than 4K can cause problems
		 * if looped around to the Hyper-V TX channel, so avoid them.
		 */
		size = MCLBYTES;
		if (packet->tot_data_buf_len > MCLBYTES) {
			/* 4096 */
			size = MJUMPAGESIZE;
		}

		m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, size);
		if (m_new == NULL) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			return (0);
		}

		hv_m_append(m_new, packet->tot_data_buf_len, packet->data);
	}
	m_new->m_pkthdr.rcvif = ifp;

	if (__predict_false((ifp->if_capenable & IFCAP_RXCSUM) == 0))
		do_csum = 0;

	/* receive side checksum offload */
	if (csum_info != NULL) {
		/* IP csum offload */
		if (csum_info->receive.ip_csum_succeeded && do_csum) {
			m_new->m_pkthdr.csum_flags |=
			    (CSUM_IP_CHECKED | CSUM_IP_VALID);
			rxr->hn_csum_ip++;
		}

		/* TCP/UDP csum offload */
		if ((csum_info->receive.tcp_csum_succeeded ||
		     csum_info->receive.udp_csum_succeeded) && do_csum) {
			m_new->m_pkthdr.csum_flags |=
			    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
			m_new->m_pkthdr.csum_data = 0xffff;
			if (csum_info->receive.tcp_csum_succeeded)
				rxr->hn_csum_tcp++;
			else
				rxr->hn_csum_udp++;
		}

		if (csum_info->receive.ip_csum_succeeded &&
		    csum_info->receive.tcp_csum_succeeded)
			do_lro = 1;
	} else {
		const struct ether_header *eh;
		uint16_t etype;
		int hoff;

		hoff = sizeof(*eh);
		if (m_new->m_len < hoff)
			goto skip;
		eh = mtod(m_new, struct ether_header *);
		etype = ntohs(eh->ether_type);
		if (etype == ETHERTYPE_VLAN) {
			const struct ether_vlan_header *evl;

			hoff = sizeof(*evl);
			if (m_new->m_len < hoff)
				goto skip;
			evl = mtod(m_new, struct ether_vlan_header *);
			etype = ntohs(evl->evl_proto);
		}

		if (etype == ETHERTYPE_IP) {
			int pr;

			pr = hn_check_iplen(m_new, hoff);
			if (pr == IPPROTO_TCP) {
				if (do_csum &&
				    (rxr->hn_trust_hcsum &
				     HN_TRUST_HCSUM_TCP)) {
					rxr->hn_csum_trusted++;
					m_new->m_pkthdr.csum_flags |=
					   (CSUM_IP_CHECKED | CSUM_IP_VALID |
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
					m_new->m_pkthdr.csum_data = 0xffff;
				}
				/* Rely on SW csum verification though... */
				do_lro = 1;
			} else if (pr == IPPROTO_UDP) {
				if (do_csum &&
				    (rxr->hn_trust_hcsum &
				     HN_TRUST_HCSUM_UDP)) {
					rxr->hn_csum_trusted++;
					m_new->m_pkthdr.csum_flags |=
					   (CSUM_IP_CHECKED | CSUM_IP_VALID |
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
					m_new->m_pkthdr.csum_data = 0xffff;
				}
			} else if (pr != IPPROTO_DONE && do_csum &&
			    (rxr->hn_trust_hcsum & HN_TRUST_HCSUM_IP)) {
				rxr->hn_csum_trusted++;
				m_new->m_pkthdr.csum_flags |=
				    (CSUM_IP_CHECKED | CSUM_IP_VALID);
			}
		}
	}
skip:
	if ((packet->vlan_tci != 0) &&
	    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
		m_new->m_pkthdr.ether_vtag = packet->vlan_tci;
		m_new->m_flags |= M_VLANTAG;
	}

	m_new->m_pkthdr.flowid = rxr->hn_rx_idx;
	M_HASHTYPE_SET(m_new, M_HASHTYPE_OPAQUE);

	/*
	 * Note:  Moved RX completion back to hv_nv_on_receive() so all
	 * messages (not just data messages) will trigger a response.
	 */

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
	rxr->hn_pkts++;

	if ((ifp->if_capenable & IFCAP_LRO) && do_lro) {
#if defined(INET) || defined(INET6)
		struct lro_ctrl *lro = &rxr->hn_lro;

		if (lro->lro_cnt) {
			rxr->hn_lro_tried++;
			if (tcp_lro_rx(lro, m_new, 0) == 0) {
				/* DONE! */
				return (0);
			}
		}
#endif
	}

	/* We're not holding the lock here, so don't release it */
	(*ifp->if_input)(ifp, m_new);

	return (0);
}
/*
 * Rules for using sc->temp_unusable:
 * 1.  sc->temp_unusable can only be read or written while holding NV_LOCK()
 * 2.  code reading sc->temp_unusable under NV_LOCK(), and finding
 *     sc->temp_unusable set, must release NV_LOCK() and exit
 * 3.  to retain exclusive control of the interface,
 *     sc->temp_unusable must be set by code before releasing NV_LOCK()
 * 4.  only code setting sc->temp_unusable can clear sc->temp_unusable
 * 5.  code setting sc->temp_unusable must eventually clear sc->temp_unusable
 */
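/*
 * The ioctl path below implements these rules with a bounded retry
 * loop (a sketch of the pattern only; see hn_ioctl() for the real
 * code):
 *
 *	do {
 *		NV_LOCK(sc);
 *		if (!sc->temp_unusable) {
 *			sc->temp_unusable = TRUE;	// rule 3
 *			retry_cnt = -1;			// acquired; stop
 *		}
 *		NV_UNLOCK(sc);				// rule 2
 *		if (retry_cnt > 0)
 *			retry_cnt--;			// wait and retry
 *	} while (retry_cnt > 0);
 */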
/*
 * Standard ioctl entry point.  Called when the user wants to configure
 * the interface.
 */
static int
hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	hn_softc_t *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
#ifdef INET
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	netvsc_device_info device_info;
	struct hv_device *hn_dev;
	int mask, error = 0;
	int retry_cnt = 500;

	switch (cmd) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				hn_ifinit(sc);
			arp_ifinit(ifp, ifa);
		} else
#endif
		error = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCSIFMTU:
		hn_dev = vmbus_get_devctx(sc->hn_dev);

		/* Check MTU value change */
		if (ifp->if_mtu == ifr->ifr_mtu)
			break;

		if (ifr->ifr_mtu > NETVSC_MAX_CONFIGURABLE_MTU) {
			error = EINVAL;
			break;
		}

		/* Obtain and record requested MTU */
		ifp->if_mtu = ifr->ifr_mtu;

#if __FreeBSD_version >= 1100099
		/*
		 * Make sure that LRO aggregation length limit is still
		 * valid, after the MTU change.
		 */
		NV_LOCK(sc);
		if (sc->hn_rx_ring[0].hn_lro.lro_length_lim <
		    HN_LRO_LENLIM_MIN(ifp))
			hn_set_lro_lenlim(sc, HN_LRO_LENLIM_MIN(ifp));
		NV_UNLOCK(sc);
#endif

		do {
			NV_LOCK(sc);
			if (!sc->temp_unusable) {
				sc->temp_unusable = TRUE;
				retry_cnt = -1;
			}
			NV_UNLOCK(sc);
			if (retry_cnt > 0) {
				retry_cnt--;
				DELAY(5 * 1000);
			}
		} while (retry_cnt > 0);

		if (retry_cnt == 0) {
			error = EINVAL;
			break;
		}

		/* We must remove and add back the device to cause the new
		 * MTU to take effect.  This includes tearing down, but not
		 * deleting the channel, then bringing it back up.
		 */
		error = hv_rf_on_device_remove(hn_dev, HV_RF_NV_RETAIN_CHANNEL);
		if (error) {
			NV_LOCK(sc);
			sc->temp_unusable = FALSE;
			NV_UNLOCK(sc);
			break;
		}
		error = hv_rf_on_device_add(hn_dev, &device_info,
		    sc->hn_rx_ring_inuse);
		if (error) {
			NV_LOCK(sc);
			sc->temp_unusable = FALSE;
			NV_UNLOCK(sc);
			break;
		}

		sc->hn_tx_chimney_max = sc->net_dev->send_section_size;
		if (sc->hn_tx_ring[0].hn_tx_chimney_size >
		    sc->hn_tx_chimney_max)
			hn_set_tx_chimney_size(sc, sc->hn_tx_chimney_max);

		hn_ifinit_locked(sc);

		NV_LOCK(sc);
		sc->temp_unusable = FALSE;
		NV_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		do {
			NV_LOCK(sc);
			if (!sc->temp_unusable) {
				sc->temp_unusable = TRUE;
				retry_cnt = -1;
			}
			NV_UNLOCK(sc);
			if (retry_cnt > 0) {
				retry_cnt--;
				DELAY(5 * 1000);
			}
		} while (retry_cnt > 0);

		if (retry_cnt == 0) {
			error = EINVAL;
			break;
		}

		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC. Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.
			 */
#ifdef notyet
			/* Fixme:  Promiscuous mode? */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->hn_if_flags & IFF_PROMISC)) {
				/* do something here for Hyper-V */
			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->hn_if_flags & IFF_PROMISC) {
				/* do something here for Hyper-V */
			} else
#endif
				hn_ifinit_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				hn_stop(sc);
			}
		}
		NV_LOCK(sc);
		sc->temp_unusable = FALSE;
		NV_UNLOCK(sc);
		sc->hn_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCSIFCAP:
		NV_LOCK(sc);

		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM) {
				ifp->if_hwassist |=
				    sc->hn_tx_ring[0].hn_csum_assist;
			} else {
				ifp->if_hwassist &=
				    ~sc->hn_tx_ring[0].hn_csum_assist;
			}
		}

		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;

		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;

		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if (ifp->if_capenable & IFCAP_TSO4)
				ifp->if_hwassist |= CSUM_IP_TSO;
			else
				ifp->if_hwassist &= ~CSUM_IP_TSO;
		}

		if (mask & IFCAP_TSO6) {
			ifp->if_capenable ^= IFCAP_TSO6;
			if (ifp->if_capenable & IFCAP_TSO6)
				ifp->if_hwassist |= CSUM_IP6_TSO;
			else
				ifp->if_hwassist &= ~CSUM_IP6_TSO;
		}

		NV_UNLOCK(sc);
		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
#ifdef notyet
		/* Fixme:  Multicast mode? */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			NV_LOCK(sc);
			netvsc_setmulti(sc);
			NV_UNLOCK(sc);
		}
#endif
		error = 0;
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->hn_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
static void
hn_stop(hn_softc_t *sc)
{
	struct ifnet *ifp;
	int ret, i;
	struct hv_device *device_ctx = vmbus_get_devctx(sc->hn_dev);

	ifp = sc->hn_ifp;

	if (bootverbose)
		printf(" Closing Device ...\n");

	atomic_clear_int(&ifp->if_drv_flags,
	    (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	for (i = 0; i < sc->hn_tx_ring_inuse; ++i)
		sc->hn_tx_ring[i].hn_oactive = 0;

	if_link_state_change(ifp, LINK_STATE_DOWN);
	sc->hn_initdone = 0;

	ret = hv_rf_on_close(device_ctx);
}
/*
 * FreeBSD transmit entry point
 */
static void
hn_start(struct ifnet *ifp)
{
	struct hn_softc *sc = ifp->if_softc;
	struct hn_tx_ring *txr = &sc->hn_tx_ring[0];

	if (txr->hn_sched_tx)
		goto do_sched;

	if (mtx_trylock(&txr->hn_tx_lock)) {
		int sched;

		sched = hn_start_locked(txr, txr->hn_direct_tx_size);
		mtx_unlock(&txr->hn_tx_lock);
		if (!sched)
			return;
	}
do_sched:
	taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_tx_task);
}
static void
hn_start_txeof(struct hn_tx_ring *txr)
{
	struct hn_softc *sc = txr->hn_sc;
	struct ifnet *ifp = sc->hn_ifp;

	KASSERT(txr == &sc->hn_tx_ring[0], ("not the first TX ring"));

	if (txr->hn_sched_tx)
		goto do_sched;

	if (mtx_trylock(&txr->hn_tx_lock)) {
		int sched;

		atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
		sched = hn_start_locked(txr, txr->hn_direct_tx_size);
		mtx_unlock(&txr->hn_tx_lock);
		if (sched) {
			taskqueue_enqueue(txr->hn_tx_taskq,
			    &txr->hn_tx_task);
		}
	} else {
do_sched:
		/*
		 * Release the OACTIVE earlier, with the hope, that
		 * others could catch up.  The task will clear the
		 * flag again with the hn_tx_lock to avoid possible
		 * races.
		 */
		atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
		taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_txeof_task);
	}
}
static void
hn_ifinit_locked(hn_softc_t *sc)
{
	struct ifnet *ifp;
	struct hv_device *device_ctx = vmbus_get_devctx(sc->hn_dev);
	int ret, i;

	ifp = sc->hn_ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		return;
	}

	hv_promisc_mode = 1;

	ret = hv_rf_on_open(device_ctx);
	if (ret != 0) {
		return;
	} else {
		sc->hn_initdone = 1;
	}

	atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
	for (i = 0; i < sc->hn_tx_ring_inuse; ++i)
		sc->hn_tx_ring[i].hn_oactive = 0;

	atomic_set_int(&ifp->if_drv_flags, IFF_DRV_RUNNING);
	if_link_state_change(ifp, LINK_STATE_UP);
}
static void
hn_ifinit(void *xsc)
{
	hn_softc_t *sc = xsc;

	NV_LOCK(sc);
	if (sc->temp_unusable) {
		NV_UNLOCK(sc);
		return;
	}
	sc->temp_unusable = TRUE;
	NV_UNLOCK(sc);

	hn_ifinit_locked(sc);

	NV_LOCK(sc);
	sc->temp_unusable = FALSE;
	NV_UNLOCK(sc);
}
#ifdef LATER
static void
hn_watchdog(struct ifnet *ifp)
{
	hn_softc_t *sc;
	sc = ifp->if_softc;

	printf("hn%d: watchdog timeout -- resetting\n", sc->hn_unit);
	hn_ifinit(sc);    /*???*/
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
}
#endif
#if __FreeBSD_version >= 1100099
static int
hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	unsigned int lenlim;
	int error;

	lenlim = sc->hn_rx_ring[0].hn_lro.lro_length_lim;
	error = sysctl_handle_int(oidp, &lenlim, 0, req);
	if (error || req->newptr == NULL)
		return error;

	if (lenlim < HN_LRO_LENLIM_MIN(sc->hn_ifp) ||
	    lenlim > TCP_LRO_LENGTH_MAX)
		return EINVAL;

	NV_LOCK(sc);
	hn_set_lro_lenlim(sc, lenlim);
	NV_UNLOCK(sc);
	return 0;
}
static int
hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int ackcnt, error, i;

	/*
	 * lro_ackcnt_lim is append count limit,
	 * +1 to turn it into aggregation limit.
	 */
	ackcnt = sc->hn_rx_ring[0].hn_lro.lro_ackcnt_lim + 1;
	error = sysctl_handle_int(oidp, &ackcnt, 0, req);
	if (error || req->newptr == NULL)
		return error;

	if (ackcnt < 2 || ackcnt > (TCP_LRO_ACKCNT_MAX + 1))
		return EINVAL;

	/*
	 * Convert aggregation limit back to append
	 * count limit.
	 */
	--ackcnt;
	NV_LOCK(sc);
	for (i = 0; i < sc->hn_rx_ring_inuse; ++i)
		sc->hn_rx_ring[i].hn_lro.lro_ackcnt_lim = ackcnt;
	NV_UNLOCK(sc);
	return 0;
}
#endif
static int
hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int hcsum = arg2;
	int on, error, i;

	on = 0;
	if (sc->hn_rx_ring[0].hn_trust_hcsum & hcsum)
		on = 1;

	error = sysctl_handle_int(oidp, &on, 0, req);
	if (error || req->newptr == NULL)
		return error;

	NV_LOCK(sc);
	for (i = 0; i < sc->hn_rx_ring_inuse; ++i) {
		struct hn_rx_ring *rxr = &sc->hn_rx_ring[i];

		if (on)
			rxr->hn_trust_hcsum |= hcsum;
		else
			rxr->hn_trust_hcsum &= ~hcsum;
	}
	NV_UNLOCK(sc);
	return 0;
}
static int
hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int chimney_size, error;

	chimney_size = sc->hn_tx_ring[0].hn_tx_chimney_size;
	error = sysctl_handle_int(oidp, &chimney_size, 0, req);
	if (error || req->newptr == NULL)
		return error;

	if (chimney_size > sc->hn_tx_chimney_max || chimney_size <= 0)
		return EINVAL;

	hn_set_tx_chimney_size(sc, chimney_size);
	return 0;
}
static int
hn_rx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int ofs = arg2, i, error;
	struct hn_rx_ring *rxr;
	u_long stat;

	stat = 0;
	for (i = 0; i < sc->hn_rx_ring_inuse; ++i) {
		rxr = &sc->hn_rx_ring[i];
		stat += *((u_long *)((uint8_t *)rxr + ofs));
	}

	error = sysctl_handle_long(oidp, &stat, 0, req);
	if (error || req->newptr == NULL)
		return error;

	/* Zero out this stat. */
	for (i = 0; i < sc->hn_rx_ring_inuse; ++i) {
		rxr = &sc->hn_rx_ring[i];
		*((u_long *)((uint8_t *)rxr + ofs)) = 0;
	}
	return 0;
}
static int
hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int ofs = arg2, i, error;
	struct hn_rx_ring *rxr;
	uint64_t stat;

	stat = 0;
	for (i = 0; i < sc->hn_rx_ring_inuse; ++i) {
		rxr = &sc->hn_rx_ring[i];
		stat += *((uint64_t *)((uint8_t *)rxr + ofs));
	}

	error = sysctl_handle_64(oidp, &stat, 0, req);
	if (error || req->newptr == NULL)
		return error;

	/* Zero out this stat. */
	for (i = 0; i < sc->hn_rx_ring_inuse; ++i) {
		rxr = &sc->hn_rx_ring[i];
		*((uint64_t *)((uint8_t *)rxr + ofs)) = 0;
	}
	return 0;
}
static int
hn_tx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int ofs = arg2, i, error;
	struct hn_tx_ring *txr;
	u_long stat;

	stat = 0;
	for (i = 0; i < sc->hn_tx_ring_inuse; ++i) {
		txr = &sc->hn_tx_ring[i];
		stat += *((u_long *)((uint8_t *)txr + ofs));
	}

	error = sysctl_handle_long(oidp, &stat, 0, req);
	if (error || req->newptr == NULL)
		return error;

	/* Zero out this stat. */
	for (i = 0; i < sc->hn_tx_ring_inuse; ++i) {
		txr = &sc->hn_tx_ring[i];
		*((u_long *)((uint8_t *)txr + ofs)) = 0;
	}
	return 0;
}
static int
hn_tx_conf_int_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int ofs = arg2, i, error, conf;
	struct hn_tx_ring *txr;

	txr = &sc->hn_tx_ring[0];
	conf = *((int *)((uint8_t *)txr + ofs));

	error = sysctl_handle_int(oidp, &conf, 0, req);
	if (error || req->newptr == NULL)
		return error;

	NV_LOCK(sc);
	for (i = 0; i < sc->hn_tx_ring_inuse; ++i) {
		txr = &sc->hn_tx_ring[i];
		*((int *)((uint8_t *)txr + ofs)) = conf;
	}
	NV_UNLOCK(sc);

	return 0;
}
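/*
 * NOTE (informational): the stat and conf sysctl handlers above
 * receive a byte offset into the ring structure in arg2 (e.g.
 * __offsetof(struct hn_rx_ring, hn_csum_ip)), which lets one handler
 * aggregate or update the same field across all RX/TX rings.
 */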
static int
hn_check_iplen(const struct mbuf *m, int hoff)
{
	const struct ip *ip;
	int len, iphlen, iplen;
	const struct tcphdr *th;
	int thoff;				/* TCP data offset */

	len = hoff + sizeof(struct ip);

	/* The packet must be at least the size of an IP header. */
	if (m->m_pkthdr.len < len)
		return IPPROTO_DONE;

	/* The fixed IP header must reside completely in the first mbuf. */
	if (m->m_len < len)
		return IPPROTO_DONE;

	ip = mtodo(m, hoff);

	/* Bound check the packet's stated IP header length. */
	iphlen = ip->ip_hl << 2;
	if (iphlen < sizeof(struct ip))		/* minimum header length */
		return IPPROTO_DONE;

	/* The full IP header must reside completely in the one mbuf. */
	if (m->m_len < hoff + iphlen)
		return IPPROTO_DONE;

	iplen = ntohs(ip->ip_len);

	/*
	 * Check that the amount of data in the buffers is at least
	 * as much as the IP header would have us expect.
	 */
	if (m->m_pkthdr.len < hoff + iplen)
		return IPPROTO_DONE;

	/*
	 * Ignore IP fragments.
	 */
	if (ntohs(ip->ip_off) & (IP_OFFMASK | IP_MF))
		return IPPROTO_DONE;

	/*
	 * The TCP/IP or UDP/IP header must be entirely contained within
	 * the first fragment of a packet.
	 */
	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (iplen < iphlen + sizeof(struct tcphdr))
			return IPPROTO_DONE;
		if (m->m_len < hoff + iphlen + sizeof(struct tcphdr))
			return IPPROTO_DONE;
		th = (const struct tcphdr *)((const uint8_t *)ip + iphlen);
		thoff = th->th_off << 2;
		if (thoff < sizeof(struct tcphdr) || thoff + iphlen > iplen)
			return IPPROTO_DONE;
		if (m->m_len < hoff + iphlen + thoff)
			return IPPROTO_DONE;
		break;
	case IPPROTO_UDP:
		if (iplen < iphlen + sizeof(struct udphdr))
			return IPPROTO_DONE;
		if (m->m_len < hoff + iphlen + sizeof(struct udphdr))
			return IPPROTO_DONE;
		break;
	default:
		if (iplen < iphlen)
			return IPPROTO_DONE;
		break;
	}

	return ip->ip_p;
}
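/*
 * NOTE (informational): hn_check_iplen() returns the IP protocol
 * (e.g. IPPROTO_TCP or IPPROTO_UDP) when the relevant headers are
 * sane and fully resident in the first mbuf, and IPPROTO_DONE
 * otherwise; netvsc_recv() above uses this verdict to decide whether
 * the host-side checksum verification can be trusted.
 */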
static void
hn_dma_map_paddr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr = arg;

	if (error)
		return;

	KASSERT(nseg == 1, ("too many segments %d!", nseg));
	*paddr = segs->ds_addr;
}
static void
hn_create_rx_data(struct hn_softc *sc, int ring_cnt)
{
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx;
	device_t dev = sc->hn_dev;
#if defined(INET) || defined(INET6)
#if __FreeBSD_version >= 1100095
	int lroent_cnt;
#endif
#endif
	int i;

	sc->hn_rx_ring_cnt = ring_cnt;
	sc->hn_rx_ring_inuse = sc->hn_rx_ring_cnt;

	sc->hn_rx_ring = malloc(sizeof(struct hn_rx_ring) * sc->hn_rx_ring_cnt,
	    M_NETVSC, M_WAITOK | M_ZERO);

#if defined(INET) || defined(INET6)
#if __FreeBSD_version >= 1100095
	lroent_cnt = hn_lro_entry_count;
	if (lroent_cnt < TCP_LRO_ENTRIES)
		lroent_cnt = TCP_LRO_ENTRIES;
	device_printf(dev, "LRO: entry count %d\n", lroent_cnt);
#endif
#endif	/* INET || INET6 */

	ctx = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	/* Create dev.hn.UNIT.rx sysctl tree */
	sc->hn_rx_sysctl_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "rx",
	    CTLFLAG_RD, 0, "");

	for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
		struct hn_rx_ring *rxr = &sc->hn_rx_ring[i];

		if (hn_trust_hosttcp)
			rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_TCP;
		if (hn_trust_hostudp)
			rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_UDP;
		if (hn_trust_hostip)
			rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_IP;
		rxr->hn_ifp = sc->hn_ifp;
		rxr->hn_rx_idx = i;

		/*
		 * Initialize LRO.
		 */
#if defined(INET) || defined(INET6)
#if __FreeBSD_version >= 1100095
		tcp_lro_init_args(&rxr->hn_lro, sc->hn_ifp, lroent_cnt, 0);
#else
		tcp_lro_init(&rxr->hn_lro);
		rxr->hn_lro.ifp = sc->hn_ifp;
#endif
#if __FreeBSD_version >= 1100099
		rxr->hn_lro.lro_length_lim = HN_LRO_LENLIM_DEF;
		rxr->hn_lro.lro_ackcnt_lim = HN_LRO_ACKCNT_DEF;
#endif
#endif	/* INET || INET6 */

		if (sc->hn_rx_sysctl_tree != NULL) {
			char name[16];

			/*
			 * Create per RX ring sysctl tree:
			 * dev.hn.UNIT.rx.RINGID
			 */
			snprintf(name, sizeof(name), "%d", i);
			rxr->hn_rx_sysctl_tree = SYSCTL_ADD_NODE(ctx,
			    SYSCTL_CHILDREN(sc->hn_rx_sysctl_tree),
			    OID_AUTO, name, CTLFLAG_RD, 0, "");

			if (rxr->hn_rx_sysctl_tree != NULL) {
				SYSCTL_ADD_ULONG(ctx,
				    SYSCTL_CHILDREN(rxr->hn_rx_sysctl_tree),
				    OID_AUTO, "packets", CTLFLAG_RW,
				    &rxr->hn_pkts, "# of packets received");
			}
		}
	}

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_queued",
	    CTLTYPE_U64 | CTLFLAG_RW, sc,
	    __offsetof(struct hn_rx_ring, hn_lro.lro_queued),
	    hn_rx_stat_u64_sysctl, "LU", "LRO queued");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_flushed",
	    CTLTYPE_U64 | CTLFLAG_RW, sc,
	    __offsetof(struct hn_rx_ring, hn_lro.lro_flushed),
	    hn_rx_stat_u64_sysctl, "LU", "LRO flushed");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_tried",
	    CTLTYPE_ULONG | CTLFLAG_RW, sc,
	    __offsetof(struct hn_rx_ring, hn_lro_tried),
	    hn_rx_stat_ulong_sysctl, "LU", "# of LRO tries");
#if __FreeBSD_version >= 1100099
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_length_lim",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, 0, hn_lro_lenlim_sysctl, "IU",
	    "Max # of data bytes to be aggregated by LRO");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_ackcnt_lim",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, hn_lro_ackcnt_sysctl, "I",
	    "Max # of ACKs to be aggregated by LRO");
#endif
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hosttcp",
	    CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_TCP,
	    hn_trust_hcsum_sysctl, "I",
	    "Trust tcp segment verification on host side, "
	    "when csum info is missing");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostudp",
	    CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_UDP,
	    hn_trust_hcsum_sysctl, "I",
	    "Trust udp datagram verification on host side, "
	    "when csum info is missing");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostip",
	    CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_IP,
	    hn_trust_hcsum_sysctl, "I",
	    "Trust ip packet verification on host side, "
	    "when csum info is missing");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_ip",
	    CTLTYPE_ULONG | CTLFLAG_RW, sc,
	    __offsetof(struct hn_rx_ring, hn_csum_ip),
	    hn_rx_stat_ulong_sysctl, "LU", "RXCSUM IP");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_tcp",
	    CTLTYPE_ULONG | CTLFLAG_RW, sc,
	    __offsetof(struct hn_rx_ring, hn_csum_tcp),
	    hn_rx_stat_ulong_sysctl, "LU", "RXCSUM TCP");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_udp",
	    CTLTYPE_ULONG | CTLFLAG_RW, sc,
	    __offsetof(struct hn_rx_ring, hn_csum_udp),
	    hn_rx_stat_ulong_sysctl, "LU", "RXCSUM UDP");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_trusted",
	    CTLTYPE_ULONG | CTLFLAG_RW, sc,
	    __offsetof(struct hn_rx_ring, hn_csum_trusted),
	    hn_rx_stat_ulong_sysctl, "LU",
	    "# of packets that we trust host's csum verification");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "small_pkts",
	    CTLTYPE_ULONG | CTLFLAG_RW, sc,
	    __offsetof(struct hn_rx_ring, hn_small_pkts),
	    hn_rx_stat_ulong_sysctl, "LU", "# of small packets received");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_ring_cnt",
	    CTLFLAG_RD, &sc->hn_rx_ring_cnt, 0, "# created RX rings");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_ring_inuse",
	    CTLFLAG_RD, &sc->hn_rx_ring_inuse, 0, "# used RX rings");
}
static void
hn_destroy_rx_data(struct hn_softc *sc)
{
#if defined(INET) || defined(INET6)
	int i;
#endif

	if (sc->hn_rx_ring_cnt == 0)
		return;

#if defined(INET) || defined(INET6)
	for (i = 0; i < sc->hn_rx_ring_cnt; ++i)
		tcp_lro_free(&sc->hn_rx_ring[i].hn_lro);
#endif
	free(sc->hn_rx_ring, M_NETVSC);
	sc->hn_rx_ring = NULL;

	sc->hn_rx_ring_cnt = 0;
	sc->hn_rx_ring_inuse = 0;
}
2265 hn_create_tx_ring(struct hn_softc *sc, int id)
2267 struct hn_tx_ring *txr = &sc->hn_tx_ring[id];
2268 bus_dma_tag_t parent_dtag;
2272 txr->hn_tx_idx = id;
2274 #ifndef HN_USE_TXDESC_BUFRING
2275 mtx_init(&txr->hn_txlist_spin, "hn txlist", NULL, MTX_SPIN);
2277 mtx_init(&txr->hn_tx_lock, "hn tx", NULL, MTX_DEF);
2279 txr->hn_txdesc_cnt = HN_TX_DESC_CNT;
2280 txr->hn_txdesc = malloc(sizeof(struct hn_txdesc) * txr->hn_txdesc_cnt,
2281 M_NETVSC, M_WAITOK | M_ZERO);
2282 #ifndef HN_USE_TXDESC_BUFRING
2283 SLIST_INIT(&txr->hn_txlist);
2285 txr->hn_txdesc_br = buf_ring_alloc(txr->hn_txdesc_cnt, M_NETVSC,
2286 M_WAITOK, &txr->hn_tx_lock);
2289 txr->hn_tx_taskq = sc->hn_tx_taskq;
2291 if (hn_use_if_start) {
2292 txr->hn_txeof = hn_start_txeof;
2293 TASK_INIT(&txr->hn_tx_task, 0, hn_start_taskfunc, txr);
2294 TASK_INIT(&txr->hn_txeof_task, 0, hn_start_txeof_taskfunc, txr);
2296 txr->hn_txeof = hn_xmit_txeof;
2297 TASK_INIT(&txr->hn_tx_task, 0, hn_xmit_taskfunc, txr);
2298 TASK_INIT(&txr->hn_txeof_task, 0, hn_xmit_txeof_taskfunc, txr);
2299 txr->hn_mbuf_br = buf_ring_alloc(txr->hn_txdesc_cnt, M_NETVSC,
2300 M_WAITOK, &txr->hn_tx_lock);
2303 txr->hn_direct_tx_size = hn_direct_tx_size;
2304 if (hv_vmbus_protocal_version >= HV_VMBUS_VERSION_WIN8_1)
2305 txr->hn_csum_assist = HN_CSUM_ASSIST;
2307 txr->hn_csum_assist = HN_CSUM_ASSIST_WIN8;
2310 * Always schedule transmission instead of trying to do direct
2311 * transmission. This one gives the best performance so far.
2313 txr->hn_sched_tx = 1;

	parent_dtag = bus_get_dma_tag(sc->hn_dev);

	/* DMA tag for RNDIS messages. */
	error = bus_dma_tag_create(parent_dtag, /* parent */
	    HN_RNDIS_MSG_ALIGN,		/* alignment */
	    HN_RNDIS_MSG_BOUNDARY,	/* boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    HN_RNDIS_MSG_LEN,		/* maxsize */
	    1,				/* nsegments */
	    HN_RNDIS_MSG_LEN,		/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockfuncarg */
	    &txr->hn_tx_rndis_dtag);
	if (error) {
		device_printf(sc->hn_dev, "failed to create rndis dmatag\n");
		return error;
	}

	/* DMA tag for data. */
	error = bus_dma_tag_create(parent_dtag, /* parent */
	    1,				/* alignment */
	    HN_TX_DATA_BOUNDARY,	/* boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    HN_TX_DATA_MAXSIZE,		/* maxsize */
	    HN_TX_DATA_SEGCNT_MAX,	/* nsegments */
	    HN_TX_DATA_SEGSIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockfuncarg */
	    &txr->hn_tx_data_dtag);
	if (error) {
		device_printf(sc->hn_dev, "failed to create data dmatag\n");
		return error;
	}

	for (i = 0; i < txr->hn_txdesc_cnt; ++i) {
		struct hn_txdesc *txd = &txr->hn_txdesc[i];

		txd->txr = txr;

		/*
		 * Allocate and load RNDIS messages.
		 */
		error = bus_dmamem_alloc(txr->hn_tx_rndis_dtag,
		    (void **)&txd->rndis_msg,
		    BUS_DMA_WAITOK | BUS_DMA_COHERENT,
		    &txd->rndis_msg_dmap);
		if (error) {
			device_printf(sc->hn_dev,
			    "failed to allocate rndis_msg, %d\n", i);
			return error;
		}

		error = bus_dmamap_load(txr->hn_tx_rndis_dtag,
		    txd->rndis_msg_dmap,
		    txd->rndis_msg, HN_RNDIS_MSG_LEN,
		    hn_dma_map_paddr, &txd->rndis_msg_paddr,
		    BUS_DMA_NOWAIT);
		if (error) {
			device_printf(sc->hn_dev,
			    "failed to load rndis_msg, %d\n", i);
			bus_dmamem_free(txr->hn_tx_rndis_dtag,
			    txd->rndis_msg, txd->rndis_msg_dmap);
			return error;
		}

		/* DMA map for TX data. */
		error = bus_dmamap_create(txr->hn_tx_data_dtag, 0,
		    &txd->data_dmap);
		if (error) {
			device_printf(sc->hn_dev,
			    "failed to allocate tx data dmamap\n");
			bus_dmamap_unload(txr->hn_tx_rndis_dtag,
			    txd->rndis_msg_dmap);
			bus_dmamem_free(txr->hn_tx_rndis_dtag,
			    txd->rndis_msg, txd->rndis_msg_dmap);
			return error;
		}

		/* All set, put it to the free list. */
		txd->flags |= HN_TXD_FLAG_ONLIST;
#ifndef HN_USE_TXDESC_BUFRING
		SLIST_INSERT_HEAD(&txr->hn_txlist, txd, link);
#else
		buf_ring_enqueue(txr->hn_txdesc_br, txd);
#endif
	}
	txr->hn_txdesc_avail = txr->hn_txdesc_cnt;

	if (sc->hn_tx_sysctl_tree != NULL) {
		struct sysctl_oid_list *child;
		struct sysctl_ctx_list *ctx;
		char name[16];

		/*
		 * Create the per-TX-ring sysctl tree:
		 * dev.hn.UNIT.tx.RINGID
		 */
		ctx = device_get_sysctl_ctx(sc->hn_dev);
		child = SYSCTL_CHILDREN(sc->hn_tx_sysctl_tree);

		snprintf(name, sizeof(name), "%d", id);
		txr->hn_tx_sysctl_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
		    name, CTLFLAG_RD, 0, "");

		if (txr->hn_tx_sysctl_tree != NULL) {
			child = SYSCTL_CHILDREN(txr->hn_tx_sysctl_tree);

			SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_avail",
			    CTLFLAG_RD, &txr->hn_txdesc_avail, 0,
			    "# of available TX descs");
			if (!hn_use_if_start) {
				SYSCTL_ADD_INT(ctx, child, OID_AUTO, "oactive",
				    CTLFLAG_RD, &txr->hn_oactive, 0,
				    "over active");
			}
			SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "packets",
			    CTLFLAG_RW, &txr->hn_pkts,
			    "# of packets transmitted");
		}
	}

	return 0;
}
static void
hn_txdesc_dmamap_destroy(struct hn_txdesc *txd)
{
	struct hn_tx_ring *txr = txd->txr;

	KASSERT(txd->m == NULL, ("still has mbuf installed"));
	KASSERT((txd->flags & HN_TXD_FLAG_DMAMAP) == 0, ("still dma mapped"));

	bus_dmamap_unload(txr->hn_tx_rndis_dtag, txd->rndis_msg_dmap);
	bus_dmamem_free(txr->hn_tx_rndis_dtag, txd->rndis_msg,
	    txd->rndis_msg_dmap);
	bus_dmamap_destroy(txr->hn_tx_data_dtag, txd->data_dmap);
}
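
/*
 * Drain every TX descriptor off the free list (or buf_ring), destroy
 * its DMA state, then release the ring's DMA tags, buf_rings,
 * descriptor array, and mutexes.
 */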
static void
hn_destroy_tx_ring(struct hn_tx_ring *txr)
{
	struct hn_txdesc *txd;

	if (txr->hn_txdesc == NULL)
		return;

#ifndef HN_USE_TXDESC_BUFRING
	while ((txd = SLIST_FIRST(&txr->hn_txlist)) != NULL) {
		SLIST_REMOVE_HEAD(&txr->hn_txlist, link);
		hn_txdesc_dmamap_destroy(txd);
	}
#else
	mtx_lock(&txr->hn_tx_lock);
	while ((txd = buf_ring_dequeue_sc(txr->hn_txdesc_br)) != NULL)
		hn_txdesc_dmamap_destroy(txd);
	mtx_unlock(&txr->hn_tx_lock);
#endif

	if (txr->hn_tx_data_dtag != NULL)
		bus_dma_tag_destroy(txr->hn_tx_data_dtag);
	if (txr->hn_tx_rndis_dtag != NULL)
		bus_dma_tag_destroy(txr->hn_tx_rndis_dtag);

#ifdef HN_USE_TXDESC_BUFRING
	buf_ring_free(txr->hn_txdesc_br, M_NETVSC);
#endif

	free(txr->hn_txdesc, M_NETVSC);
	txr->hn_txdesc = NULL;

	if (txr->hn_mbuf_br != NULL)
		buf_ring_free(txr->hn_mbuf_br, M_NETVSC);

#ifndef HN_USE_TXDESC_BUFRING
	mtx_destroy(&txr->hn_txlist_spin);
#endif
	mtx_destroy(&txr->hn_tx_lock);
}
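
/*
 * Create all TX rings and the dev.hn.UNIT.tx sysctl tree, including
 * the aggregate TX statistics and the tunables shared by all rings.
 */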
static int
hn_create_tx_data(struct hn_softc *sc, int ring_cnt)
{
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx;
	int i;

	sc->hn_tx_ring_cnt = ring_cnt;
	sc->hn_tx_ring_inuse = sc->hn_tx_ring_cnt;

	sc->hn_tx_ring = malloc(sizeof(struct hn_tx_ring) * sc->hn_tx_ring_cnt,
	    M_NETVSC, M_WAITOK | M_ZERO);

	ctx = device_get_sysctl_ctx(sc->hn_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->hn_dev));

	/* Create dev.hn.UNIT.tx sysctl tree */
	sc->hn_tx_sysctl_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "tx",
	    CTLFLAG_RD, 0, "");

	for (i = 0; i < sc->hn_tx_ring_cnt; ++i) {
		int error;

		error = hn_create_tx_ring(sc, i);
		if (error)
			return error;
	}

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "no_txdescs",
	    CTLTYPE_ULONG | CTLFLAG_RW, sc,
	    __offsetof(struct hn_tx_ring, hn_no_txdescs),
	    hn_tx_stat_ulong_sysctl, "LU", "# of times short of TX descs");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "send_failed",
	    CTLTYPE_ULONG | CTLFLAG_RW, sc,
	    __offsetof(struct hn_tx_ring, hn_send_failed),
	    hn_tx_stat_ulong_sysctl, "LU", "# of hyper-v send failures");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txdma_failed",
	    CTLTYPE_ULONG | CTLFLAG_RW, sc,
	    __offsetof(struct hn_tx_ring, hn_txdma_failed),
	    hn_tx_stat_ulong_sysctl, "LU", "# of TX DMA failures");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_collapsed",
	    CTLTYPE_ULONG | CTLFLAG_RW, sc,
	    __offsetof(struct hn_tx_ring, hn_tx_collapsed),
	    hn_tx_stat_ulong_sysctl, "LU", "# of TX mbufs collapsed");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney",
	    CTLTYPE_ULONG | CTLFLAG_RW, sc,
	    __offsetof(struct hn_tx_ring, hn_tx_chimney),
	    hn_tx_stat_ulong_sysctl, "LU", "# of chimney sends");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_cnt",
	    CTLFLAG_RD, &sc->hn_tx_ring[0].hn_txdesc_cnt, 0,
	    "# of total TX descs");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_chimney_max",
	    CTLFLAG_RD, &sc->hn_tx_chimney_max, 0,
	    "Chimney send packet size upper boundary");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney_size",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, hn_tx_chimney_size_sysctl,
	    "I", "Chimney send packet size limit");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "direct_tx_size",
	    CTLTYPE_INT | CTLFLAG_RW, sc,
	    __offsetof(struct hn_tx_ring, hn_direct_tx_size),
	    hn_tx_conf_int_sysctl, "I",
	    "Size of the packet for direct transmission");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "sched_tx",
	    CTLTYPE_INT | CTLFLAG_RW, sc,
	    __offsetof(struct hn_tx_ring, hn_sched_tx),
	    hn_tx_conf_int_sysctl, "I",
	    "Always schedule transmission "
	    "instead of doing direct transmission");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_cnt",
	    CTLFLAG_RD, &sc->hn_tx_ring_cnt, 0, "# created TX rings");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_inuse",
	    CTLFLAG_RD, &sc->hn_tx_ring_inuse, 0, "# used TX rings");

	return 0;
}
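
/*
 * Propagate a new chimney send size limit to all TX rings currently
 * in use.
 */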
static void
hn_set_tx_chimney_size(struct hn_softc *sc, int chimney_size)
{
	int i;

	NV_LOCK(sc);
	for (i = 0; i < sc->hn_tx_ring_inuse; ++i)
		sc->hn_tx_ring[i].hn_tx_chimney_size = chimney_size;
	NV_UNLOCK(sc);
}
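
/*
 * Counterpart of hn_create_tx_data(): destroy every TX ring and free
 * the TX ring array.
 */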
static void
hn_destroy_tx_data(struct hn_softc *sc)
{
	int i;

	if (sc->hn_tx_ring_cnt == 0)
		return;

	for (i = 0; i < sc->hn_tx_ring_cnt; ++i)
		hn_destroy_tx_ring(&sc->hn_tx_ring[i]);

	free(sc->hn_tx_ring, M_NETVSC);
	sc->hn_tx_ring = NULL;

	sc->hn_tx_ring_cnt = 0;
	sc->hn_tx_ring_inuse = 0;
}
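
/*
 * Taskqueue handler for the if_start transmission path: grab the ring
 * lock and kick hn_start_locked().
 */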
static void
hn_start_taskfunc(void *xtxr, int pending __unused)
{
	struct hn_tx_ring *txr = xtxr;

	mtx_lock(&txr->hn_tx_lock);
	hn_start_locked(txr, 0);
	mtx_unlock(&txr->hn_tx_lock);
}
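
/*
 * TX-completion taskqueue handler for the if_start path: clear
 * IFF_DRV_OACTIVE and resume transmission.
 */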
static void
hn_start_txeof_taskfunc(void *xtxr, int pending __unused)
{
	struct hn_tx_ring *txr = xtxr;

	mtx_lock(&txr->hn_tx_lock);
	atomic_clear_int(&txr->hn_sc->hn_ifp->if_drv_flags, IFF_DRV_OACTIVE);
	hn_start_locked(txr, 0);
	mtx_unlock(&txr->hn_tx_lock);
}
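
/*
 * Wait for the pending transmission and TX-completion tasks of every
 * in-use TX ring to finish; used when bringing the interface down.
 */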
static void
hn_stop_tx_tasks(struct hn_softc *sc)
{
	int i;

	for (i = 0; i < sc->hn_tx_ring_inuse; ++i) {
		struct hn_tx_ring *txr = &sc->hn_tx_ring[i];

		taskqueue_drain(txr->hn_tx_taskq, &txr->hn_tx_task);
		taskqueue_drain(txr->hn_tx_taskq, &txr->hn_txeof_task);
	}
}
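
/*
 * Drain the ring's mbuf buf_ring (if_transmit path).  A packet longer
 * than `len' (when len > 0) is not sent directly; hn_xmit() then
 * returns 1 so the caller reschedules the work to the TX taskqueue.
 */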
static int
hn_xmit(struct hn_tx_ring *txr, int len)
{
	struct hn_softc *sc = txr->hn_sc;
	struct ifnet *ifp = sc->hn_ifp;
	struct mbuf *m_head;

	mtx_assert(&txr->hn_tx_lock, MA_OWNED);
	KASSERT(hn_use_if_start == 0,
	    ("hn_xmit is called when if_start is enabled"));

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || txr->hn_oactive)
		return 0;

	while ((m_head = drbr_peek(ifp, txr->hn_mbuf_br)) != NULL) {
		struct hn_txdesc *txd;
		int error;

		if (len > 0 && m_head->m_pkthdr.len > len) {
			/*
			 * Sending this packet could be time consuming;
			 * let callers dispatch it (and any following
			 * packets) to the tx taskqueue.
			 */
			drbr_putback(ifp, txr->hn_mbuf_br, m_head);
			return 1;
		}

		txd = hn_txdesc_get(txr);
		if (txd == NULL) {
			txr->hn_no_txdescs++;
			drbr_putback(ifp, txr->hn_mbuf_br, m_head);
			txr->hn_oactive = 1;
			break;
		}

		error = hn_encap(txr, txd, &m_head);
		if (error) {
			/* Both txd and m_head are freed; discard */
			drbr_advance(ifp, txr->hn_mbuf_br);
			continue;
		}

		error = hn_send_pkt(ifp, txr, txd);
		if (__predict_false(error)) {
			/* txd is freed, but m_head is not */
			drbr_putback(ifp, txr->hn_mbuf_br, m_head);
			txr->hn_oactive = 1;
			break;
		}

		/* Sent */
		drbr_advance(ifp, txr->hn_mbuf_br);
	}
	return 0;
}
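
/*
 * if_transmit method: pick a TX ring from the mbuf's flowid, enqueue
 * the packet, and either transmit directly (when the ring lock is
 * uncontended and direct transmission is allowed) or defer to the
 * ring's TX task.
 */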
static int
hn_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct hn_softc *sc = ifp->if_softc;
	struct hn_tx_ring *txr;
	int error, idx = 0;

	/*
	 * Select the TX ring based on flowid
	 */
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		idx = m->m_pkthdr.flowid % sc->hn_tx_ring_inuse;
	txr = &sc->hn_tx_ring[idx];

	error = drbr_enqueue(ifp, txr->hn_mbuf_br, m);
	if (error)
		return error;

	if (txr->hn_oactive)
		return 0;

	if (txr->hn_sched_tx)
		goto do_sched;

	if (mtx_trylock(&txr->hn_tx_lock)) {
		int sched;

		sched = hn_xmit(txr, txr->hn_direct_tx_size);
		mtx_unlock(&txr->hn_tx_lock);
		if (!sched)
			return 0;
	}
do_sched:
	taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_tx_task);
	return 0;
}
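
/*
 * if_qflush method: discard every packet queued on each in-use TX
 * ring's buf_ring, then flush the interface send queue.
 */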
static void
hn_xmit_qflush(struct ifnet *ifp)
{
	struct hn_softc *sc = ifp->if_softc;
	int i;

	for (i = 0; i < sc->hn_tx_ring_inuse; ++i) {
		struct hn_tx_ring *txr = &sc->hn_tx_ring[i];
		struct mbuf *m;

		mtx_lock(&txr->hn_tx_lock);
		while ((m = buf_ring_dequeue_sc(txr->hn_mbuf_br)) != NULL)
			m_freem(m);
		mtx_unlock(&txr->hn_tx_lock);
	}
	if_qflush(ifp);
}
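
/*
 * TX completion for the if_transmit path: clear the ring's oactive
 * flag and restart transmission, either directly (lock uncontended)
 * or via the TX-completion task.
 */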
static void
hn_xmit_txeof(struct hn_tx_ring *txr)
{

	if (txr->hn_sched_tx)
		goto do_sched;

	if (mtx_trylock(&txr->hn_tx_lock)) {
		int sched;

		txr->hn_oactive = 0;
		sched = hn_xmit(txr, txr->hn_direct_tx_size);
		mtx_unlock(&txr->hn_tx_lock);
		if (sched) {
			taskqueue_enqueue(txr->hn_tx_taskq,
			    &txr->hn_tx_task);
		}
	} else {
do_sched:
		/*
		 * Release oactive early, in the hope that others can
		 * catch up.  The task will clear oactive again, with
		 * hn_tx_lock held, to avoid possible races.
		 */
		txr->hn_oactive = 0;
		taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_txeof_task);
	}
}

static void
hn_xmit_taskfunc(void *xtxr, int pending __unused)
{
	struct hn_tx_ring *txr = xtxr;

	mtx_lock(&txr->hn_tx_lock);
	hn_xmit(txr, 0);
	mtx_unlock(&txr->hn_tx_lock);
}

static void
hn_xmit_txeof_taskfunc(void *xtxr, int pending __unused)
{
	struct hn_tx_ring *txr = xtxr;

	mtx_lock(&txr->hn_tx_lock);
	txr->hn_oactive = 0;
	hn_xmit(txr, 0);
	mtx_unlock(&txr->hn_tx_lock);
}
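
/*
 * Bind a VMBus (sub-)channel to its RX ring and, when one exists for
 * this index, its TX ring, then pin the channel to a CPU so ring
 * processing is spread across the available CPUs.
 */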
static void
hn_channel_attach(struct hn_softc *sc, struct hv_vmbus_channel *chan)
{
	struct hn_rx_ring *rxr;
	int idx;

	idx = chan->offer_msg.offer.sub_channel_index;

	KASSERT(idx >= 0 && idx < sc->hn_rx_ring_inuse,
	    ("invalid channel index %d, should be >= 0 and < %d",
	     idx, sc->hn_rx_ring_inuse));
	rxr = &sc->hn_rx_ring[idx];
	KASSERT((rxr->hn_rx_flags & HN_RX_FLAG_ATTACHED) == 0,
	    ("RX ring %d already attached", idx));
	rxr->hn_rx_flags |= HN_RX_FLAG_ATTACHED;

	chan->hv_chan_rxr = rxr;
	if_printf(sc->hn_ifp, "link RX ring %d to channel %u\n",
	    idx, chan->offer_msg.child_rel_id);

	if (idx < sc->hn_tx_ring_inuse) {
		struct hn_tx_ring *txr = &sc->hn_tx_ring[idx];

		KASSERT((txr->hn_tx_flags & HN_TX_FLAG_ATTACHED) == 0,
		    ("TX ring %d already attached", idx));
		txr->hn_tx_flags |= HN_TX_FLAG_ATTACHED;

		chan->hv_chan_txr = txr;
		txr->hn_chan = chan;
		if_printf(sc->hn_ifp, "link TX ring %d to channel %u\n",
		    idx, chan->offer_msg.child_rel_id);
	}

	/* Bind channel to a proper CPU */
	vmbus_channel_cpu_set(chan, (sc->hn_cpu + idx) % mp_ncpus);
}
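
/*
 * Called for each sub-channel offered by the host; the primary
 * channel is attached during device attach, so only sub-channels
 * (subidx > 0) are expected here.
 */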
void
netvsc_subchan_callback(struct hn_softc *sc, struct hv_vmbus_channel *chan)
{

	KASSERT(!HV_VMBUS_CHAN_ISPRIMARY(chan),
	    ("subchannel callback on primary channel"));
	KASSERT(chan->offer_msg.offer.sub_channel_index > 0,
	    ("invalid channel subidx %u",
	     chan->offer_msg.offer.sub_channel_index));
	hn_channel_attach(sc, chan);
}
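
/*
 * Create the TX taskqueue shared by all hn(4) instances when
 * hn_share_tx_taskq is set; optionally bind its thread to the CPU
 * given by hn_bind_tx_taskq.
 */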
static void
hn_tx_taskq_create(void *arg __unused)
{
	if (!hn_share_tx_taskq)
		return;

	hn_tx_taskq = taskqueue_create("hn_tx", M_WAITOK,
	    taskqueue_thread_enqueue, &hn_tx_taskq);
	if (hn_bind_tx_taskq >= 0) {
		int cpu = hn_bind_tx_taskq;
		cpuset_t cpu_set;

		if (cpu > mp_ncpus - 1)
			cpu = mp_ncpus - 1;
		CPU_SETOF(cpu, &cpu_set);
		taskqueue_start_threads_cpuset(&hn_tx_taskq, 1, PI_NET,
		    &cpu_set, "hn tx");
	} else {
		taskqueue_start_threads(&hn_tx_taskq, 1, PI_NET, "hn tx");
	}
}
SYSINIT(hn_txtq_create, SI_SUB_DRIVERS, SI_ORDER_FIRST,
    hn_tx_taskq_create, NULL);

static void
hn_tx_taskq_destroy(void *arg __unused)
{
	if (hn_tx_taskq != NULL)
		taskqueue_free(hn_tx_taskq);
}
SYSUNINIT(hn_txtq_destroy, SI_SUB_DRIVERS, SI_ORDER_FIRST,
    hn_tx_taskq_destroy, NULL);

static device_method_t netvsc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		netvsc_probe),
	DEVMETHOD(device_attach,	netvsc_attach),
	DEVMETHOD(device_detach,	netvsc_detach),
	DEVMETHOD(device_shutdown,	netvsc_shutdown),

	DEVMETHOD_END
};

static driver_t netvsc_driver = {
	NETVSC_DEVNAME,
	netvsc_methods,
	sizeof(hn_softc_t)
};

static devclass_t netvsc_devclass;

DRIVER_MODULE(hn, vmbus, netvsc_driver, netvsc_devclass, 0, 0);
MODULE_VERSION(hn, 1);
MODULE_DEPEND(hn, vmbus, 1, 1, 1);