/*-
 * Copyright (c) 2010-2012 Citrix Inc.
 * Copyright (c) 2009-2012,2016 Microsoft Corp.
 * Copyright (c) 2012 NetApp Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2004-2006 Kip Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "opt_inet6.h"
#include "opt_inet.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/buf_ring.h>

#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <netinet/ip6.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/frame.h>

#include <sys/rman.h>
#include <sys/mutex.h>
#include <sys/errno.h>
#include <sys/types.h>
#include <machine/atomic.h>

#include <machine/intr_machdep.h>

#include <machine/in_cksum.h>

#include <dev/hyperv/include/hyperv.h>
#include <dev/hyperv/include/hyperv_busdma.h>
#include <dev/hyperv/include/vmbus_xact.h>

#include "hv_net_vsc.h"
#include "hv_rndis.h"
#include "hv_rndis_filter.h"
#include "vmbus_if.h"
/* Short for Hyper-V network interface */
#define NETVSC_DEVNAME		"hn"

#define HN_XACT_REQ_SIZE	(2 * PAGE_SIZE)
#define HN_XACT_RESP_SIZE	(2 * PAGE_SIZE)
/*
 * It looks like offset 0 of buf is reserved to hold the softc pointer.
 * The sc pointer is evidently not needed, and is not presently populated.
 * The packet offset is where the netvsc_packet starts in the buffer.
 */
#define HV_NV_SC_PTR_OFFSET_IN_BUF	0
#define HV_NV_PACKET_OFFSET_IN_BUF	16
/* YYY should get it from the underlying channel */
#define HN_TX_DESC_CNT			512

#define HN_LROENT_CNT_DEF		128

#define HN_RING_CNT_DEF_MAX		8
#define HN_RNDIS_MSG_LEN		\
    (sizeof(rndis_msg) +		\
     RNDIS_HASHVAL_PPI_SIZE +		\
     RNDIS_VLAN_PPI_SIZE +		\
     RNDIS_TSO_PPI_SIZE +		\
     RNDIS_CSUM_PPI_SIZE)
#define HN_RNDIS_MSG_BOUNDARY		PAGE_SIZE
#define HN_RNDIS_MSG_ALIGN		CACHE_LINE_SIZE
#define HN_TX_DATA_BOUNDARY		PAGE_SIZE
#define HN_TX_DATA_MAXSIZE		IP_MAXPACKET
#define HN_TX_DATA_SEGSIZE		PAGE_SIZE
#define HN_TX_DATA_SEGCNT_MAX		\
    (NETVSC_PACKET_MAXPAGE - HV_RF_NUM_TX_RESERVED_PAGE_BUFS)
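/*
 * The first HV_RF_NUM_TX_RESERVED_PAGE_BUFS page buffers of each
 * transmit carry the RNDIS message itself (see the txr->hn_gpa[]
 * setup in hn_encap() below), so only the remaining slots are
 * available for packet data segments.
 */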
#define HN_DIRECT_TX_SIZE_DEF		128

#define HN_EARLY_TXEOF_THRESH		8
struct hn_txdesc {
#ifndef HN_USE_TXDESC_BUFRING
	SLIST_ENTRY(hn_txdesc) link;
#endif
	struct mbuf	*m;
	struct hn_tx_ring *txr;
	int		refs;
	uint32_t	flags;		/* HN_TXD_FLAG_ */
	struct hn_send_ctx send_ctx;

	bus_dmamap_t	data_dmap;

	bus_addr_t	rndis_msg_paddr;
	rndis_msg	*rndis_msg;
	bus_dmamap_t	rndis_msg_dmap;
};

#define HN_TXD_FLAG_ONLIST	0x1
#define HN_TXD_FLAG_DMAMAP	0x2
/*
 * Only enable UDP checksum offloading when it is on 2012R2 or
 * later.  UDP checksum offloading doesn't work on earlier
 * Windows releases.
 */
#define HN_CSUM_ASSIST_WIN8	(CSUM_IP | CSUM_TCP)
#define HN_CSUM_ASSIST		(CSUM_IP | CSUM_UDP | CSUM_TCP)
#define HN_LRO_LENLIM_MULTIRX_DEF	(12 * ETHERMTU)
#define HN_LRO_LENLIM_DEF		(25 * ETHERMTU)
/* YYY 2*MTU is a bit rough, but should be good enough. */
#define HN_LRO_LENLIM_MIN(ifp)		(2 * (ifp)->if_mtu)

#define HN_LRO_ACKCNT_DEF		1
/*
 * Be aware that this sleepable mutex will exhibit WITNESS errors when
 * certain TCP and ARP code paths are taken.  This appears to be a
 * well-known condition, as all other drivers checked use a sleeping
 * mutex to protect their transmit paths.
 * Also be aware that mutexes do not play well with semaphores, and there
 * is a conflicting semaphore in a certain channel code path.
 */
#define NV_LOCK_INIT(_sc, _name)	\
    mtx_init(&(_sc)->hn_lock, _name, MTX_NETWORK_LOCK, MTX_DEF)
#define NV_LOCK(_sc)			mtx_lock(&(_sc)->hn_lock)
#define NV_LOCK_ASSERT(_sc)		mtx_assert(&(_sc)->hn_lock, MA_OWNED)
#define NV_UNLOCK(_sc)			mtx_unlock(&(_sc)->hn_lock)
#define NV_LOCK_DESTROY(_sc)		mtx_destroy(&(_sc)->hn_lock)
int hv_promisc_mode = 0;	/* normal mode by default */

SYSCTL_NODE(_hw, OID_AUTO, hn, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "Hyper-V network interface");
/* Trust tcp segment verification on host side. */
static int hn_trust_hosttcp = 1;
SYSCTL_INT(_hw_hn, OID_AUTO, trust_hosttcp, CTLFLAG_RDTUN,
    &hn_trust_hosttcp, 0,
    "Trust tcp segment verification on host side, "
    "when csum info is missing (global setting)");

/* Trust udp datagram verification on host side. */
static int hn_trust_hostudp = 1;
SYSCTL_INT(_hw_hn, OID_AUTO, trust_hostudp, CTLFLAG_RDTUN,
    &hn_trust_hostudp, 0,
    "Trust udp datagram verification on host side, "
    "when csum info is missing (global setting)");

/* Trust ip packet verification on host side. */
static int hn_trust_hostip = 1;
SYSCTL_INT(_hw_hn, OID_AUTO, trust_hostip, CTLFLAG_RDTUN,
    &hn_trust_hostip, 0,
    "Trust ip packet verification on host side, "
    "when csum info is missing (global setting)");
#if __FreeBSD_version >= 1100045
/* Limit TSO burst size */
static int hn_tso_maxlen = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, tso_maxlen, CTLFLAG_RDTUN,
    &hn_tso_maxlen, 0, "TSO burst limit");
#endif

/* Limit chimney send size */
static int hn_tx_chimney_size = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, tx_chimney_size, CTLFLAG_RDTUN,
    &hn_tx_chimney_size, 0, "Chimney send packet size limit");

/* Limit the size of packet for direct transmission */
static int hn_direct_tx_size = HN_DIRECT_TX_SIZE_DEF;
SYSCTL_INT(_hw_hn, OID_AUTO, direct_tx_size, CTLFLAG_RDTUN,
    &hn_direct_tx_size, 0, "Size of the packet for direct transmission");
#if defined(INET) || defined(INET6)
#if __FreeBSD_version >= 1100095
static int hn_lro_entry_count = HN_LROENT_CNT_DEF;
SYSCTL_INT(_hw_hn, OID_AUTO, lro_entry_count, CTLFLAG_RDTUN,
    &hn_lro_entry_count, 0, "LRO entry count");
#endif
#endif

static int hn_share_tx_taskq = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, share_tx_taskq, CTLFLAG_RDTUN,
    &hn_share_tx_taskq, 0, "Enable shared TX taskqueue");

static struct taskqueue	*hn_tx_taskq;
#ifndef HN_USE_TXDESC_BUFRING
static int hn_use_txdesc_bufring = 0;
#else
static int hn_use_txdesc_bufring = 1;
#endif
SYSCTL_INT(_hw_hn, OID_AUTO, use_txdesc_bufring, CTLFLAG_RD,
    &hn_use_txdesc_bufring, 0, "Use buf_ring for TX descriptors");

static int hn_bind_tx_taskq = -1;
SYSCTL_INT(_hw_hn, OID_AUTO, bind_tx_taskq, CTLFLAG_RDTUN,
    &hn_bind_tx_taskq, 0, "Bind TX taskqueue to the specified cpu");

static int hn_use_if_start = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, use_if_start, CTLFLAG_RDTUN,
    &hn_use_if_start, 0, "Use if_start TX method");

static int hn_chan_cnt = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, chan_cnt, CTLFLAG_RDTUN,
    &hn_chan_cnt, 0,
    "# of channels to use; each channel has one RX ring and one TX ring");

static int hn_tx_ring_cnt = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, tx_ring_cnt, CTLFLAG_RDTUN,
    &hn_tx_ring_cnt, 0, "# of TX rings to use");

static int hn_tx_swq_depth = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, tx_swq_depth, CTLFLAG_RDTUN,
    &hn_tx_swq_depth, 0, "Depth of IFQ or BUFRING");

#if __FreeBSD_version >= 1100095
static u_int hn_lro_mbufq_depth = 0;
SYSCTL_UINT(_hw_hn, OID_AUTO, lro_mbufq_depth, CTLFLAG_RDTUN,
    &hn_lro_mbufq_depth, 0, "Depth of LRO mbuf queue");
#endif

static u_int hn_cpu_index;
/*
 * Forward declarations
 */
static void hn_stop(hn_softc_t *sc);
static void hn_ifinit_locked(hn_softc_t *sc);
static void hn_ifinit(void *xsc);
static int  hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int  hn_start_locked(struct hn_tx_ring *txr, int len);
static void hn_start(struct ifnet *ifp);
static void hn_start_txeof(struct hn_tx_ring *);
static int  hn_ifmedia_upd(struct ifnet *ifp);
static void hn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
#if __FreeBSD_version >= 1100099
static int  hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS);
static int  hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS);
#endif
static int  hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS);
static int  hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS);
static int  hn_rx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS);
static int  hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS);
static int  hn_tx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS);
static int  hn_tx_conf_int_sysctl(SYSCTL_HANDLER_ARGS);
static int  hn_check_iplen(const struct mbuf *, int);
static int  hn_create_tx_ring(struct hn_softc *, int);
static void hn_destroy_tx_ring(struct hn_tx_ring *);
static int  hn_create_tx_data(struct hn_softc *, int);
static void hn_destroy_tx_data(struct hn_softc *);
static void hn_start_taskfunc(void *, int);
static void hn_start_txeof_taskfunc(void *, int);
static void hn_stop_tx_tasks(struct hn_softc *);
static int  hn_encap(struct hn_tx_ring *, struct hn_txdesc *, struct mbuf **);
static void hn_create_rx_data(struct hn_softc *sc, int);
static void hn_destroy_rx_data(struct hn_softc *sc);
static void hn_set_tx_chimney_size(struct hn_softc *, int);
static void hn_channel_attach(struct hn_softc *, struct vmbus_channel *);
static void hn_subchan_attach(struct hn_softc *, struct vmbus_channel *);
static void hn_subchan_setup(struct hn_softc *);

static int hn_transmit(struct ifnet *, struct mbuf *);
static void hn_xmit_qflush(struct ifnet *);
static int hn_xmit(struct hn_tx_ring *, int);
static void hn_xmit_txeof(struct hn_tx_ring *);
static void hn_xmit_taskfunc(void *, int);
static void hn_xmit_txeof_taskfunc(void *, int);
#if __FreeBSD_version >= 1100099
static void
hn_set_lro_lenlim(struct hn_softc *sc, int lenlim)
{
	int i;

	for (i = 0; i < sc->hn_rx_ring_inuse; ++i)
		sc->hn_rx_ring[i].hn_lro.lro_length_lim = lenlim;
}
#endif
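/*
 * The software TX queue depth below honors the hn_tx_swq_depth tunable
 * only when it is at least the TX descriptor count; otherwise the
 * descriptor count itself is used as the queue depth.
 */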
static int
hn_get_txswq_depth(const struct hn_tx_ring *txr)
{

	KASSERT(txr->hn_txdesc_cnt > 0, ("tx ring is not setup yet"));
	if (hn_tx_swq_depth < txr->hn_txdesc_cnt)
		return txr->hn_txdesc_cnt;
	return hn_tx_swq_depth;
}
static int
hn_ifmedia_upd(struct ifnet *ifp __unused)
{

	return EOPNOTSUPP;
}

static void
hn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct hn_softc *sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->hn_carrier) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}
	ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
}
/* {F8615163-DF3E-46c5-913F-F2D2F965ED0E} */
static const struct hyperv_guid g_net_vsc_device_type = {
	.hv_guid = {0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
		0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E}
};
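/*
 * Note that the first three GUID fields are stored little-endian,
 * which is why the byte order above differs from the string form in
 * the comment: {F8615163-...} begins 0x63, 0x51, 0x61, 0xF8 in memory.
 */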
/*
 * Standard probe entry point.
 */
static int
netvsc_probe(device_t dev)
{

	if (VMBUS_PROBE_GUID(device_get_parent(dev), dev,
	    &g_net_vsc_device_type) == 0) {
		device_set_desc(dev, "Hyper-V Network Interface");
		return BUS_PROBE_DEFAULT;
	}
	return ENXIO;
}
/*
 * Standard attach entry point.
 *
 * Called when the driver is loaded.  It allocates needed resources,
 * and initializes the "hardware" and software.
 */
static int
netvsc_attach(device_t dev)
{
	netvsc_device_info device_info;
	struct hn_softc *sc;
	int unit = device_get_unit(dev);
	struct ifnet *ifp = NULL;
	int error, ring_cnt, tx_ring_cnt;
#if __FreeBSD_version >= 1100045
	int tso_maxlen;
#endif

	sc = device_get_softc(dev);
	sc->hn_unit = unit;
	sc->hn_dev = dev;
	sc->hn_prichan = vmbus_get_channel(dev);

	if (hn_tx_taskq == NULL) {
		sc->hn_tx_taskq = taskqueue_create("hn_tx", M_WAITOK,
		    taskqueue_thread_enqueue, &sc->hn_tx_taskq);
		if (hn_bind_tx_taskq >= 0) {
			int cpu = hn_bind_tx_taskq;
			cpuset_t cpu_set;

			if (cpu > mp_ncpus - 1)
				cpu = mp_ncpus - 1;
			CPU_SETOF(cpu, &cpu_set);
			taskqueue_start_threads_cpuset(&sc->hn_tx_taskq, 1,
			    PI_NET, &cpu_set, "%s tx",
			    device_get_nameunit(dev));
		} else {
			taskqueue_start_threads(&sc->hn_tx_taskq, 1, PI_NET,
			    "%s tx", device_get_nameunit(dev));
		}
	} else {
		sc->hn_tx_taskq = hn_tx_taskq;
	}
	NV_LOCK_INIT(sc, "NetVSCLock");

	ifp = sc->hn_ifp = if_alloc(IFT_ETHER);
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	/*
	 * Figure out the # of RX rings (ring_cnt) and the # of TX rings
	 * to use (tx_ring_cnt).
	 *
	 * The # of RX rings to use is same as the # of channels to use.
	 */
	ring_cnt = hn_chan_cnt;
	if (ring_cnt <= 0) {
		ring_cnt = mp_ncpus;
		if (ring_cnt > HN_RING_CNT_DEF_MAX)
			ring_cnt = HN_RING_CNT_DEF_MAX;
	} else if (ring_cnt > mp_ncpus) {
		ring_cnt = mp_ncpus;
	}

	tx_ring_cnt = hn_tx_ring_cnt;
	if (tx_ring_cnt <= 0 || tx_ring_cnt > ring_cnt)
		tx_ring_cnt = ring_cnt;
	if (hn_use_if_start) {
		/* ifnet.if_start only needs one TX ring. */
		tx_ring_cnt = 1;
	}

	/*
	 * Set the leader CPU for channels.
	 */
	sc->hn_cpu = atomic_fetchadd_int(&hn_cpu_index, ring_cnt) % mp_ncpus;
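	/*
	 * hn_cpu_index is advanced by ring_cnt for each device instance,
	 * so the channel leader CPUs of successive devices are spread
	 * across the system round-robin, modulo mp_ncpus.
	 */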
	error = hn_create_tx_data(sc, tx_ring_cnt);
	if (error)
		goto failed;
	hn_create_rx_data(sc, ring_cnt);

	/*
	 * Associate the first TX/RX ring w/ the primary channel.
	 */
	hn_channel_attach(sc, sc->hn_prichan);

	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = hn_ioctl;
	ifp->if_init = hn_ifinit;
	/* needed by hv_rf_on_device_add() code */
	ifp->if_mtu = ETHERMTU;
	if (hn_use_if_start) {
		int qdepth = hn_get_txswq_depth(&sc->hn_tx_ring[0]);

		ifp->if_start = hn_start;
		IFQ_SET_MAXLEN(&ifp->if_snd, qdepth);
		ifp->if_snd.ifq_drv_maxlen = qdepth - 1;
		IFQ_SET_READY(&ifp->if_snd);
	} else {
		ifp->if_transmit = hn_transmit;
		ifp->if_qflush = hn_xmit_qflush;
	}

	ifmedia_init(&sc->hn_media, 0, hn_ifmedia_upd, hn_ifmedia_sts);
	ifmedia_add(&sc->hn_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->hn_media, IFM_ETHER | IFM_AUTO);
	/* XXX ifmedia_set really should do this for us */
	sc->hn_media.ifm_media = sc->hn_media.ifm_cur->ifm_media;

	/*
	 * Tell upper layers that we support full VLAN capability.
	 */
	ifp->if_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |=
	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_TSO |
	    IFCAP_LRO;
	ifp->if_capenable |=
	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_TSO |
	    IFCAP_LRO;
	ifp->if_hwassist = sc->hn_tx_ring[0].hn_csum_assist | CSUM_TSO;

	sc->hn_xact = vmbus_xact_ctx_create(bus_get_dma_tag(dev),
	    HN_XACT_REQ_SIZE, HN_XACT_RESP_SIZE, 0);
	if (sc->hn_xact == NULL)
		goto failed;

	error = hv_rf_on_device_add(sc, &device_info, &ring_cnt,
	    &sc->hn_rx_ring[0]);
	if (error)
		goto failed;
	KASSERT(ring_cnt > 0 && ring_cnt <= sc->hn_rx_ring_inuse,
	    ("invalid channel count %d, should be at most %d",
	     ring_cnt, sc->hn_rx_ring_inuse));

	/*
	 * Set the # of TX/RX rings that could be used according to
	 * the # of channels that the host offered.
	 */
	if (sc->hn_tx_ring_inuse > ring_cnt)
		sc->hn_tx_ring_inuse = ring_cnt;
	sc->hn_rx_ring_inuse = ring_cnt;
	device_printf(dev, "%d TX ring, %d RX ring\n",
	    sc->hn_tx_ring_inuse, sc->hn_rx_ring_inuse);

	if (sc->hn_rx_ring_inuse > 1)
		hn_subchan_setup(sc);

#if __FreeBSD_version >= 1100099
	if (sc->hn_rx_ring_inuse > 1) {
		/*
		 * Reduce TCP segment aggregation limit for multiple
		 * RX rings to increase ACK timeliness.
		 */
		hn_set_lro_lenlim(sc, HN_LRO_LENLIM_MULTIRX_DEF);
	}
#endif

	if (device_info.link_state == 0) {
		sc->hn_carrier = 1;
	}

#if __FreeBSD_version >= 1100045
	tso_maxlen = hn_tso_maxlen;
	if (tso_maxlen <= 0 || tso_maxlen > IP_MAXPACKET)
		tso_maxlen = IP_MAXPACKET;

	ifp->if_hw_tsomaxsegcount = HN_TX_DATA_SEGCNT_MAX;
	ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
	ifp->if_hw_tsomax = tso_maxlen -
	    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
#endif

	ether_ifattach(ifp, device_info.mac_addr);

#if __FreeBSD_version >= 1100045
	if_printf(ifp, "TSO: %u/%u/%u\n", ifp->if_hw_tsomax,
	    ifp->if_hw_tsomaxsegcount, ifp->if_hw_tsomaxsegsize);
#endif

	sc->hn_tx_chimney_max = sc->net_dev->send_section_size;
	hn_set_tx_chimney_size(sc, sc->hn_tx_chimney_max);
	if (hn_tx_chimney_size > 0 &&
	    hn_tx_chimney_size < sc->hn_tx_chimney_max)
		hn_set_tx_chimney_size(sc, hn_tx_chimney_size);

	SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "nvs_version", CTLFLAG_RD, &sc->hn_nvs_ver, 0, "NVS version");
	return (0);
failed:
	hn_destroy_tx_data(sc);
	return (error);
}
/*
 * Standard detach entry point
 */
static int
netvsc_detach(device_t dev)
{
	struct hn_softc *sc = device_get_softc(dev);

	if (bootverbose)
		printf("netvsc_detach\n");

	/*
	 * XXXKYS:  Need to clean up all our
	 * driver state; this is the driver
	 * unloading.
	 */

	/*
	 * XXXKYS:  Need to stop outgoing traffic and unregister
	 * the netdevice.
	 */

	hv_rf_on_device_remove(sc, HV_RF_NV_DESTROY_CHANNEL);

	hn_stop_tx_tasks(sc);

	ifmedia_removeall(&sc->hn_media);
	hn_destroy_rx_data(sc);
	hn_destroy_tx_data(sc);

	if (sc->hn_tx_taskq != hn_tx_taskq)
		taskqueue_free(sc->hn_tx_taskq);

	vmbus_xact_ctx_destroy(sc->hn_xact);
	return (0);
}
/*
 * Standard shutdown entry point
 */
static int
netvsc_shutdown(device_t dev)
{

	return (0);
}
static int
hn_txdesc_dmamap_load(struct hn_tx_ring *txr, struct hn_txdesc *txd,
    struct mbuf **m_head, bus_dma_segment_t *segs, int *nsegs)
{
	struct mbuf *m = *m_head;
	int error;

	error = bus_dmamap_load_mbuf_sg(txr->hn_tx_data_dtag, txd->data_dmap,
	    m, segs, nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		struct mbuf *m_new;

		m_new = m_collapse(m, M_NOWAIT, HN_TX_DATA_SEGCNT_MAX);
		if (m_new == NULL)
			return ENOBUFS;
		*m_head = m = m_new;
		txr->hn_tx_collapsed++;

		error = bus_dmamap_load_mbuf_sg(txr->hn_tx_data_dtag,
		    txd->data_dmap, m, segs, nsegs, BUS_DMA_NOWAIT);
	}
	if (!error) {
		bus_dmamap_sync(txr->hn_tx_data_dtag, txd->data_dmap,
		    BUS_DMASYNC_PREWRITE);
		txd->flags |= HN_TXD_FLAG_DMAMAP;
	}
	return error;
}
static void
hn_txdesc_dmamap_unload(struct hn_tx_ring *txr, struct hn_txdesc *txd)
{

	if (txd->flags & HN_TXD_FLAG_DMAMAP) {
		bus_dmamap_sync(txr->hn_tx_data_dtag,
		    txd->data_dmap, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(txr->hn_tx_data_dtag,
		    txd->data_dmap);
		txd->flags &= ~HN_TXD_FLAG_DMAMAP;
	}
}
static int
hn_txdesc_put(struct hn_tx_ring *txr, struct hn_txdesc *txd)
{

	KASSERT((txd->flags & HN_TXD_FLAG_ONLIST) == 0,
	    ("put an onlist txd %#x", txd->flags));

	KASSERT(txd->refs > 0, ("invalid txd refs %d", txd->refs));
	if (atomic_fetchadd_int(&txd->refs, -1) != 1)
		return 0;

	hn_txdesc_dmamap_unload(txr, txd);
	if (txd->m != NULL) {
		m_freem(txd->m);
		txd->m = NULL;
	}

	txd->flags |= HN_TXD_FLAG_ONLIST;

#ifndef HN_USE_TXDESC_BUFRING
	mtx_lock_spin(&txr->hn_txlist_spin);
	KASSERT(txr->hn_txdesc_avail >= 0 &&
	    txr->hn_txdesc_avail < txr->hn_txdesc_cnt,
	    ("txdesc_put: invalid txd avail %d", txr->hn_txdesc_avail));
	txr->hn_txdesc_avail++;
	SLIST_INSERT_HEAD(&txr->hn_txlist, txd, link);
	mtx_unlock_spin(&txr->hn_txlist_spin);
#else
	atomic_add_int(&txr->hn_txdesc_avail, 1);
	buf_ring_enqueue(txr->hn_txdesc_br, txd);
#endif

	return 1;
}
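/*
 * The TX descriptor free pool has two interchangeable implementations:
 * with HN_USE_TXDESC_BUFRING the descriptors are recycled through a
 * lockless buf_ring, otherwise through an SLIST protected by the
 * hn_txlist_spin spin mutex.  hn_txdesc_get()/hn_txdesc_put() hide
 * this difference from the rest of the driver.
 */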
static __inline struct hn_txdesc *
hn_txdesc_get(struct hn_tx_ring *txr)
{
	struct hn_txdesc *txd;

#ifndef HN_USE_TXDESC_BUFRING
	mtx_lock_spin(&txr->hn_txlist_spin);
	txd = SLIST_FIRST(&txr->hn_txlist);
	if (txd != NULL) {
		KASSERT(txr->hn_txdesc_avail > 0,
		    ("txdesc_get: invalid txd avail %d", txr->hn_txdesc_avail));
		txr->hn_txdesc_avail--;
		SLIST_REMOVE_HEAD(&txr->hn_txlist, link);
	}
	mtx_unlock_spin(&txr->hn_txlist_spin);
#else
	txd = buf_ring_dequeue_sc(txr->hn_txdesc_br);
#endif

	if (txd != NULL) {
#ifdef HN_USE_TXDESC_BUFRING
		atomic_subtract_int(&txr->hn_txdesc_avail, 1);
#endif
		KASSERT(txd->m == NULL && txd->refs == 0 &&
		    (txd->flags & HN_TXD_FLAG_ONLIST), ("invalid txd"));
		txd->flags &= ~HN_TXD_FLAG_ONLIST;
		txd->refs = 1;
	}
	return txd;
}
static __inline void
hn_txdesc_hold(struct hn_txdesc *txd)
{

	/* 0->1 transition will never work */
	KASSERT(txd->refs > 0, ("invalid refs %d", txd->refs));
	atomic_add_int(&txd->refs, 1);
}
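/*
 * A txdesc starts with one reference when taken off the free pool in
 * hn_txdesc_get().  hn_txdesc_hold() adds a transient reference (e.g.
 * around ETHER_BPF_MTAP in hn_send_pkt()), and hn_txdesc_put() drops
 * one; the descriptor is torn down and returned to the free pool only
 * on the 1->0 transition.
 */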
static void
hn_txeof(struct hn_tx_ring *txr)
{
	txr->hn_has_txeof = 0;
	txr->hn_txeof(txr);
}
static void
hn_tx_done(struct hn_send_ctx *sndc, struct netvsc_dev_ *net_dev,
    struct vmbus_channel *chan, const void *data __unused, int dlen __unused)
{
	struct hn_txdesc *txd = sndc->hn_cbarg;
	struct hn_tx_ring *txr;

	if (sndc->hn_chim_idx != HN_NVS_CHIM_IDX_INVALID)
		hn_chim_free(net_dev, sndc->hn_chim_idx);

	txr = txd->txr;
	KASSERT(txr->hn_chan == chan,
	    ("channel mismatch, on chan%u, should be chan%u",
	     vmbus_chan_subidx(chan), vmbus_chan_subidx(txr->hn_chan)));

	txr->hn_has_txeof = 1;
	hn_txdesc_put(txr, txd);

	++txr->hn_txdone_cnt;
	if (txr->hn_txdone_cnt >= HN_EARLY_TXEOF_THRESH) {
		txr->hn_txdone_cnt = 0;
		if (txr->hn_oactive)
			hn_txeof(txr);
	}
}
void
netvsc_channel_rollup(struct hn_rx_ring *rxr, struct hn_tx_ring *txr)
{
#if defined(INET) || defined(INET6)
	tcp_lro_flush_all(&rxr->hn_lro);
#endif

	/*
	 * 'txr' could be NULL if there are multiple channels and the
	 * ifnet.if_start method is enabled.
	 */
	if (txr == NULL || !txr->hn_has_txeof)
		return;

	txr->hn_txdone_cnt = 0;
	hn_txeof(txr);
}
/*
 * NOTE:
 * If this function fails, then both txd and m_head0 will be freed.
 */
static int
hn_encap(struct hn_tx_ring *txr, struct hn_txdesc *txd, struct mbuf **m_head0)
{
	bus_dma_segment_t segs[HN_TX_DATA_SEGCNT_MAX];
	int error, nsegs, i;
	struct mbuf *m_head = *m_head0;
	rndis_msg *rndis_mesg;
	rndis_packet *rndis_pkt;
	rndis_per_packet_info *rppi;
	struct rndis_hash_value *hash_value;
	uint32_t rndis_msg_size, tot_data_buf_len, send_buf_section_idx;
	int send_buf_section_size;

	tot_data_buf_len = m_head->m_pkthdr.len;

	/*
	 * extension points to the area reserved for the
	 * rndis_filter_packet, which is placed just after
	 * the netvsc_packet (and rppi struct, if present;
	 * length is updated later).
	 */
	rndis_mesg = txd->rndis_msg;
	/* XXX not necessary */
	memset(rndis_mesg, 0, HN_RNDIS_MSG_LEN);
	rndis_mesg->ndis_msg_type = REMOTE_NDIS_PACKET_MSG;

	rndis_pkt = &rndis_mesg->msg.packet;
	rndis_pkt->data_offset = sizeof(rndis_packet);
	rndis_pkt->data_length = tot_data_buf_len;
	rndis_pkt->per_pkt_info_offset = sizeof(rndis_packet);

	rndis_msg_size = RNDIS_MESSAGE_SIZE(rndis_packet);

	/*
	 * Set the hash value for this packet, so that the host could
	 * dispatch the TX done event for this packet back to this TX
	 * ring's channel.
	 */
	rndis_msg_size += RNDIS_HASHVAL_PPI_SIZE;
	rppi = hv_set_rppi_data(rndis_mesg, RNDIS_HASHVAL_PPI_SIZE,
	    nbl_hash_value);
	hash_value = (struct rndis_hash_value *)((uint8_t *)rppi +
	    rppi->per_packet_info_offset);
	hash_value->hash_value = txr->hn_tx_idx;

	if (m_head->m_flags & M_VLANTAG) {
		ndis_8021q_info *rppi_vlan_info;

		rndis_msg_size += RNDIS_VLAN_PPI_SIZE;
		rppi = hv_set_rppi_data(rndis_mesg, RNDIS_VLAN_PPI_SIZE,
		    ieee_8021q_info);

		rppi_vlan_info = (ndis_8021q_info *)((uint8_t *)rppi +
		    rppi->per_packet_info_offset);
		rppi_vlan_info->u1.s1.vlan_id =
		    m_head->m_pkthdr.ether_vtag & 0xfff;
	}

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		rndis_tcp_tso_info *tso_info;
		struct ether_vlan_header *eh;
		int ether_len;

		/*
		 * XXX need m_pullup and use mtodo
		 */
		eh = mtod(m_head, struct ether_vlan_header*);
		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
			ether_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		else
			ether_len = ETHER_HDR_LEN;

		rndis_msg_size += RNDIS_TSO_PPI_SIZE;
		rppi = hv_set_rppi_data(rndis_mesg, RNDIS_TSO_PPI_SIZE,
		    tcp_large_send_info);

		tso_info = (rndis_tcp_tso_info *)((uint8_t *)rppi +
		    rppi->per_packet_info_offset);
		tso_info->lso_v2_xmit.type =
		    RNDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;

#ifdef INET
		if (m_head->m_pkthdr.csum_flags & CSUM_IP_TSO) {
			struct ip *ip =
			    (struct ip *)(m_head->m_data + ether_len);
			unsigned long iph_len = ip->ip_hl << 2;
			struct tcphdr *th =
			    (struct tcphdr *)((caddr_t)ip + iph_len);

			tso_info->lso_v2_xmit.ip_version =
			    RNDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
			ip->ip_len = 0;
			ip->ip_sum = 0;

			th->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		}
#endif
#if defined(INET6) && defined(INET)
		else
#endif
#ifdef INET6
		{
			struct ip6_hdr *ip6 = (struct ip6_hdr *)
			    (m_head->m_data + ether_len);
			struct tcphdr *th = (struct tcphdr *)(ip6 + 1);

			tso_info->lso_v2_xmit.ip_version =
			    RNDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
			ip6->ip6_plen = 0;
			th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
		}
#endif
		tso_info->lso_v2_xmit.tcp_header_offset = 0;
		tso_info->lso_v2_xmit.mss = m_head->m_pkthdr.tso_segsz;
	} else if (m_head->m_pkthdr.csum_flags & txr->hn_csum_assist) {
		rndis_tcp_ip_csum_info *csum_info;

		rndis_msg_size += RNDIS_CSUM_PPI_SIZE;
		rppi = hv_set_rppi_data(rndis_mesg, RNDIS_CSUM_PPI_SIZE,
		    tcpip_chksum_info);
		csum_info = (rndis_tcp_ip_csum_info *)((uint8_t *)rppi +
		    rppi->per_packet_info_offset);

		csum_info->xmit.is_ipv4 = 1;
		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
			csum_info->xmit.ip_header_csum = 1;

		if (m_head->m_pkthdr.csum_flags & CSUM_TCP) {
			csum_info->xmit.tcp_csum = 1;
			csum_info->xmit.tcp_header_offset = 0;
		} else if (m_head->m_pkthdr.csum_flags & CSUM_UDP) {
			csum_info->xmit.udp_csum = 1;
		}
	}

	rndis_mesg->msg_len = tot_data_buf_len + rndis_msg_size;
	tot_data_buf_len = rndis_mesg->msg_len;

	/*
	 * Chimney send, if the packet could fit into one chimney buffer.
	 */
	if (tot_data_buf_len < txr->hn_tx_chimney_size) {
		netvsc_dev *net_dev = txr->hn_sc->net_dev;

		txr->hn_tx_chimney_tried++;
		send_buf_section_idx =
		    hv_nv_get_next_send_section(net_dev);
		if (send_buf_section_idx != HN_NVS_CHIM_IDX_INVALID) {
			uint8_t *dest = ((uint8_t *)net_dev->send_buf +
			    (send_buf_section_idx *
			     net_dev->send_section_size));

			memcpy(dest, rndis_mesg, rndis_msg_size);
			dest += rndis_msg_size;
			m_copydata(m_head, 0, m_head->m_pkthdr.len, dest);

			send_buf_section_size = tot_data_buf_len;
			txr->hn_gpa_cnt = 0;
			txr->hn_tx_chimney++;
			goto done;
		}
	}
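	/*
	 * If the packet did not fit into a chimney (pre-shared send
	 * buffer) section above, it falls through to the page-buffer
	 * path below: the RNDIS message occupies the reserved leading
	 * hn_gpa[] slots and each DMA segment of the mbuf chain is
	 * handed to the host by physical address, avoiding the copy at
	 * the cost of per-segment setup.
	 */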
	error = hn_txdesc_dmamap_load(txr, txd, &m_head, segs, &nsegs);
	if (error) {
		int freed;

		/*
		 * This mbuf is not linked w/ the txd yet, so free it now.
		 */
		m_freem(m_head);
		*m_head0 = NULL;

		freed = hn_txdesc_put(txr, txd);
		KASSERT(freed != 0,
		    ("fail to free txd upon txdma error"));

		txr->hn_txdma_failed++;
		if_inc_counter(txr->hn_sc->hn_ifp, IFCOUNTER_OERRORS, 1);
		return error;
	}
	*m_head0 = m_head;

	txr->hn_gpa_cnt = nsegs + HV_RF_NUM_TX_RESERVED_PAGE_BUFS;

	/* send packet with page buffer */
	txr->hn_gpa[0].gpa_page = atop(txd->rndis_msg_paddr);
	txr->hn_gpa[0].gpa_ofs = txd->rndis_msg_paddr & PAGE_MASK;
	txr->hn_gpa[0].gpa_len = rndis_msg_size;

	/*
	 * Fill the page buffers with mbuf info starting at index
	 * HV_RF_NUM_TX_RESERVED_PAGE_BUFS.
	 */
	for (i = 0; i < nsegs; ++i) {
		struct vmbus_gpa *gpa = &txr->hn_gpa[
		    i + HV_RF_NUM_TX_RESERVED_PAGE_BUFS];

		gpa->gpa_page = atop(segs[i].ds_addr);
		gpa->gpa_ofs = segs[i].ds_addr & PAGE_MASK;
		gpa->gpa_len = segs[i].ds_len;
	}

	send_buf_section_idx = HN_NVS_CHIM_IDX_INVALID;
	send_buf_section_size = 0;
done:
	txd->m = m_head;

	/* Set the completion routine */
	hn_send_ctx_init(&txd->send_ctx, hn_tx_done, txd,
	    send_buf_section_idx, send_buf_section_size);

	return 0;
}
/*
 * If this function fails, then txd will be freed, but the mbuf
 * associated w/ the txd will _not_ be freed.
 */
static int
hn_send_pkt(struct ifnet *ifp, struct hn_tx_ring *txr, struct hn_txdesc *txd)
{
	int error, send_failed = 0;

again:
	/*
	 * Make sure that txd is not freed before ETHER_BPF_MTAP.
	 */
	hn_txdesc_hold(txd);
	error = hv_nv_on_send(txr->hn_chan, HN_NVS_RNDIS_MTYPE_DATA,
	    &txd->send_ctx, txr->hn_gpa, txr->hn_gpa_cnt);
	if (!error) {
		ETHER_BPF_MTAP(ifp, txd->m);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if (!hn_use_if_start) {
			if_inc_counter(ifp, IFCOUNTER_OBYTES,
			    txd->m->m_pkthdr.len);
			if (txd->m->m_flags & M_MCAST)
				if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
		}
		txr->hn_pkts++;
	}
	hn_txdesc_put(txr, txd);

	if (__predict_false(error)) {
		int freed;

		/*
		 * This should "really rarely" happen.
		 *
		 * XXX Too many RX to be acked or too many sideband
		 * commands to run?  Ask netvsc_channel_rollup()
		 * to kick start later.
		 */
		txr->hn_has_txeof = 1;
		if (!send_failed) {
			txr->hn_send_failed++;
			send_failed = 1;
			/*
			 * Try sending again after setting hn_has_txeof,
			 * in case we missed the last
			 * netvsc_channel_rollup().
			 */
			goto again;
		}
		if_printf(ifp, "send failed\n");

		/*
		 * Caller will perform further processing on the
		 * associated mbuf, so don't free it in hn_txdesc_put();
		 * only unload it from the DMA map in hn_txdesc_put(),
		 * if it was loaded.
		 */
		txd->m = NULL;
		freed = hn_txdesc_put(txr, txd);
		KASSERT(freed != 0,
		    ("fail to free txd upon send error"));

		txr->hn_send_failed++;
	}
	return error;
}
/*
 * Start a transmit of one or more packets
 */
static int
hn_start_locked(struct hn_tx_ring *txr, int len)
{
	struct hn_softc *sc = txr->hn_sc;
	struct ifnet *ifp = sc->hn_ifp;

	KASSERT(hn_use_if_start,
	    ("hn_start_locked is called, when if_start is disabled"));
	KASSERT(txr == &sc->hn_tx_ring[0], ("not the first TX ring"));
	mtx_assert(&txr->hn_tx_lock, MA_OWNED);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return 0;

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		struct hn_txdesc *txd;
		struct mbuf *m_head;
		int error;

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (len > 0 && m_head->m_pkthdr.len > len) {
			/*
			 * This sending could be time consuming; let callers
			 * dispatch this packet sending (and sending of any
			 * following packets) to tx taskqueue.
			 */
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			return 1;
		}

		txd = hn_txdesc_get(txr);
		if (txd == NULL) {
			txr->hn_no_txdescs++;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
			break;
		}

		error = hn_encap(txr, txd, &m_head);
		if (error) {
			/* Both txd and m_head are freed */
			continue;
		}

		error = hn_send_pkt(ifp, txr, txd);
		if (__predict_false(error)) {
			/* txd is freed, but m_head is not */
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
			break;
		}
	}
	return 0;
}
/*
 * Link up/down notification
 */
void
netvsc_linkstatus_callback(struct hn_softc *sc, uint32_t status)
{
	if (status == 1)
		sc->hn_carrier = 1;
	else
		sc->hn_carrier = 0;
}
/*
 * Append the specified data to the indicated mbuf chain.
 * Extend the mbuf chain if the new data does not fit in
 * existing space.
 *
 * This is a minor rewrite of m_append() from sys/kern/uipc_mbuf.c.
 * There should be an equivalent in the kernel mbuf code,
 * but there does not appear to be one yet.
 *
 * Differs from m_append() in that additional mbufs are
 * allocated with cluster size MJUMPAGESIZE, and filled
 * as far as possible before allocating more.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
static int
hv_m_append(struct mbuf *m0, int len, c_caddr_t cp)
{
	struct mbuf *m, *n;
	int remainder, space;

	for (m = m0; m->m_next != NULL; m = m->m_next)
		;
	remainder = len;
	space = M_TRAILINGSPACE(m);
	if (space > 0) {
		/*
		 * Copy into available space.
		 */
		if (space > remainder)
			space = remainder;
		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
		m->m_len += space;
		cp += space;
		remainder -= space;
	}
	while (remainder > 0) {
		/*
		 * Allocate a new mbuf; could check space
		 * and allocate a cluster instead.
		 */
		n = m_getjcl(M_NOWAIT, m->m_type, 0, MJUMPAGESIZE);
		if (n == NULL)
			break;
		n->m_len = min(MJUMPAGESIZE, remainder);
		bcopy(cp, mtod(n, caddr_t), n->m_len);
		cp += n->m_len;
		remainder -= n->m_len;
		m->m_next = n;
		m = n;
	}
	if (m0->m_flags & M_PKTHDR)
		m0->m_pkthdr.len += len - remainder;

	return (remainder == 0);
}
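/*
 * hv_m_append() is used by netvsc_recv() below to copy a received
 * packet into a chain of MJUMPAGESIZE clusters; a 0 return (mbuf
 * allocation failure partway through) leaves the chain holding only
 * the bytes copied so far.
 */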
#if defined(INET) || defined(INET6)
static int
hn_lro_rx(struct lro_ctrl *lc, struct mbuf *m)
{

#if __FreeBSD_version >= 1100095
	if (hn_lro_mbufq_depth) {
		tcp_lro_queue_mbuf(lc, m);
		return 0;
	}
#endif
	return tcp_lro_rx(lc, m, 0);
}
#endif
/*
 * Called when we receive a data packet from the "wire" on the
 * specified device.
 *
 * Note:  This is no longer used as a callback.
 */
int
netvsc_recv(struct hn_rx_ring *rxr, const void *data, int dlen,
    const struct hn_recvinfo *info)
{
	struct ifnet *ifp = rxr->hn_ifp;
	struct mbuf *m_new;
	int size, do_lro = 0, do_csum = 1;
	int hash_type = M_HASHTYPE_OPAQUE_HASH;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return (0);

	/*
	 * Bail out if packet contains more data than the configured MTU.
	 */
	if (dlen > (ifp->if_mtu + ETHER_HDR_LEN)) {
		return (0);
	} else if (dlen <= MHLEN) {
		m_new = m_gethdr(M_NOWAIT, MT_DATA);
		if (m_new == NULL) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			return (0);
		}
		memcpy(mtod(m_new, void *), data, dlen);
		m_new->m_pkthdr.len = m_new->m_len = dlen;
		rxr->hn_small_pkts++;
	} else {
		/*
		 * Get an mbuf with a cluster.  For packets 2K or less,
		 * get a standard 2K cluster.  For anything larger, get a
		 * 4K cluster.  Any buffers larger than 4K can cause problems
		 * if looped around to the Hyper-V TX channel, so avoid them.
		 */
		size = MCLBYTES;
		if (dlen > MCLBYTES) {
			/* 4096 */
			size = MJUMPAGESIZE;
		}

		m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, size);
		if (m_new == NULL) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			return (0);
		}

		hv_m_append(m_new, dlen, data);
	}
	m_new->m_pkthdr.rcvif = ifp;

	if (__predict_false((ifp->if_capenable & IFCAP_RXCSUM) == 0))
		do_csum = 0;

	/* receive side checksum offload */
	if (info->csum_info != NULL) {
		/* IP csum offload */
		if (info->csum_info->receive.ip_csum_succeeded && do_csum) {
			m_new->m_pkthdr.csum_flags |=
			    (CSUM_IP_CHECKED | CSUM_IP_VALID);
			rxr->hn_csum_ip++;
		}

		/* TCP/UDP csum offload */
		if ((info->csum_info->receive.tcp_csum_succeeded ||
		     info->csum_info->receive.udp_csum_succeeded) && do_csum) {
			m_new->m_pkthdr.csum_flags |=
			    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
			m_new->m_pkthdr.csum_data = 0xffff;
			if (info->csum_info->receive.tcp_csum_succeeded)
				rxr->hn_csum_tcp++;
			else
				rxr->hn_csum_udp++;
		}

		if (info->csum_info->receive.ip_csum_succeeded &&
		    info->csum_info->receive.tcp_csum_succeeded)
			do_lro = 1;
	} else {
		const struct ether_header *eh;
		uint16_t etype;
		int hoff;

		hoff = sizeof(*eh);
		if (m_new->m_len < hoff)
			goto skip;
		eh = mtod(m_new, struct ether_header *);
		etype = ntohs(eh->ether_type);
		if (etype == ETHERTYPE_VLAN) {
			const struct ether_vlan_header *evl;

			hoff = sizeof(*evl);
			if (m_new->m_len < hoff)
				goto skip;
			evl = mtod(m_new, struct ether_vlan_header *);
			etype = ntohs(evl->evl_proto);
		}

		if (etype == ETHERTYPE_IP) {
			int pr;

			pr = hn_check_iplen(m_new, hoff);
			if (pr == IPPROTO_TCP) {
				if (do_csum &&
				    (rxr->hn_trust_hcsum &
				     HN_TRUST_HCSUM_TCP)) {
					rxr->hn_csum_trusted++;
					m_new->m_pkthdr.csum_flags |=
					    (CSUM_IP_CHECKED | CSUM_IP_VALID |
					     CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
					m_new->m_pkthdr.csum_data = 0xffff;
				}
				do_lro = 1;
			} else if (pr == IPPROTO_UDP) {
				if (do_csum &&
				    (rxr->hn_trust_hcsum &
				     HN_TRUST_HCSUM_UDP)) {
					rxr->hn_csum_trusted++;
					m_new->m_pkthdr.csum_flags |=
					    (CSUM_IP_CHECKED | CSUM_IP_VALID |
					     CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
					m_new->m_pkthdr.csum_data = 0xffff;
				}
			} else if (pr != IPPROTO_DONE && do_csum &&
			    (rxr->hn_trust_hcsum & HN_TRUST_HCSUM_IP)) {
				rxr->hn_csum_trusted++;
				m_new->m_pkthdr.csum_flags |=
				    (CSUM_IP_CHECKED | CSUM_IP_VALID);
			}
		}
	}
skip:
	if (info->vlan_info != NULL) {
		m_new->m_pkthdr.ether_vtag = info->vlan_info->u1.s1.vlan_id;
		m_new->m_flags |= M_VLANTAG;
	}

	if (info->hash_info != NULL && info->hash_value != NULL) {
		rxr->hn_rss_pkts++;
		m_new->m_pkthdr.flowid = info->hash_value->hash_value;
		if ((info->hash_info->hash_info & NDIS_HASH_FUNCTION_MASK) ==
		    NDIS_HASH_FUNCTION_TOEPLITZ) {
			uint32_t type =
			    (info->hash_info->hash_info & NDIS_HASH_TYPE_MASK);

			switch (type) {
			case NDIS_HASH_IPV4:
				hash_type = M_HASHTYPE_RSS_IPV4;
				break;

			case NDIS_HASH_TCP_IPV4:
				hash_type = M_HASHTYPE_RSS_TCP_IPV4;
				break;

			case NDIS_HASH_IPV6:
				hash_type = M_HASHTYPE_RSS_IPV6;
				break;

			case NDIS_HASH_IPV6_EX:
				hash_type = M_HASHTYPE_RSS_IPV6_EX;
				break;

			case NDIS_HASH_TCP_IPV6:
				hash_type = M_HASHTYPE_RSS_TCP_IPV6;
				break;

			case NDIS_HASH_TCP_IPV6_EX:
				hash_type = M_HASHTYPE_RSS_TCP_IPV6_EX;
				break;
			}
		}
	} else if (info->hash_value != NULL) {
		m_new->m_pkthdr.flowid = info->hash_value->hash_value;
	} else {
		m_new->m_pkthdr.flowid = rxr->hn_rx_idx;
		hash_type = M_HASHTYPE_OPAQUE;
	}
	M_HASHTYPE_SET(m_new, hash_type);

	/*
	 * Note:  Moved RX completion back to hv_nv_on_receive() so all
	 * messages (not just data messages) will trigger a response.
	 */

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
	rxr->hn_pkts++;

	if ((ifp->if_capenable & IFCAP_LRO) && do_lro) {
#if defined(INET) || defined(INET6)
		struct lro_ctrl *lro = &rxr->hn_lro;

		if (lro->lro_cnt) {
			rxr->hn_lro_tried++;
			if (hn_lro_rx(lro, m_new) == 0) {
				/* DONE! */
				return 0;
			}
		}
#endif
	}

	/* We're not holding the lock here, so don't release it */
	(*ifp->if_input)(ifp, m_new);

	return (0);
}
1490 * Rules for using sc->temp_unusable:
1491 * 1. sc->temp_unusable can only be read or written while holding NV_LOCK()
1492 * 2. code reading sc->temp_unusable under NV_LOCK(), and finding
1493 * sc->temp_unusable set, must release NV_LOCK() and exit
1494 * 3. to retain exclusive control of the interface,
1495 * sc->temp_unusable must be set by code before releasing NV_LOCK()
1496 * 4. only code setting sc->temp_unusable can clear sc->temp_unusable
1497 * 5. code setting sc->temp_unusable must eventually clear sc->temp_unusable
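/*
 * A condensed sketch of the pattern hn_ioctl() uses below to honor
 * these rules (retry_cnt bounds the wait for the current owner):
 *
 *	do {
 *		NV_LOCK(sc);
 *		if (!sc->temp_unusable) {
 *			sc->temp_unusable = TRUE;	// rule 3
 *			retry_cnt = -1;
 *		}
 *		NV_UNLOCK(sc);
 *		if (retry_cnt > 0) {
 *			retry_cnt--;
 *			DELAY(5 * 1000);
 *		}
 *	} while (retry_cnt > 0);
 *	... reconfigure the interface unlocked ...
 *	sc->temp_unusable = FALSE;			// rules 4 and 5
 */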
/*
 * Standard ioctl entry point.  Called when the user wants to configure
 * the interface.
 */
static int
hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	hn_softc_t *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
#ifdef INET
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	netvsc_device_info device_info;
	int mask, error = 0, ring_cnt;
	int retry_cnt = 500;

	switch (cmd) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				hn_ifinit(sc);
			arp_ifinit(ifp, ifa);
		} else
#endif
			error = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCSIFMTU:
		/* Check MTU value change */
		if (ifp->if_mtu == ifr->ifr_mtu)
			break;

		if (ifr->ifr_mtu > NETVSC_MAX_CONFIGURABLE_MTU) {
			error = EINVAL;
			break;
		}

		/* Obtain and record requested MTU */
		ifp->if_mtu = ifr->ifr_mtu;

#if __FreeBSD_version >= 1100099
		/*
		 * Make sure that LRO aggregation length limit is still
		 * valid, after the MTU change.
		 */
		if (sc->hn_rx_ring[0].hn_lro.lro_length_lim <
		    HN_LRO_LENLIM_MIN(ifp))
			hn_set_lro_lenlim(sc, HN_LRO_LENLIM_MIN(ifp));
#endif

		do {
			NV_LOCK(sc);
			if (!sc->temp_unusable) {
				sc->temp_unusable = TRUE;
				retry_cnt = -1;
			}
			NV_UNLOCK(sc);
			if (retry_cnt > 0) {
				retry_cnt--;
				DELAY(5 * 1000);
			}
		} while (retry_cnt > 0);

		if (retry_cnt == 0) {
			error = EBUSY;
			break;
		}

		/*
		 * We must remove and add back the device to cause the new
		 * MTU to take effect.  This includes tearing down, but not
		 * deleting the channel, then bringing it back up.
		 */
		error = hv_rf_on_device_remove(sc, HV_RF_NV_RETAIN_CHANNEL);
		if (error) {
			NV_LOCK(sc);
			sc->temp_unusable = FALSE;
			NV_UNLOCK(sc);
			break;
		}

		/* Wait for subchannels to be destroyed */
		vmbus_subchan_drain(sc->hn_prichan);

		ring_cnt = sc->hn_rx_ring_inuse;
		error = hv_rf_on_device_add(sc, &device_info, &ring_cnt,
		    &sc->hn_rx_ring[0]);
		if (error) {
			NV_LOCK(sc);
			sc->temp_unusable = FALSE;
			NV_UNLOCK(sc);
			break;
		}
		/* # of channels can _not_ be changed */
		KASSERT(sc->hn_rx_ring_inuse == ring_cnt,
		    ("RX ring count %d and channel count %u mismatch",
		     sc->hn_rx_ring_inuse, ring_cnt));
		if (sc->hn_rx_ring_inuse > 1) {
			int r;

			/*
			 * Skip the rings on primary channel; they are
			 * handled by the hv_rf_on_device_add() above.
			 */
			for (r = 1; r < sc->hn_rx_ring_cnt; ++r) {
				sc->hn_rx_ring[r].hn_rx_flags &=
				    ~HN_RX_FLAG_ATTACHED;
			}
			for (r = 1; r < sc->hn_tx_ring_cnt; ++r) {
				sc->hn_tx_ring[r].hn_tx_flags &=
				    ~HN_TX_FLAG_ATTACHED;
			}
			hn_subchan_setup(sc);
		}

		sc->hn_tx_chimney_max = sc->net_dev->send_section_size;
		if (sc->hn_tx_ring[0].hn_tx_chimney_size >
		    sc->hn_tx_chimney_max)
			hn_set_tx_chimney_size(sc, sc->hn_tx_chimney_max);

		hn_ifinit_locked(sc);

		NV_LOCK(sc);
		sc->temp_unusable = FALSE;
		NV_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		do {
			NV_LOCK(sc);
			if (!sc->temp_unusable) {
				sc->temp_unusable = TRUE;
				retry_cnt = -1;
			}
			NV_UNLOCK(sc);
			if (retry_cnt > 0) {
				retry_cnt--;
				DELAY(5 * 1000);
			}
		} while (retry_cnt > 0);

		if (retry_cnt == 0) {
			error = EBUSY;
			break;
		}

		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC. Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.
			 */
			/* Fixme:  Promiscuous mode? */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->hn_if_flags & IFF_PROMISC)) {
				/* do something here for Hyper-V */
			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->hn_if_flags & IFF_PROMISC) {
				/* do something here for Hyper-V */
			} else
				hn_ifinit_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				hn_stop(sc);
			}
		}
		NV_LOCK(sc);
		sc->temp_unusable = FALSE;
		NV_UNLOCK(sc);
		sc->hn_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM) {
				ifp->if_hwassist |=
				    sc->hn_tx_ring[0].hn_csum_assist;
			} else {
				ifp->if_hwassist &=
				    ~sc->hn_tx_ring[0].hn_csum_assist;
			}
		}

		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;

		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;

		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if (ifp->if_capenable & IFCAP_TSO4)
				ifp->if_hwassist |= CSUM_IP_TSO;
			else
				ifp->if_hwassist &= ~CSUM_IP_TSO;
		}

		if (mask & IFCAP_TSO6) {
			ifp->if_capenable ^= IFCAP_TSO6;
			if (ifp->if_capenable & IFCAP_TSO6)
				ifp->if_hwassist |= CSUM_IP6_TSO;
			else
				ifp->if_hwassist &= ~CSUM_IP6_TSO;
		}

		error = 0;
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Fixme:  Multicast mode? */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			NV_LOCK(sc);
			netvsc_setmulti(sc);
			NV_UNLOCK(sc);
		}
		error = 0;
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->hn_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}
static void
hn_stop(hn_softc_t *sc)
{
	struct ifnet *ifp;
	int ret, i;

	ifp = sc->hn_ifp;

	if (bootverbose)
		printf(" Closing Device ...\n");

	atomic_clear_int(&ifp->if_drv_flags,
	    (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	for (i = 0; i < sc->hn_tx_ring_inuse; ++i)
		sc->hn_tx_ring[i].hn_oactive = 0;

	if_link_state_change(ifp, LINK_STATE_DOWN);
	sc->hn_initdone = 0;

	ret = hv_rf_on_close(sc);
}
/*
 * FreeBSD transmit entry point
 */
static void
hn_start(struct ifnet *ifp)
{
	struct hn_softc *sc = ifp->if_softc;
	struct hn_tx_ring *txr = &sc->hn_tx_ring[0];

	if (txr->hn_sched_tx)
		goto do_sched;

	if (mtx_trylock(&txr->hn_tx_lock)) {
		int sched;

		sched = hn_start_locked(txr, txr->hn_direct_tx_size);
		mtx_unlock(&txr->hn_tx_lock);
		if (!sched)
			return;
	}
do_sched:
	taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_tx_task);
}
static void
hn_start_txeof(struct hn_tx_ring *txr)
{
	struct hn_softc *sc = txr->hn_sc;
	struct ifnet *ifp = sc->hn_ifp;

	KASSERT(txr == &sc->hn_tx_ring[0], ("not the first TX ring"));

	if (txr->hn_sched_tx)
		goto do_sched;

	if (mtx_trylock(&txr->hn_tx_lock)) {
		int sched;

		atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
		sched = hn_start_locked(txr, txr->hn_direct_tx_size);
		mtx_unlock(&txr->hn_tx_lock);
		if (sched) {
			taskqueue_enqueue(txr->hn_tx_taskq,
			    &txr->hn_tx_task);
		}
	} else {
do_sched:
		/*
		 * Release the OACTIVE earlier, with the hope that
		 * others could catch up.  The task will clear the
		 * flag again with the hn_tx_lock to avoid possible
		 * races.
		 */
		atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
		taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_txeof_task);
	}
}
static void
hn_ifinit_locked(hn_softc_t *sc)
{
	struct ifnet *ifp;
	int ret, i;

	ifp = sc->hn_ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		return;
	}

	hv_promisc_mode = 1;

	ret = hv_rf_on_open(sc);
	if (ret != 0) {
		return;
	} else {
		sc->hn_initdone = 1;
	}

	atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
	for (i = 0; i < sc->hn_tx_ring_inuse; ++i)
		sc->hn_tx_ring[i].hn_oactive = 0;

	atomic_set_int(&ifp->if_drv_flags, IFF_DRV_RUNNING);
	if_link_state_change(ifp, LINK_STATE_UP);
}
static void
hn_ifinit(void *xsc)
{
	hn_softc_t *sc = xsc;

	NV_LOCK(sc);
	if (sc->temp_unusable) {
		NV_UNLOCK(sc);
		return;
	}
	sc->temp_unusable = TRUE;
	NV_UNLOCK(sc);

	hn_ifinit_locked(sc);

	NV_LOCK(sc);
	sc->temp_unusable = FALSE;
	NV_UNLOCK(sc);
}
#ifdef LATER
static void
hn_watchdog(struct ifnet *ifp)
{
	hn_softc_t *sc;

	sc = ifp->if_softc;

	printf("hn%d: watchdog timeout -- resetting\n", sc->hn_unit);
	hn_ifinit(sc);    /*???*/
	if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
}
#endif
#if __FreeBSD_version >= 1100099

static int
hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	unsigned int lenlim;
	int error;

	lenlim = sc->hn_rx_ring[0].hn_lro.lro_length_lim;
	error = sysctl_handle_int(oidp, &lenlim, 0, req);
	if (error || req->newptr == NULL)
		return error;

	if (lenlim < HN_LRO_LENLIM_MIN(sc->hn_ifp) ||
	    lenlim > TCP_LRO_LENGTH_MAX)
		return EINVAL;

	hn_set_lro_lenlim(sc, lenlim);
	return 0;
}
static int
hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int ackcnt, error, i;

	/*
	 * lro_ackcnt_lim is the append count limit;
	 * +1 turns it into the aggregation limit.
	 */
	ackcnt = sc->hn_rx_ring[0].hn_lro.lro_ackcnt_lim + 1;
	error = sysctl_handle_int(oidp, &ackcnt, 0, req);
	if (error || req->newptr == NULL)
		return error;

	if (ackcnt < 2 || ackcnt > (TCP_LRO_ACKCNT_MAX + 1))
		return EINVAL;

	/*
	 * Convert the aggregation limit back to the append
	 * count limit.
	 */
	--ackcnt;
	NV_LOCK(sc);
	for (i = 0; i < sc->hn_rx_ring_inuse; ++i)
		sc->hn_rx_ring[i].hn_lro.lro_ackcnt_lim = ackcnt;
	NV_UNLOCK(sc);
	return 0;
}

#endif
static int
hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int hcsum = arg2;
	int on, error, i;

	on = 0;
	if (sc->hn_rx_ring[0].hn_trust_hcsum & hcsum)
		on = 1;

	error = sysctl_handle_int(oidp, &on, 0, req);
	if (error || req->newptr == NULL)
		return error;

	NV_LOCK(sc);
	for (i = 0; i < sc->hn_rx_ring_inuse; ++i) {
		struct hn_rx_ring *rxr = &sc->hn_rx_ring[i];

		if (on)
			rxr->hn_trust_hcsum |= hcsum;
		else
			rxr->hn_trust_hcsum &= ~hcsum;
	}
	NV_UNLOCK(sc);
	return 0;
}
static int
hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int chimney_size, error;

	chimney_size = sc->hn_tx_ring[0].hn_tx_chimney_size;
	error = sysctl_handle_int(oidp, &chimney_size, 0, req);
	if (error || req->newptr == NULL)
		return error;

	if (chimney_size > sc->hn_tx_chimney_max || chimney_size <= 0)
		return EINVAL;

	hn_set_tx_chimney_size(sc, chimney_size);
	return 0;
}
static int
hn_rx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int ofs = arg2, i, error;
	struct hn_rx_ring *rxr;
	u_long stat;

	stat = 0;
	for (i = 0; i < sc->hn_rx_ring_inuse; ++i) {
		rxr = &sc->hn_rx_ring[i];
		stat += *((u_long *)((uint8_t *)rxr + ofs));
	}

	error = sysctl_handle_long(oidp, &stat, 0, req);
	if (error || req->newptr == NULL)
		return error;

	/* Zero out this stat. */
	for (i = 0; i < sc->hn_rx_ring_inuse; ++i) {
		rxr = &sc->hn_rx_ring[i];
		*((u_long *)((uint8_t *)rxr + ofs)) = 0;
	}
	return 0;
}
static int
hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int ofs = arg2, i, error;
	struct hn_rx_ring *rxr;
	uint64_t stat;

	stat = 0;
	for (i = 0; i < sc->hn_rx_ring_inuse; ++i) {
		rxr = &sc->hn_rx_ring[i];
		stat += *((uint64_t *)((uint8_t *)rxr + ofs));
	}

	error = sysctl_handle_64(oidp, &stat, 0, req);
	if (error || req->newptr == NULL)
		return error;

	/* Zero out this stat. */
	for (i = 0; i < sc->hn_rx_ring_inuse; ++i) {
		rxr = &sc->hn_rx_ring[i];
		*((uint64_t *)((uint8_t *)rxr + ofs)) = 0;
	}
	return 0;
}
static int
hn_tx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int ofs = arg2, i, error;
	struct hn_tx_ring *txr;
	u_long stat;

	stat = 0;
	for (i = 0; i < sc->hn_tx_ring_inuse; ++i) {
		txr = &sc->hn_tx_ring[i];
		stat += *((u_long *)((uint8_t *)txr + ofs));
	}

	error = sysctl_handle_long(oidp, &stat, 0, req);
	if (error || req->newptr == NULL)
		return error;

	/* Zero out this stat. */
	for (i = 0; i < sc->hn_tx_ring_inuse; ++i) {
		txr = &sc->hn_tx_ring[i];
		*((u_long *)((uint8_t *)txr + ofs)) = 0;
	}
	return 0;
}
static int
hn_tx_conf_int_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int ofs = arg2, i, error, conf;
	struct hn_tx_ring *txr;

	txr = &sc->hn_tx_ring[0];
	conf = *((int *)((uint8_t *)txr + ofs));

	error = sysctl_handle_int(oidp, &conf, 0, req);
	if (error || req->newptr == NULL)
		return error;

	NV_LOCK(sc);
	for (i = 0; i < sc->hn_tx_ring_inuse; ++i) {
		txr = &sc->hn_tx_ring[i];
		*((int *)((uint8_t *)txr + ofs)) = conf;
	}
	NV_UNLOCK(sc);

	return 0;
}
static int
hn_check_iplen(const struct mbuf *m, int hoff)
{
	const struct ip *ip;
	int len, iphlen, iplen;
	const struct tcphdr *th;
	int thoff;				/* TCP data offset */

	len = hoff + sizeof(struct ip);

	/* The packet must be at least the size of an IP header. */
	if (m->m_pkthdr.len < len)
		return IPPROTO_DONE;

	/* The fixed IP header must reside completely in the first mbuf. */
	if (m->m_len < len)
		return IPPROTO_DONE;

	ip = mtodo(m, hoff);

	/* Bound check the packet's stated IP header length. */
	iphlen = ip->ip_hl << 2;
	if (iphlen < sizeof(struct ip))		/* minimum header length */
		return IPPROTO_DONE;

	/* The full IP header must reside completely in the one mbuf. */
	if (m->m_len < hoff + iphlen)
		return IPPROTO_DONE;

	iplen = ntohs(ip->ip_len);

	/*
	 * Check that the amount of data in the buffers is at least as much
	 * as the IP header would have us expect.
	 */
	if (m->m_pkthdr.len < hoff + iplen)
		return IPPROTO_DONE;

	/*
	 * Ignore IP fragments.
	 */
	if (ntohs(ip->ip_off) & (IP_OFFMASK | IP_MF))
		return IPPROTO_DONE;

	/*
	 * The TCP/IP or UDP/IP header must be entirely contained within
	 * the first fragment of a packet.
	 */
	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (iplen < iphlen + sizeof(struct tcphdr))
			return IPPROTO_DONE;
		if (m->m_len < hoff + iphlen + sizeof(struct tcphdr))
			return IPPROTO_DONE;
		th = (const struct tcphdr *)((const uint8_t *)ip + iphlen);
		thoff = th->th_off << 2;
		if (thoff < sizeof(struct tcphdr) || thoff + iphlen > iplen)
			return IPPROTO_DONE;
		if (m->m_len < hoff + iphlen + thoff)
			return IPPROTO_DONE;
		break;
	case IPPROTO_UDP:
		if (iplen < iphlen + sizeof(struct udphdr))
			return IPPROTO_DONE;
		if (m->m_len < hoff + iphlen + sizeof(struct udphdr))
			return IPPROTO_DONE;
		break;
	default:
		if (iplen < iphlen)
			return IPPROTO_DONE;
		break;
	}
	return ip->ip_p;
}
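/*
 * netvsc_recv() uses the protocol returned above to decide whether a
 * host-verified checksum may be trusted (HN_TRUST_HCSUM_TCP/UDP/IP)
 * and whether the packet is a candidate for LRO; IPPROTO_DONE means
 * the headers did not validate and nothing should be assumed.
 */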
static void
hn_create_rx_data(struct hn_softc *sc, int ring_cnt)
{
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx;
	device_t dev = sc->hn_dev;
#if defined(INET) || defined(INET6)
#if __FreeBSD_version >= 1100095
	int lroent_cnt;
#endif
#endif
	int i;

	sc->hn_rx_ring_cnt = ring_cnt;
	sc->hn_rx_ring_inuse = sc->hn_rx_ring_cnt;

	sc->hn_rx_ring = malloc(sizeof(struct hn_rx_ring) * sc->hn_rx_ring_cnt,
	    M_NETVSC, M_WAITOK | M_ZERO);

#if defined(INET) || defined(INET6)
#if __FreeBSD_version >= 1100095
	lroent_cnt = hn_lro_entry_count;
	if (lroent_cnt < TCP_LRO_ENTRIES)
		lroent_cnt = TCP_LRO_ENTRIES;
	device_printf(dev, "LRO: entry count %d\n", lroent_cnt);
#endif
#endif	/* INET || INET6 */

	ctx = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	/* Create dev.hn.UNIT.rx sysctl tree */
	sc->hn_rx_sysctl_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "rx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");

	for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
		struct hn_rx_ring *rxr = &sc->hn_rx_ring[i];

		if (hn_trust_hosttcp)
			rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_TCP;
		if (hn_trust_hostudp)
			rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_UDP;
		if (hn_trust_hostip)
			rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_IP;
		rxr->hn_ifp = sc->hn_ifp;
		if (i < sc->hn_tx_ring_cnt)
			rxr->hn_txr = &sc->hn_tx_ring[i];
		rxr->hn_rdbuf = malloc(NETVSC_PACKET_SIZE, M_NETVSC, M_WAITOK);
		rxr->hn_rx_idx = i;

		/*
		 * Initialize LRO.
		 */
#if defined(INET) || defined(INET6)
#if __FreeBSD_version >= 1100095
		tcp_lro_init_args(&rxr->hn_lro, sc->hn_ifp, lroent_cnt,
		    hn_lro_mbufq_depth);
#else
		tcp_lro_init(&rxr->hn_lro);
		rxr->hn_lro.ifp = sc->hn_ifp;
#endif
#if __FreeBSD_version >= 1100099
		rxr->hn_lro.lro_length_lim = HN_LRO_LENLIM_DEF;
		rxr->hn_lro.lro_ackcnt_lim = HN_LRO_ACKCNT_DEF;
#endif
#endif	/* INET || INET6 */

		if (sc->hn_rx_sysctl_tree != NULL) {
			char name[16];

			/*
			 * Create per RX ring sysctl tree:
			 * dev.hn.UNIT.rx.RINGID
			 */
			snprintf(name, sizeof(name), "%d", i);
			rxr->hn_rx_sysctl_tree = SYSCTL_ADD_NODE(ctx,
			    SYSCTL_CHILDREN(sc->hn_rx_sysctl_tree),
			    OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");

			if (rxr->hn_rx_sysctl_tree != NULL) {
				SYSCTL_ADD_ULONG(ctx,
				    SYSCTL_CHILDREN(rxr->hn_rx_sysctl_tree),
				    OID_AUTO, "packets", CTLFLAG_RW,
				    &rxr->hn_pkts, "# of packets received");
				SYSCTL_ADD_ULONG(ctx,
				    SYSCTL_CHILDREN(rxr->hn_rx_sysctl_tree),
				    OID_AUTO, "rss_pkts", CTLFLAG_RW,
				    &rxr->hn_rss_pkts,
				    "# of packets w/ RSS info received");
			}
		}
	}

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_queued",
	    CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_rx_ring, hn_lro.lro_queued),
	    hn_rx_stat_u64_sysctl, "LU", "LRO queued");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_flushed",
	    CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_rx_ring, hn_lro.lro_flushed),
	    hn_rx_stat_u64_sysctl, "LU", "LRO flushed");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_tried",
	    CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_rx_ring, hn_lro_tried),
	    hn_rx_stat_ulong_sysctl, "LU", "# of LRO tries");
#if __FreeBSD_version >= 1100099
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_length_lim",
	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
	    hn_lro_lenlim_sysctl, "IU",
	    "Max # of data bytes to be aggregated by LRO");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_ackcnt_lim",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
	    hn_lro_ackcnt_sysctl, "I",
	    "Max # of ACKs to be aggregated by LRO");
#endif
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hosttcp",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, HN_TRUST_HCSUM_TCP,
	    hn_trust_hcsum_sysctl, "I",
	    "Trust tcp segment verification on host side, "
	    "when csum info is missing");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostudp",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, HN_TRUST_HCSUM_UDP,
	    hn_trust_hcsum_sysctl, "I",
	    "Trust udp datagram verification on host side, "
	    "when csum info is missing");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostip",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, HN_TRUST_HCSUM_IP,
	    hn_trust_hcsum_sysctl, "I",
	    "Trust ip packet verification on host side, "
	    "when csum info is missing");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_ip",
	    CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_rx_ring, hn_csum_ip),
	    hn_rx_stat_ulong_sysctl, "LU", "RXCSUM IP");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_tcp",
	    CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_rx_ring, hn_csum_tcp),
	    hn_rx_stat_ulong_sysctl, "LU", "RXCSUM TCP");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_udp",
	    CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_rx_ring, hn_csum_udp),
	    hn_rx_stat_ulong_sysctl, "LU", "RXCSUM UDP");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_trusted",
	    CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_rx_ring, hn_csum_trusted),
	    hn_rx_stat_ulong_sysctl, "LU",
	    "# of packets that we trust host's csum verification");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "small_pkts",
	    CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_rx_ring, hn_small_pkts),
	    hn_rx_stat_ulong_sysctl, "LU", "# of small packets received");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_ring_cnt",
	    CTLFLAG_RD, &sc->hn_rx_ring_cnt, 0, "# created RX rings");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_ring_inuse",
	    CTLFLAG_RD, &sc->hn_rx_ring_inuse, 0, "# used RX rings");
}
static void
hn_destroy_rx_data(struct hn_softc *sc)
{
	int i;

	if (sc->hn_rx_ring_cnt == 0)
		return;

	for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
		struct hn_rx_ring *rxr = &sc->hn_rx_ring[i];

#if defined(INET) || defined(INET6)
		tcp_lro_free(&rxr->hn_lro);
#endif
		free(rxr->hn_rdbuf, M_NETVSC);
	}
	free(sc->hn_rx_ring, M_NETVSC);
	sc->hn_rx_ring = NULL;

	sc->hn_rx_ring_cnt = 0;
	sc->hn_rx_ring_inuse = 0;
}
static int
hn_create_tx_ring(struct hn_softc *sc, int id)
{
	struct hn_tx_ring *txr = &sc->hn_tx_ring[id];
	device_t dev = sc->hn_dev;
	bus_dma_tag_t parent_dtag;
	int error, i;
	uint32_t version;

	txr->hn_sc = sc;
	txr->hn_tx_idx = id;

#ifndef HN_USE_TXDESC_BUFRING
	mtx_init(&txr->hn_txlist_spin, "hn txlist", NULL, MTX_SPIN);
#endif
	mtx_init(&txr->hn_tx_lock, "hn tx", NULL, MTX_DEF);

	txr->hn_txdesc_cnt = HN_TX_DESC_CNT;
	txr->hn_txdesc = malloc(sizeof(struct hn_txdesc) * txr->hn_txdesc_cnt,
	    M_NETVSC, M_WAITOK | M_ZERO);
#ifndef HN_USE_TXDESC_BUFRING
	SLIST_INIT(&txr->hn_txlist);
#else
	txr->hn_txdesc_br = buf_ring_alloc(txr->hn_txdesc_cnt, M_NETVSC,
	    M_WAITOK, &txr->hn_tx_lock);
#endif

	txr->hn_tx_taskq = sc->hn_tx_taskq;

	if (hn_use_if_start) {
		txr->hn_txeof = hn_start_txeof;
		TASK_INIT(&txr->hn_tx_task, 0, hn_start_taskfunc, txr);
		TASK_INIT(&txr->hn_txeof_task, 0, hn_start_txeof_taskfunc, txr);
	} else {
		int br_depth;

		txr->hn_txeof = hn_xmit_txeof;
		TASK_INIT(&txr->hn_tx_task, 0, hn_xmit_taskfunc, txr);
		TASK_INIT(&txr->hn_txeof_task, 0, hn_xmit_txeof_taskfunc, txr);

		br_depth = hn_get_txswq_depth(txr);
		txr->hn_mbuf_br = buf_ring_alloc(br_depth, M_NETVSC,
		    M_WAITOK, &txr->hn_tx_lock);
	}
	txr->hn_direct_tx_size = hn_direct_tx_size;
	version = VMBUS_GET_VERSION(device_get_parent(dev), dev);
	if (version >= VMBUS_VERSION_WIN8_1) {
		txr->hn_csum_assist = HN_CSUM_ASSIST;
	} else {
		txr->hn_csum_assist = HN_CSUM_ASSIST_WIN8;
		if (bootverbose) {
			device_printf(dev, "bus version %u.%u, "
			    "no UDP checksum offloading\n",
			    VMBUS_VERSION_MAJOR(version),
			    VMBUS_VERSION_MINOR(version));
		}
	}

	/*
	 * Always schedule transmission instead of trying to do direct
	 * transmission.  This gives the best performance so far.
	 */
	txr->hn_sched_tx = 1;
	parent_dtag = bus_get_dma_tag(dev);

	/* DMA tag for RNDIS messages. */
	error = bus_dma_tag_create(parent_dtag, /* parent */
	    HN_RNDIS_MSG_ALIGN,		/* alignment */
	    HN_RNDIS_MSG_BOUNDARY,	/* boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    HN_RNDIS_MSG_LEN,		/* maxsize */
	    1,				/* nsegments */
	    HN_RNDIS_MSG_LEN,		/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockfuncarg */
	    &txr->hn_tx_rndis_dtag);
	if (error) {
		device_printf(dev, "failed to create rndis dmatag\n");
		return error;
	}
	/* DMA tag for data. */
	error = bus_dma_tag_create(parent_dtag, /* parent */
	    1,				/* alignment */
	    HN_TX_DATA_BOUNDARY,	/* boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    HN_TX_DATA_MAXSIZE,		/* maxsize */
	    HN_TX_DATA_SEGCNT_MAX,	/* nsegments */
	    HN_TX_DATA_SEGSIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockfuncarg */
	    &txr->hn_tx_data_dtag);
	if (error) {
		device_printf(dev, "failed to create data dmatag\n");
		return error;
	}
	for (i = 0; i < txr->hn_txdesc_cnt; ++i) {
		struct hn_txdesc *txd = &txr->hn_txdesc[i];

		txd->txr = txr;

		/*
		 * Allocate and load RNDIS messages.
		 */
		error = bus_dmamem_alloc(txr->hn_tx_rndis_dtag,
		    (void **)&txd->rndis_msg,
		    BUS_DMA_WAITOK | BUS_DMA_COHERENT,
		    &txd->rndis_msg_dmap);
		if (error) {
			device_printf(dev,
			    "failed to allocate rndis_msg, %d\n", i);
			return error;
		}

		error = bus_dmamap_load(txr->hn_tx_rndis_dtag,
		    txd->rndis_msg_dmap,
		    txd->rndis_msg, HN_RNDIS_MSG_LEN,
		    hyperv_dma_map_paddr, &txd->rndis_msg_paddr,
		    BUS_DMA_NOWAIT);
		if (error) {
			device_printf(dev,
			    "failed to load rndis_msg, %d\n", i);
			bus_dmamem_free(txr->hn_tx_rndis_dtag,
			    txd->rndis_msg, txd->rndis_msg_dmap);
			return error;
		}

		/* DMA map for TX data. */
		error = bus_dmamap_create(txr->hn_tx_data_dtag, 0,
		    &txd->data_dmap);
		if (error) {
			device_printf(dev,
			    "failed to allocate tx data dmamap\n");
			bus_dmamap_unload(txr->hn_tx_rndis_dtag,
			    txd->rndis_msg_dmap);
			bus_dmamem_free(txr->hn_tx_rndis_dtag,
			    txd->rndis_msg, txd->rndis_msg_dmap);
			return error;
		}

		/* All set; put it on the list. */
		txd->flags |= HN_TXD_FLAG_ONLIST;
#ifndef HN_USE_TXDESC_BUFRING
		SLIST_INSERT_HEAD(&txr->hn_txlist, txd, link);
#else
		buf_ring_enqueue(txr->hn_txdesc_br, txd);
#endif
	}
	txr->hn_txdesc_avail = txr->hn_txdesc_cnt;
	if (sc->hn_tx_sysctl_tree != NULL) {
		struct sysctl_oid_list *child;
		struct sysctl_ctx_list *ctx;
		char name[16];

		/*
		 * Create per TX ring sysctl tree:
		 * dev.hn.UNIT.tx.RINGID
		 */
		ctx = device_get_sysctl_ctx(dev);
		child = SYSCTL_CHILDREN(sc->hn_tx_sysctl_tree);

		snprintf(name, sizeof(name), "%d", id);
		txr->hn_tx_sysctl_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
		    name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");

		if (txr->hn_tx_sysctl_tree != NULL) {
			child = SYSCTL_CHILDREN(txr->hn_tx_sysctl_tree);

			SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_avail",
			    CTLFLAG_RD, &txr->hn_txdesc_avail, 0,
			    "# of available TX descs");
			if (!hn_use_if_start) {
				SYSCTL_ADD_INT(ctx, child, OID_AUTO, "oactive",
				    CTLFLAG_RD, &txr->hn_oactive, 0,
				    "over active");
			}
			SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "packets",
			    CTLFLAG_RW, &txr->hn_pkts,
			    "# of packets transmitted");
		}
	}

	return 0;
}
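/*
 * Release a TX descriptor's DMA resources; the descriptor must have
 * no mbuf attached and must not be DMA mapped (asserted below).
 */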
static void
hn_txdesc_dmamap_destroy(struct hn_txdesc *txd)
{
	struct hn_tx_ring *txr = txd->txr;

	KASSERT(txd->m == NULL, ("still has mbuf installed"));
	KASSERT((txd->flags & HN_TXD_FLAG_DMAMAP) == 0, ("still dma mapped"));

	bus_dmamap_unload(txr->hn_tx_rndis_dtag, txd->rndis_msg_dmap);
	bus_dmamem_free(txr->hn_tx_rndis_dtag, txd->rndis_msg,
	    txd->rndis_msg_dmap);
	bus_dmamap_destroy(txr->hn_tx_data_dtag, txd->data_dmap);
}
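/*
 * Tear down one TX ring: destroy the descriptors' DMA resources,
 * the DMA tags, the buf_rings, the descriptor array, and the locks.
 */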
static void
hn_destroy_tx_ring(struct hn_tx_ring *txr)
{
	struct hn_txdesc *txd;

	if (txr->hn_txdesc == NULL)
		return;

#ifndef HN_USE_TXDESC_BUFRING
	while ((txd = SLIST_FIRST(&txr->hn_txlist)) != NULL) {
		SLIST_REMOVE_HEAD(&txr->hn_txlist, link);
		hn_txdesc_dmamap_destroy(txd);
	}
#else
	mtx_lock(&txr->hn_tx_lock);
	while ((txd = buf_ring_dequeue_sc(txr->hn_txdesc_br)) != NULL)
		hn_txdesc_dmamap_destroy(txd);
	mtx_unlock(&txr->hn_tx_lock);
#endif

	if (txr->hn_tx_data_dtag != NULL)
		bus_dma_tag_destroy(txr->hn_tx_data_dtag);
	if (txr->hn_tx_rndis_dtag != NULL)
		bus_dma_tag_destroy(txr->hn_tx_rndis_dtag);

#ifdef HN_USE_TXDESC_BUFRING
	buf_ring_free(txr->hn_txdesc_br, M_NETVSC);
#endif

	free(txr->hn_txdesc, M_NETVSC);
	txr->hn_txdesc = NULL;

	if (txr->hn_mbuf_br != NULL)
		buf_ring_free(txr->hn_mbuf_br, M_NETVSC);

#ifndef HN_USE_TXDESC_BUFRING
	mtx_destroy(&txr->hn_txlist_spin);
#endif
	mtx_destroy(&txr->hn_tx_lock);
}
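/*
 * Create all TX rings, the dev.hn.UNIT.tx sysctl tree, and the
 * aggregate TX statistics and tunables layered on top of the rings.
 */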
static int
hn_create_tx_data(struct hn_softc *sc, int ring_cnt)
{
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx;
	int i;

	sc->hn_tx_ring_cnt = ring_cnt;
	sc->hn_tx_ring_inuse = sc->hn_tx_ring_cnt;

	sc->hn_tx_ring = malloc(sizeof(struct hn_tx_ring) * sc->hn_tx_ring_cnt,
	    M_NETVSC, M_WAITOK | M_ZERO);

	ctx = device_get_sysctl_ctx(sc->hn_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->hn_dev));

	/* Create dev.hn.UNIT.tx sysctl tree */
	sc->hn_tx_sysctl_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "tx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");

	for (i = 0; i < sc->hn_tx_ring_cnt; ++i) {
		int error;

		error = hn_create_tx_ring(sc, i);
		if (error)
			return error;
	}
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "no_txdescs",
	    CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_tx_ring, hn_no_txdescs),
	    hn_tx_stat_ulong_sysctl, "LU", "# of times short of TX descs");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "send_failed",
	    CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_tx_ring, hn_send_failed),
	    hn_tx_stat_ulong_sysctl, "LU", "# of hyper-v send failures");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txdma_failed",
	    CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_tx_ring, hn_txdma_failed),
	    hn_tx_stat_ulong_sysctl, "LU", "# of TX DMA failures");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_collapsed",
	    CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_tx_ring, hn_tx_collapsed),
	    hn_tx_stat_ulong_sysctl, "LU", "# of TX mbufs collapsed");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney",
	    CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_tx_ring, hn_tx_chimney),
	    hn_tx_stat_ulong_sysctl, "LU", "# of chimney sends");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney_tried",
	    CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_tx_ring, hn_tx_chimney_tried),
	    hn_tx_stat_ulong_sysctl, "LU", "# of chimney send tries");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_cnt",
	    CTLFLAG_RD, &sc->hn_tx_ring[0].hn_txdesc_cnt, 0,
	    "# of total TX descs");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_chimney_max",
	    CTLFLAG_RD, &sc->hn_tx_chimney_max, 0,
	    "Chimney send packet size upper boundary");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney_size",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
	    hn_tx_chimney_size_sysctl,
	    "I", "Chimney send packet size limit");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "direct_tx_size",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_tx_ring, hn_direct_tx_size),
	    hn_tx_conf_int_sysctl, "I",
	    "Maximum packet size for direct transmission");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "sched_tx",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_tx_ring, hn_sched_tx),
	    hn_tx_conf_int_sysctl, "I",
	    "Always schedule transmission "
	    "instead of doing direct transmission");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_cnt",
	    CTLFLAG_RD, &sc->hn_tx_ring_cnt, 0, "# created TX rings");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_inuse",
	    CTLFLAG_RD, &sc->hn_tx_ring_inuse, 0, "# used TX rings");

	return 0;
}
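/*
 * Propagate a new chimney send size limit to all TX rings in use.
 */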
static void
hn_set_tx_chimney_size(struct hn_softc *sc, int chimney_size)
{
	int i;

	for (i = 0; i < sc->hn_tx_ring_inuse; ++i)
		sc->hn_tx_ring[i].hn_tx_chimney_size = chimney_size;
}
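/* Tear down all TX rings and free the ring array. */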
static void
hn_destroy_tx_data(struct hn_softc *sc)
{
	int i;

	if (sc->hn_tx_ring_cnt == 0)
		return;

	for (i = 0; i < sc->hn_tx_ring_cnt; ++i)
		hn_destroy_tx_ring(&sc->hn_tx_ring[i]);

	free(sc->hn_tx_ring, M_NETVSC);
	sc->hn_tx_ring = NULL;

	sc->hn_tx_ring_cnt = 0;
	sc->hn_tx_ring_inuse = 0;
}
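/*
 * Taskqueue functions for the legacy if_start transmit path: kick
 * transmission, and on send completion clear IFF_DRV_OACTIVE before
 * transmitting again.
 */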
static void
hn_start_taskfunc(void *xtxr, int pending __unused)
{
	struct hn_tx_ring *txr = xtxr;

	mtx_lock(&txr->hn_tx_lock);
	hn_start_locked(txr, 0);
	mtx_unlock(&txr->hn_tx_lock);
}
static void
hn_start_txeof_taskfunc(void *xtxr, int pending __unused)
{
	struct hn_tx_ring *txr = xtxr;

	mtx_lock(&txr->hn_tx_lock);
	atomic_clear_int(&txr->hn_sc->hn_ifp->if_drv_flags, IFF_DRV_OACTIVE);
	hn_start_locked(txr, 0);
	mtx_unlock(&txr->hn_tx_lock);
}
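/* Drain the queued transmit and send-completion tasks of all TX rings. */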
static void
hn_stop_tx_tasks(struct hn_softc *sc)
{
	int i;

	for (i = 0; i < sc->hn_tx_ring_inuse; ++i) {
		struct hn_tx_ring *txr = &sc->hn_tx_ring[i];

		taskqueue_drain(txr->hn_tx_taskq, &txr->hn_tx_task);
		taskqueue_drain(txr->hn_tx_taskq, &txr->hn_txeof_task);
	}
}
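/*
 * Drain the ring's mbuf buf_ring and transmit the packets.  If len is
 * positive, a packet longer than len is put back and 1 is returned so
 * the caller can dispatch it (and the rest of the queue) to the TX
 * taskqueue; 0 is returned when the ring is drained or must back off.
 */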
static int
hn_xmit(struct hn_tx_ring *txr, int len)
{
	struct hn_softc *sc = txr->hn_sc;
	struct ifnet *ifp = sc->hn_ifp;
	struct mbuf *m_head;

	mtx_assert(&txr->hn_tx_lock, MA_OWNED);
	KASSERT(hn_use_if_start == 0,
	    ("hn_xmit is called when if_start is enabled"));

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || txr->hn_oactive)
		return 0;

	while ((m_head = drbr_peek(ifp, txr->hn_mbuf_br)) != NULL) {
		struct hn_txdesc *txd;
		int error;

		if (len > 0 && m_head->m_pkthdr.len > len) {
			/*
			 * Sending this packet could be time consuming;
			 * let callers dispatch it (and any follow-up
			 * packets) to the tx taskqueue.
			 */
			drbr_putback(ifp, txr->hn_mbuf_br, m_head);
			return 1;
		}

		txd = hn_txdesc_get(txr);
		if (txd == NULL) {
			txr->hn_no_txdescs++;
			drbr_putback(ifp, txr->hn_mbuf_br, m_head);
			txr->hn_oactive = 1;
			break;
		}

		error = hn_encap(txr, txd, &m_head);
		if (error) {
			/* Both txd and m_head are freed; discard */
			drbr_advance(ifp, txr->hn_mbuf_br);
			continue;
		}

		error = hn_send_pkt(ifp, txr, txd);
		if (__predict_false(error)) {
			/* txd is freed, but m_head is not */
			drbr_putback(ifp, txr->hn_mbuf_br, m_head);
			txr->hn_oactive = 1;
			break;
		}

		/* Sent */
		drbr_advance(ifp, txr->hn_mbuf_br);
	}
	return 0;
}
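/*
 * if_transmit method: pick a TX ring by flowid, enqueue the mbuf, and
 * try a direct transmission bounded by hn_direct_tx_size, falling back
 * to the ring's TX task when scheduling is forced or the lock is busy.
 */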
static int
hn_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct hn_softc *sc = ifp->if_softc;
	struct hn_tx_ring *txr;
	int error, idx = 0;

	/*
	 * Select the TX ring based on flowid
	 */
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		idx = m->m_pkthdr.flowid % sc->hn_tx_ring_inuse;
	txr = &sc->hn_tx_ring[idx];

	error = drbr_enqueue(ifp, txr->hn_mbuf_br, m);
	if (error) {
		if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1);
		return error;
	}

	if (txr->hn_oactive)
		return 0;

	if (txr->hn_sched_tx)
		goto do_sched;

	if (mtx_trylock(&txr->hn_tx_lock)) {
		int sched;

		sched = hn_xmit(txr, txr->hn_direct_tx_size);
		mtx_unlock(&txr->hn_tx_lock);
		if (!sched)
			return 0;
	}
do_sched:
	taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_tx_task);
	return 0;
}
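/* if_qflush method: discard all mbufs queued on the TX rings. */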
static void
hn_xmit_qflush(struct ifnet *ifp)
{
	struct hn_softc *sc = ifp->if_softc;
	int i;

	for (i = 0; i < sc->hn_tx_ring_inuse; ++i) {
		struct hn_tx_ring *txr = &sc->hn_tx_ring[i];
		struct mbuf *m;

		mtx_lock(&txr->hn_tx_lock);
		while ((m = buf_ring_dequeue_sc(txr->hn_mbuf_br)) != NULL)
			m_freem(m);
		mtx_unlock(&txr->hn_tx_lock);
	}
	if_qflush(ifp);
}
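/*
 * Send-completion handler for the if_transmit path: clear oactive and
 * restart transmission, deferring to the send-completion task when the
 * ring lock is contended or scheduled transmission is forced.
 */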
static void
hn_xmit_txeof(struct hn_tx_ring *txr)
{

	if (txr->hn_sched_tx)
		goto do_sched;

	if (mtx_trylock(&txr->hn_tx_lock)) {
		int sched;

		txr->hn_oactive = 0;
		sched = hn_xmit(txr, txr->hn_direct_tx_size);
		mtx_unlock(&txr->hn_tx_lock);
		if (sched) {
			taskqueue_enqueue(txr->hn_tx_taskq,
			    &txr->hn_txeof_task);
		}
	} else {
do_sched:
		/*
		 * Clear oactive early, in the hope that others can
		 * catch up sooner.  The task will clear oactive again,
		 * under the hn_tx_lock, to avoid possible races.
		 */
		txr->hn_oactive = 0;
		taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_txeof_task);
	}
}
static void
hn_xmit_taskfunc(void *xtxr, int pending __unused)
{
	struct hn_tx_ring *txr = xtxr;

	mtx_lock(&txr->hn_tx_lock);
	hn_xmit(txr, 0);
	mtx_unlock(&txr->hn_tx_lock);
}
static void
hn_xmit_txeof_taskfunc(void *xtxr, int pending __unused)
{
	struct hn_tx_ring *txr = xtxr;

	mtx_lock(&txr->hn_tx_lock);
	txr->hn_oactive = 0;
	hn_xmit(txr, 0);
	mtx_unlock(&txr->hn_tx_lock);
}
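/*
 * Associate a VMBus channel with the RX ring, and possibly the TX
 * ring, matching its sub-channel index, then bind the channel to a
 * CPU derived from that index.
 */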
static void
hn_channel_attach(struct hn_softc *sc, struct vmbus_channel *chan)
{
	struct hn_rx_ring *rxr;
	int idx;

	idx = vmbus_chan_subidx(chan);

	KASSERT(idx >= 0 && idx < sc->hn_rx_ring_inuse,
	    ("invalid channel index %d, should be >= 0 and < %d",
	     idx, sc->hn_rx_ring_inuse));
	rxr = &sc->hn_rx_ring[idx];
	KASSERT((rxr->hn_rx_flags & HN_RX_FLAG_ATTACHED) == 0,
	    ("RX ring %d already attached", idx));
	rxr->hn_rx_flags |= HN_RX_FLAG_ATTACHED;

	if (bootverbose) {
		if_printf(sc->hn_ifp, "link RX ring %d to channel %u\n",
		    idx, vmbus_chan_id(chan));
	}

	if (idx < sc->hn_tx_ring_inuse) {
		struct hn_tx_ring *txr = &sc->hn_tx_ring[idx];

		KASSERT((txr->hn_tx_flags & HN_TX_FLAG_ATTACHED) == 0,
		    ("TX ring %d already attached", idx));
		txr->hn_tx_flags |= HN_TX_FLAG_ATTACHED;

		txr->hn_chan = chan;
		if (bootverbose) {
			if_printf(sc->hn_ifp, "link TX ring %d to channel %u\n",
			    idx, vmbus_chan_id(chan));
		}
	}

	/* Bind channel to a proper CPU */
	vmbus_chan_cpu_set(chan, (sc->hn_cpu + idx) % mp_ncpus);
}
static void
hn_subchan_attach(struct hn_softc *sc, struct vmbus_channel *chan)
{

	KASSERT(!vmbus_chan_is_primary(chan),
	    ("subchannel callback on primary channel"));
	hn_channel_attach(sc, chan);
}
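/*
 * Wait for all of the requested sub-channels to be offered by the
 * host, then attach each of them here and at the NetVSC layer.
 */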
static void
hn_subchan_setup(struct hn_softc *sc)
{
	struct vmbus_channel **subchans;
	int subchan_cnt = sc->hn_rx_ring_inuse - 1;
	int i;

	/* Wait for sub-channels setup to complete. */
	subchans = vmbus_subchan_get(sc->hn_prichan, subchan_cnt);

	/* Attach the sub-channels. */
	for (i = 0; i < subchan_cnt; ++i) {
		struct vmbus_channel *subchan = subchans[i];

		/* NOTE: Calling order is critical. */
		hn_subchan_attach(sc, subchan);
		hv_nv_subchan_attach(subchan,
		    &sc->hn_rx_ring[vmbus_chan_subidx(subchan)]);
	}

	/* Release the sub-channels */
	vmbus_subchan_rel(subchans, subchan_cnt);
	if_printf(sc->hn_ifp, "%d sub-channels setup done\n", subchan_cnt);
}
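/*
 * Create the shared TX taskqueue at boot, when enabled, optionally
 * binding its thread to the CPU requested by hn_bind_tx_taskq.
 */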
static void
hn_tx_taskq_create(void *arg __unused)
{
	if (!hn_share_tx_taskq)
		return;

	hn_tx_taskq = taskqueue_create("hn_tx", M_WAITOK,
	    taskqueue_thread_enqueue, &hn_tx_taskq);
	if (hn_bind_tx_taskq >= 0) {
		int cpu = hn_bind_tx_taskq;
		cpuset_t cpu_set;

		if (cpu > mp_ncpus - 1)
			cpu = mp_ncpus - 1;
		CPU_SETOF(cpu, &cpu_set);
		taskqueue_start_threads_cpuset(&hn_tx_taskq, 1, PI_NET,
		    &cpu_set, "hn tx");
	} else {
		taskqueue_start_threads(&hn_tx_taskq, 1, PI_NET, "hn tx");
	}
}
SYSINIT(hn_txtq_create, SI_SUB_DRIVERS, SI_ORDER_FIRST,
    hn_tx_taskq_create, NULL);
static void
hn_tx_taskq_destroy(void *arg __unused)
{
	if (hn_tx_taskq != NULL)
		taskqueue_free(hn_tx_taskq);
}
SYSUNINIT(hn_txtq_destroy, SI_SUB_DRIVERS, SI_ORDER_FIRST,
    hn_tx_taskq_destroy, NULL);
static device_method_t netvsc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		netvsc_probe),
	DEVMETHOD(device_attach,	netvsc_attach),
	DEVMETHOD(device_detach,	netvsc_detach),
	DEVMETHOD(device_shutdown,	netvsc_shutdown),

	DEVMETHOD_END
};

static driver_t netvsc_driver = {
	NETVSC_DEVNAME,
	netvsc_methods,
	sizeof(struct hn_softc)
};

static devclass_t netvsc_devclass;

DRIVER_MODULE(hn, vmbus, netvsc_driver, netvsc_devclass, 0, 0);
MODULE_VERSION(hn, 1);
MODULE_DEPEND(hn, vmbus, 1, 1, 1);