/*-
 * Copyright (c) 2010-2012 Citrix Inc.
 * Copyright (c) 2009-2012,2016 Microsoft Corp.
 * Copyright (c) 2012 NetApp Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2004-2006 Kip Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/buf_ring.h>

#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <netinet/ip6.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/frame.h>
#include <machine/vmparam.h>

#include <sys/rman.h>
#include <sys/mutex.h>
#include <sys/errno.h>
#include <sys/types.h>
#include <machine/atomic.h>

#include <machine/intr_machdep.h>

#include <machine/in_cksum.h>

#include <dev/hyperv/include/hyperv.h>
#include <dev/hyperv/include/hyperv_busdma.h>
#include <dev/hyperv/include/vmbus_xact.h>

#include <dev/hyperv/netvsc/hv_net_vsc.h>
#include <dev/hyperv/netvsc/hv_rndis.h>
#include <dev/hyperv/netvsc/hv_rndis_filter.h>
#include <dev/hyperv/netvsc/ndis.h>

#include "vmbus_if.h"
/* Short for Hyper-V network interface */
#define NETVSC_DEVNAME	"hn"

/*
 * It looks like offset 0 of buf is reserved to hold the softc pointer.
 * The sc pointer is evidently not needed, and is not presently populated.
 * The packet offset is where the netvsc_packet starts in the buffer.
 */
#define HV_NV_SC_PTR_OFFSET_IN_BUF	0
#define HV_NV_PACKET_OFFSET_IN_BUF	16
/* YYY should get it from the underlying channel */
#define HN_TX_DESC_CNT			512

#define HN_LROENT_CNT_DEF		128

#define HN_RING_CNT_DEF_MAX		8

#define HN_RNDIS_MSG_LEN		\
	(sizeof(rndis_msg) +		\
	 RNDIS_HASHVAL_PPI_SIZE +	\
	 RNDIS_VLAN_PPI_SIZE +		\
	 RNDIS_TSO_PPI_SIZE +		\
	 RNDIS_CSUM_PPI_SIZE)
#define HN_RNDIS_MSG_BOUNDARY		PAGE_SIZE
#define HN_RNDIS_MSG_ALIGN		CACHE_LINE_SIZE

#define HN_TX_DATA_BOUNDARY		PAGE_SIZE
#define HN_TX_DATA_MAXSIZE		IP_MAXPACKET
#define HN_TX_DATA_SEGSIZE		PAGE_SIZE
/* -1 for RNDIS packet message */
#define HN_TX_DATA_SEGCNT_MAX		(NETVSC_PACKET_MAXPAGE - 1)

#define HN_DIRECT_TX_SIZE_DEF		128

#define HN_EARLY_TXEOF_THRESH		8
struct hn_txdesc {
#ifndef HN_USE_TXDESC_BUFRING
	SLIST_ENTRY(hn_txdesc) link;
#endif
	struct mbuf	*m;
	struct hn_tx_ring *txr;
	int		refs;
	uint32_t	flags;		/* HN_TXD_FLAG_ */
	struct hn_send_ctx send_ctx;

	bus_dmamap_t	data_dmap;

	bus_addr_t	rndis_msg_paddr;
	rndis_msg	*rndis_msg;
	bus_dmamap_t	rndis_msg_dmap;
};

#define HN_TXD_FLAG_ONLIST	0x1
#define HN_TXD_FLAG_DMAMAP	0x2
/*
 * Only enable UDP checksum offloading when it is on 2012R2 or
 * later.  UDP checksum offloading doesn't work on earlier
 * Windows releases.
 */
#define HN_CSUM_ASSIST_WIN8	(CSUM_IP | CSUM_TCP)
#define HN_CSUM_ASSIST		(CSUM_IP | CSUM_UDP | CSUM_TCP)

#define HN_LRO_LENLIM_MULTIRX_DEF	(12 * ETHERMTU)
#define HN_LRO_LENLIM_DEF		(25 * ETHERMTU)
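/*
 * Illustrative arithmetic: with the standard 1500-byte Ethernet MTU,
 * the two defaults above work out to 18000 and 37500 bytes.
 */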
/* YYY 2*MTU is a bit rough, but should be good enough. */
#define HN_LRO_LENLIM_MIN(ifp)		(2 * (ifp)->if_mtu)

#define HN_LRO_ACKCNT_DEF		1
/*
 * Be aware that this sleepable mutex will exhibit WITNESS errors when
 * certain TCP and ARP code paths are taken.  This appears to be a
 * well-known condition, as all other drivers checked use a sleeping
 * mutex to protect their transmit paths.
 * Also be aware that mutexes do not play well with semaphores, and there
 * is a conflicting semaphore in a certain channel code path.
 */
#define NV_LOCK_INIT(_sc, _name) \
	mtx_init(&(_sc)->hn_lock, _name, MTX_NETWORK_LOCK, MTX_DEF)
#define NV_LOCK(_sc)		mtx_lock(&(_sc)->hn_lock)
#define NV_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->hn_lock, MA_OWNED)
#define NV_UNLOCK(_sc)		mtx_unlock(&(_sc)->hn_lock)
#define NV_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->hn_lock)
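/*
 * A sketch of the intended locking pattern (not a verbatim excerpt):
 * fields such as sc->temp_unusable are only read or written between
 * NV_LOCK(sc) and NV_UNLOCK(sc); see the rules above hn_ioctl() below.
 */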
int hv_promisc_mode = 0;	/* normal mode by default */

SYSCTL_NODE(_hw, OID_AUTO, hn, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "Hyper-V network interface");

/* Trust tcp segment verification on host side. */
static int hn_trust_hosttcp = 1;
SYSCTL_INT(_hw_hn, OID_AUTO, trust_hosttcp, CTLFLAG_RDTUN,
    &hn_trust_hosttcp, 0,
    "Trust tcp segment verification on host side, "
    "when csum info is missing (global setting)");

/* Trust udp datagram verification on host side. */
static int hn_trust_hostudp = 1;
SYSCTL_INT(_hw_hn, OID_AUTO, trust_hostudp, CTLFLAG_RDTUN,
    &hn_trust_hostudp, 0,
    "Trust udp datagram verification on host side, "
    "when csum info is missing (global setting)");

/* Trust ip packet verification on host side. */
static int hn_trust_hostip = 1;
SYSCTL_INT(_hw_hn, OID_AUTO, trust_hostip, CTLFLAG_RDTUN,
    &hn_trust_hostip, 0,
    "Trust ip packet verification on host side, "
    "when csum info is missing (global setting)");
/* Limit TSO burst size */
static int hn_tso_maxlen = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, tso_maxlen, CTLFLAG_RDTUN,
    &hn_tso_maxlen, 0, "TSO burst limit");

/* Limit chimney send size */
static int hn_tx_chimney_size = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, tx_chimney_size, CTLFLAG_RDTUN,
    &hn_tx_chimney_size, 0, "Chimney send packet size limit");

/* Limit the size of packet for direct transmission */
static int hn_direct_tx_size = HN_DIRECT_TX_SIZE_DEF;
SYSCTL_INT(_hw_hn, OID_AUTO, direct_tx_size, CTLFLAG_RDTUN,
    &hn_direct_tx_size, 0, "Size of the packet for direct transmission");

#if defined(INET) || defined(INET6)
#if __FreeBSD_version >= 1100095
static int hn_lro_entry_count = HN_LROENT_CNT_DEF;
SYSCTL_INT(_hw_hn, OID_AUTO, lro_entry_count, CTLFLAG_RDTUN,
    &hn_lro_entry_count, 0, "LRO entry count");
#endif
#endif

static int hn_share_tx_taskq = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, share_tx_taskq, CTLFLAG_RDTUN,
    &hn_share_tx_taskq, 0, "Enable shared TX taskqueue");

static struct taskqueue	*hn_tx_taskq;

#ifndef HN_USE_TXDESC_BUFRING
static int hn_use_txdesc_bufring = 0;
#else
static int hn_use_txdesc_bufring = 1;
#endif
SYSCTL_INT(_hw_hn, OID_AUTO, use_txdesc_bufring, CTLFLAG_RD,
    &hn_use_txdesc_bufring, 0, "Use buf_ring for TX descriptors");

static int hn_bind_tx_taskq = -1;
SYSCTL_INT(_hw_hn, OID_AUTO, bind_tx_taskq, CTLFLAG_RDTUN,
    &hn_bind_tx_taskq, 0, "Bind TX taskqueue to the specified cpu");

static int hn_use_if_start = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, use_if_start, CTLFLAG_RDTUN,
    &hn_use_if_start, 0, "Use if_start TX method");

static int hn_chan_cnt = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, chan_cnt, CTLFLAG_RDTUN,
    &hn_chan_cnt, 0,
    "# of channels to use; each channel has one RX ring and one TX ring");

static int hn_tx_ring_cnt = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, tx_ring_cnt, CTLFLAG_RDTUN,
    &hn_tx_ring_cnt, 0, "# of TX rings to use");

static int hn_tx_swq_depth = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, tx_swq_depth, CTLFLAG_RDTUN,
    &hn_tx_swq_depth, 0, "Depth of IFQ or BUFRING");
#if __FreeBSD_version >= 1100095
static u_int hn_lro_mbufq_depth = 0;
SYSCTL_UINT(_hw_hn, OID_AUTO, lro_mbufq_depth, CTLFLAG_RDTUN,
    &hn_lro_mbufq_depth, 0, "Depth of LRO mbuf queue");
#endif

static u_int hn_cpu_index;

/*
 * Forward declarations
 */
static void hn_stop(hn_softc_t *sc);
static void hn_ifinit_locked(hn_softc_t *sc);
static void hn_ifinit(void *xsc);
static int  hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int  hn_start_locked(struct hn_tx_ring *txr, int len);
static void hn_start(struct ifnet *ifp);
static void hn_start_txeof(struct hn_tx_ring *);
static int  hn_ifmedia_upd(struct ifnet *ifp);
static void hn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
#if __FreeBSD_version >= 1100099
static int  hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS);
static int  hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS);
#endif
static int  hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS);
static int  hn_chim_size_sysctl(SYSCTL_HANDLER_ARGS);
#if __FreeBSD_version < 1100095
static int  hn_rx_stat_int_sysctl(SYSCTL_HANDLER_ARGS);
#else
static int  hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS);
#endif
static int  hn_rx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS);
static int  hn_tx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS);
static int  hn_tx_conf_int_sysctl(SYSCTL_HANDLER_ARGS);
static int  hn_ndis_version_sysctl(SYSCTL_HANDLER_ARGS);
static int  hn_check_iplen(const struct mbuf *, int);
static int  hn_create_tx_ring(struct hn_softc *, int);
static void hn_destroy_tx_ring(struct hn_tx_ring *);
static int  hn_create_tx_data(struct hn_softc *, int);
static void hn_destroy_tx_data(struct hn_softc *);
static void hn_start_taskfunc(void *, int);
static void hn_start_txeof_taskfunc(void *, int);
static void hn_stop_tx_tasks(struct hn_softc *);
static int  hn_encap(struct hn_tx_ring *, struct hn_txdesc *, struct mbuf **);
static int  hn_create_rx_data(struct hn_softc *sc, int);
static void hn_destroy_rx_data(struct hn_softc *sc);
static void hn_set_chim_size(struct hn_softc *, int);
static void hn_channel_attach(struct hn_softc *, struct vmbus_channel *);
static void hn_subchan_attach(struct hn_softc *, struct vmbus_channel *);
static void hn_subchan_setup(struct hn_softc *);

static int  hn_transmit(struct ifnet *, struct mbuf *);
static void hn_xmit_qflush(struct ifnet *);
static int  hn_xmit(struct hn_tx_ring *, int);
static void hn_xmit_txeof(struct hn_tx_ring *);
static void hn_xmit_taskfunc(void *, int);
static void hn_xmit_txeof_taskfunc(void *, int);

#if __FreeBSD_version >= 1100099
static void
hn_set_lro_lenlim(struct hn_softc *sc, int lenlim)
{
	int i;

	for (i = 0; i < sc->hn_rx_ring_inuse; ++i)
		sc->hn_rx_ring[i].hn_lro.lro_length_lim = lenlim;
}
#endif

static int
hn_get_txswq_depth(const struct hn_tx_ring *txr)
{

	KASSERT(txr->hn_txdesc_cnt > 0, ("tx ring is not setup yet"));
	if (hn_tx_swq_depth < txr->hn_txdesc_cnt)
		return txr->hn_txdesc_cnt;
	return hn_tx_swq_depth;
}
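/*
 * E.g. with the default hn_tx_swq_depth of 0, the software TX queue
 * depth falls back to hn_txdesc_cnt (HN_TX_DESC_CNT, i.e. 512).
 */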
static int
hn_ifmedia_upd(struct ifnet *ifp __unused)
{

	return EOPNOTSUPP;
}

static void
hn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct hn_softc *sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->hn_carrier) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}
	ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
}

/* {F8615163-DF3E-46c5-913F-F2D2F965ED0E} */
static const struct hyperv_guid g_net_vsc_device_type = {
	.hv_guid = {0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
		0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E}
};

/*
 * Standard probe entry point.
 */
static int
netvsc_probe(device_t dev)
{
	if (VMBUS_PROBE_GUID(device_get_parent(dev), dev,
	    &g_net_vsc_device_type) == 0) {
		device_set_desc(dev, "Hyper-V Network Interface");
		return BUS_PROBE_DEFAULT;
	}
	return ENXIO;
}

static void
hn_cpuset_setthread_task(void *xmask, int pending __unused)
{
	cpuset_t *mask = xmask;
	int error;

	error = cpuset_setthread(curthread->td_tid, mask);
	if (error) {
		panic("curthread=%ju: can't pin; error=%d",
		    (uintmax_t)curthread->td_tid, error);
	}
}
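/*
 * Usage sketch: enqueue this task on the taskqueue whose thread should
 * be pinned, then drain the taskqueue so the pinning is guaranteed to
 * have completed; netvsc_attach() below does exactly that.
 */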
/*
 * Standard attach entry point.
 *
 * Called when the driver is loaded.  It allocates needed resources,
 * and initializes the "hardware" and software.
 */
static int
netvsc_attach(device_t dev)
{
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx;
	netvsc_device_info device_info;
	hn_softc_t *sc;
	int unit = device_get_unit(dev);
	struct ifnet *ifp = NULL;
	int error, ring_cnt, tx_ring_cnt;
	int tso_maxlen;

	sc = device_get_softc(dev);

	sc->hn_unit = unit;
	sc->hn_dev = dev;
	sc->hn_prichan = vmbus_get_channel(dev);

	if (hn_tx_taskq == NULL) {
		sc->hn_tx_taskq = taskqueue_create("hn_tx", M_WAITOK,
		    taskqueue_thread_enqueue, &sc->hn_tx_taskq);
		taskqueue_start_threads(&sc->hn_tx_taskq, 1, PI_NET, "%s tx",
		    device_get_nameunit(dev));
		if (hn_bind_tx_taskq >= 0) {
			int cpu = hn_bind_tx_taskq;
			struct task cpuset_task;
			cpuset_t cpu_set;

			if (cpu > mp_ncpus - 1)
				cpu = mp_ncpus - 1;
			CPU_SETOF(cpu, &cpu_set);
			TASK_INIT(&cpuset_task, 0, hn_cpuset_setthread_task,
			    &cpu_set);
			taskqueue_enqueue(sc->hn_tx_taskq, &cpuset_task);
			taskqueue_drain(sc->hn_tx_taskq, &cpuset_task);
		}
	} else {
		sc->hn_tx_taskq = hn_tx_taskq;
	}
	NV_LOCK_INIT(sc, "NetVSCLock");

	ifp = sc->hn_ifp = sc->arpcom.ac_ifp = if_alloc(IFT_ETHER);
	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));

	/*
	 * Figure out the # of RX rings (ring_cnt) and the # of TX rings
	 * to use (tx_ring_cnt).
	 *
	 * NOTE:
	 * The # of RX rings to use is the same as the # of channels to use.
	 */
	ring_cnt = hn_chan_cnt;
	if (ring_cnt <= 0) {
		/* Default */
		ring_cnt = mp_ncpus;
		if (ring_cnt > HN_RING_CNT_DEF_MAX)
			ring_cnt = HN_RING_CNT_DEF_MAX;
	} else if (ring_cnt > mp_ncpus) {
		ring_cnt = mp_ncpus;
	}

	tx_ring_cnt = hn_tx_ring_cnt;
	if (tx_ring_cnt <= 0 || tx_ring_cnt > ring_cnt)
		tx_ring_cnt = ring_cnt;
	if (hn_use_if_start) {
		/* ifnet.if_start only needs one TX ring. */
		tx_ring_cnt = 1;
	}

	/*
	 * Set the leader CPU for channels.
	 */
	sc->hn_cpu = atomic_fetchadd_int(&hn_cpu_index, ring_cnt) % mp_ncpus;
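	/*
	 * Illustrative: with mp_ncpus 8 and ring_cnt 4, successive
	 * devices get leader CPUs 0, 4, 0, ..., since hn_cpu_index
	 * grows by ring_cnt on each attach.
	 */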
	error = hn_create_tx_data(sc, tx_ring_cnt);
	if (error)
		goto failed;
	error = hn_create_rx_data(sc, ring_cnt);
	if (error)
		goto failed;

	/*
	 * Associate the first TX/RX ring w/ the primary channel.
	 */
	hn_channel_attach(sc, sc->hn_prichan);

	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = hn_ioctl;
	ifp->if_init = hn_ifinit;
	/* needed by hv_rf_on_device_add() code */
	ifp->if_mtu = ETHERMTU;
	if (hn_use_if_start) {
		int qdepth = hn_get_txswq_depth(&sc->hn_tx_ring[0]);

		ifp->if_start = hn_start;
		IFQ_SET_MAXLEN(&ifp->if_snd, qdepth);
		ifp->if_snd.ifq_drv_maxlen = qdepth - 1;
		IFQ_SET_READY(&ifp->if_snd);
	} else {
		ifp->if_transmit = hn_transmit;
		ifp->if_qflush = hn_xmit_qflush;
	}

	ifmedia_init(&sc->hn_media, 0, hn_ifmedia_upd, hn_ifmedia_sts);
	ifmedia_add(&sc->hn_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->hn_media, IFM_ETHER | IFM_AUTO);
	/* XXX ifmedia_set really should do this for us */
	sc->hn_media.ifm_media = sc->hn_media.ifm_cur->ifm_media;

	/*
	 * Tell upper layers that we support full VLAN capability.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |=
	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_TSO |
	    IFCAP_LRO;
	ifp->if_capenable |=
	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_TSO |
	    IFCAP_LRO;
	ifp->if_hwassist = sc->hn_tx_ring[0].hn_csum_assist | CSUM_TSO;

	sc->hn_xact = vmbus_xact_ctx_create(bus_get_dma_tag(dev),
	    HN_XACT_REQ_SIZE, HN_XACT_RESP_SIZE, 0);
	if (sc->hn_xact == NULL)
		goto failed;

	error = hv_rf_on_device_add(sc, &device_info, &ring_cnt,
	    &sc->hn_rx_ring[0]);
	if (error)
		goto failed;
	KASSERT(ring_cnt > 0 && ring_cnt <= sc->hn_rx_ring_inuse,
	    ("invalid channel count %d, should be less than %d",
	     ring_cnt, sc->hn_rx_ring_inuse));

	/*
	 * Set the # of TX/RX rings that could be used according to
	 * the # of channels that the host offered.
	 */
	if (sc->hn_tx_ring_inuse > ring_cnt)
		sc->hn_tx_ring_inuse = ring_cnt;
	sc->hn_rx_ring_inuse = ring_cnt;
	device_printf(dev, "%d TX ring, %d RX ring\n",
	    sc->hn_tx_ring_inuse, sc->hn_rx_ring_inuse);

	if (sc->hn_rx_ring_inuse > 1)
		hn_subchan_setup(sc);

#if __FreeBSD_version >= 1100099
	if (sc->hn_rx_ring_inuse > 1) {
		/*
		 * Reduce TCP segment aggregation limit for multiple
		 * RX rings to increase ACK timeliness.
		 */
		hn_set_lro_lenlim(sc, HN_LRO_LENLIM_MULTIRX_DEF);
	}
#endif

	if (device_info.link_state == NDIS_MEDIA_STATE_CONNECTED) {
		sc->hn_carrier = 1;
	}

	tso_maxlen = hn_tso_maxlen;
	if (tso_maxlen <= 0 || tso_maxlen > IP_MAXPACKET)
		tso_maxlen = IP_MAXPACKET;

	ifp->if_hw_tsomaxsegcount = HN_TX_DATA_SEGCNT_MAX;
	ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
	ifp->if_hw_tsomax = tso_maxlen -
	    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
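	/*
	 * E.g. with the default tso_maxlen of IP_MAXPACKET (65535),
	 * this yields if_hw_tsomax = 65535 - (14 + 4) = 65517 bytes.
	 */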
	ether_ifattach(ifp, device_info.mac_addr);

	if_printf(ifp, "TSO: %u/%u/%u\n", ifp->if_hw_tsomax,
	    ifp->if_hw_tsomaxsegcount, ifp->if_hw_tsomaxsegsize);

	hn_set_chim_size(sc, sc->hn_chim_szmax);
	if (hn_tx_chimney_size > 0 &&
	    hn_tx_chimney_size < sc->hn_chim_szmax)
		hn_set_chim_size(sc, hn_tx_chimney_size);

	ctx = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "nvs_version", CTLFLAG_RD,
	    &sc->hn_nvs_ver, 0, "NVS version");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "ndis_version",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    hn_ndis_version_sysctl, "A", "NDIS version");

	return (0);

failed:
	hn_destroy_tx_data(sc);
	return (error);
}

/*
 * Standard detach entry point
 */
static int
netvsc_detach(device_t dev)
{
	struct hn_softc *sc = device_get_softc(dev);

	if (bootverbose)
		printf("netvsc_detach\n");

	/*
	 * XXXKYS:  Need to clean up all our
	 * driver state; this is the driver
	 * unloading.
	 */

	/*
	 * XXXKYS:  Need to stop outgoing traffic and unregister
	 * the netdevice.
	 */

	hv_rf_on_device_remove(sc);

	hn_stop_tx_tasks(sc);

	ifmedia_removeall(&sc->hn_media);
	hn_destroy_rx_data(sc);
	hn_destroy_tx_data(sc);

	if (sc->hn_tx_taskq != hn_tx_taskq)
		taskqueue_free(sc->hn_tx_taskq);

	vmbus_xact_ctx_destroy(sc->hn_xact);
	return (0);
}

/*
 * Standard shutdown entry point
 */
static int
netvsc_shutdown(device_t dev)
{
	return (0);
}
static int
hn_txdesc_dmamap_load(struct hn_tx_ring *txr, struct hn_txdesc *txd,
    struct mbuf **m_head, bus_dma_segment_t *segs, int *nsegs)
{
	struct mbuf *m = *m_head;
	int error;

	error = bus_dmamap_load_mbuf_sg(txr->hn_tx_data_dtag, txd->data_dmap,
	    m, segs, nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		struct mbuf *m_new;

		m_new = m_collapse(m, M_NOWAIT, HN_TX_DATA_SEGCNT_MAX);
		if (m_new == NULL)
			return ENOBUFS;
		else
			m = *m_head = m_new;
		txr->hn_tx_collapsed++;

		error = bus_dmamap_load_mbuf_sg(txr->hn_tx_data_dtag,
		    txd->data_dmap, m, segs, nsegs, BUS_DMA_NOWAIT);
	}
	if (!error) {
		bus_dmamap_sync(txr->hn_tx_data_dtag, txd->data_dmap,
		    BUS_DMASYNC_PREWRITE);
		txd->flags |= HN_TXD_FLAG_DMAMAP;
	}
	return error;
}

static __inline void
hn_txdesc_dmamap_unload(struct hn_tx_ring *txr, struct hn_txdesc *txd)
{

	if (txd->flags & HN_TXD_FLAG_DMAMAP) {
		bus_dmamap_sync(txr->hn_tx_data_dtag,
		    txd->data_dmap, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(txr->hn_tx_data_dtag,
		    txd->data_dmap);
		txd->flags &= ~HN_TXD_FLAG_DMAMAP;
	}
}

static __inline int
hn_txdesc_put(struct hn_tx_ring *txr, struct hn_txdesc *txd)
{

	KASSERT((txd->flags & HN_TXD_FLAG_ONLIST) == 0,
	    ("put an onlist txd %#x", txd->flags));

	KASSERT(txd->refs > 0, ("invalid txd refs %d", txd->refs));
	if (atomic_fetchadd_int(&txd->refs, -1) != 1)
		return 0;

	hn_txdesc_dmamap_unload(txr, txd);
	if (txd->m != NULL) {
		m_freem(txd->m);
		txd->m = NULL;
	}

	txd->flags |= HN_TXD_FLAG_ONLIST;

#ifndef HN_USE_TXDESC_BUFRING
	mtx_lock_spin(&txr->hn_txlist_spin);
	KASSERT(txr->hn_txdesc_avail >= 0 &&
	    txr->hn_txdesc_avail < txr->hn_txdesc_cnt,
	    ("txdesc_put: invalid txd avail %d", txr->hn_txdesc_avail));
	txr->hn_txdesc_avail++;
	SLIST_INSERT_HEAD(&txr->hn_txlist, txd, link);
	mtx_unlock_spin(&txr->hn_txlist_spin);
#else
	atomic_add_int(&txr->hn_txdesc_avail, 1);
	buf_ring_enqueue(txr->hn_txdesc_br, txd);
#endif

	return 1;
}
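/*
 * Two freelist flavors: without HN_USE_TXDESC_BUFRING the free
 * descriptors live on an SLIST guarded by a spin mutex; with it they
 * sit in a lock-free buf_ring and hn_txdesc_avail is maintained with
 * atomics only.
 */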
static __inline struct hn_txdesc *
hn_txdesc_get(struct hn_tx_ring *txr)
{
	struct hn_txdesc *txd;

#ifndef HN_USE_TXDESC_BUFRING
	mtx_lock_spin(&txr->hn_txlist_spin);
	txd = SLIST_FIRST(&txr->hn_txlist);
	if (txd != NULL) {
		KASSERT(txr->hn_txdesc_avail > 0,
		    ("txdesc_get: invalid txd avail %d", txr->hn_txdesc_avail));
		txr->hn_txdesc_avail--;
		SLIST_REMOVE_HEAD(&txr->hn_txlist, link);
	}
	mtx_unlock_spin(&txr->hn_txlist_spin);
#else
	txd = buf_ring_dequeue_sc(txr->hn_txdesc_br);
#endif

	if (txd != NULL) {
#ifdef HN_USE_TXDESC_BUFRING
		atomic_subtract_int(&txr->hn_txdesc_avail, 1);
#endif
		KASSERT(txd->m == NULL && txd->refs == 0 &&
		    (txd->flags & HN_TXD_FLAG_ONLIST), ("invalid txd"));
		txd->flags &= ~HN_TXD_FLAG_ONLIST;
		txd->refs = 1;
	}
	return txd;
}

static __inline void
hn_txdesc_hold(struct hn_txdesc *txd)
{

	/* 0->1 transition will never work */
	KASSERT(txd->refs > 0, ("invalid refs %d", txd->refs));
	atomic_add_int(&txd->refs, 1);
}
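/*
 * Refcount protocol, sketched: hn_txdesc_get() hands out a descriptor
 * with one reference; hn_send_pkt() holds an extra one across
 * ETHER_BPF_MTAP(); each hn_txdesc_put() drops one reference and the
 * last drop recycles the descriptor.
 */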
static void
hn_txeof(struct hn_tx_ring *txr)
{
	txr->hn_has_txeof = 0;
	txr->hn_txeof(txr);
}

static void
hn_tx_done(struct hn_send_ctx *sndc, struct hn_softc *sc,
    struct vmbus_channel *chan, const void *data __unused, int dlen __unused)
{
	struct hn_txdesc *txd = sndc->hn_cbarg;
	struct hn_tx_ring *txr;

	if (sndc->hn_chim_idx != HN_NVS_CHIM_IDX_INVALID)
		hn_chim_free(sc, sndc->hn_chim_idx);

	txr = txd->txr;
	KASSERT(txr->hn_chan == chan,
	    ("channel mismatch, on chan%u, should be chan%u",
	     vmbus_chan_subidx(chan), vmbus_chan_subidx(txr->hn_chan)));

	txr->hn_has_txeof = 1;
	hn_txdesc_put(txr, txd);

	++txr->hn_txdone_cnt;
	if (txr->hn_txdone_cnt >= HN_EARLY_TXEOF_THRESH) {
		txr->hn_txdone_cnt = 0;
		if (txr->hn_oactive)
			hn_txeof(txr);
	}
}

void
netvsc_channel_rollup(struct hn_rx_ring *rxr, struct hn_tx_ring *txr)
{
#if defined(INET) || defined(INET6)
	struct lro_ctrl *lro = &rxr->hn_lro;
	struct lro_entry *queued;

	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}
#endif

	/*
	 * NOTE:
	 * 'txr' could be NULL, if multiple channels and
	 * ifnet.if_start method are enabled.
	 */
	if (txr == NULL || !txr->hn_has_txeof)
		return;

	txr->hn_txdone_cnt = 0;
	hn_txeof(txr);
}

/*
 * NOTE:
 * If this function fails, then both txd and m_head0 will be freed.
 */
static int
hn_encap(struct hn_tx_ring *txr, struct hn_txdesc *txd, struct mbuf **m_head0)
{
	bus_dma_segment_t segs[HN_TX_DATA_SEGCNT_MAX];
	int error, nsegs, i;
	struct mbuf *m_head = *m_head0;
	rndis_msg *rndis_mesg;
	rndis_packet *rndis_pkt;
	rndis_per_packet_info *rppi;
	struct rndis_hash_value *hash_value;
	uint32_t rndis_msg_size, tot_data_buf_len, send_buf_section_idx;
	int send_buf_section_size;

	tot_data_buf_len = m_head->m_pkthdr.len;

	/*
	 * extension points to the area reserved for the
	 * rndis_filter_packet, which is placed just after
	 * the netvsc_packet (and rppi struct, if present;
	 * length is updated later).
	 */
	rndis_mesg = txd->rndis_msg;
	/* XXX not necessary */
	memset(rndis_mesg, 0, HN_RNDIS_MSG_LEN);
	rndis_mesg->ndis_msg_type = REMOTE_NDIS_PACKET_MSG;

	rndis_pkt = &rndis_mesg->msg.packet;
	rndis_pkt->data_offset = sizeof(rndis_packet);
	rndis_pkt->data_length = tot_data_buf_len;
	rndis_pkt->per_pkt_info_offset = sizeof(rndis_packet);

	rndis_msg_size = RNDIS_MESSAGE_SIZE(rndis_packet);
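	/*
	 * Each per-packet-info (PPI) appended below both grows
	 * rndis_msg_size and extends the message via hv_set_rppi_data().
	 */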
	/*
	 * Set the hash value for this packet, so that the host could
	 * dispatch the TX done event for this packet back to this TX
	 * ring's channel.
	 */
	rndis_msg_size += RNDIS_HASHVAL_PPI_SIZE;
	rppi = hv_set_rppi_data(rndis_mesg, RNDIS_HASHVAL_PPI_SIZE,
	    nbl_hash_value);
	hash_value = (struct rndis_hash_value *)((uint8_t *)rppi +
	    rppi->per_packet_info_offset);
	hash_value->hash_value = txr->hn_tx_idx;

	if (m_head->m_flags & M_VLANTAG) {
		ndis_8021q_info *rppi_vlan_info;

		rndis_msg_size += RNDIS_VLAN_PPI_SIZE;
		rppi = hv_set_rppi_data(rndis_mesg, RNDIS_VLAN_PPI_SIZE,
		    ieee_8021q_info);

		rppi_vlan_info = (ndis_8021q_info *)((uint8_t *)rppi +
		    rppi->per_packet_info_offset);
		rppi_vlan_info->u1.s1.vlan_id =
		    m_head->m_pkthdr.ether_vtag & 0xfff;
	}

	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
		rndis_tcp_tso_info *tso_info;
		struct ether_vlan_header *eh;
		int ether_len;

		/*
		 * XXX need m_pullup and use mtodo
		 */
		eh = mtod(m_head, struct ether_vlan_header*);
		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
			ether_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		else
			ether_len = ETHER_HDR_LEN;

		rndis_msg_size += RNDIS_TSO_PPI_SIZE;
		rppi = hv_set_rppi_data(rndis_mesg, RNDIS_TSO_PPI_SIZE,
		    tcp_large_send_info);

		tso_info = (rndis_tcp_tso_info *)((uint8_t *)rppi +
		    rppi->per_packet_info_offset);
		tso_info->lso_v2_xmit.type =
		    RNDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;

#ifdef INET
		if (m_head->m_pkthdr.csum_flags & CSUM_IP_TSO) {
			struct ip *ip =
			    (struct ip *)(m_head->m_data + ether_len);
			unsigned long iph_len = ip->ip_hl << 2;
			struct tcphdr *th =
			    (struct tcphdr *)((caddr_t)ip + iph_len);

			tso_info->lso_v2_xmit.ip_version =
			    RNDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
			ip->ip_len = 0;
			ip->ip_sum = 0;

			th->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
		}
#endif
#if defined(INET6) && defined(INET)
		else
#endif
#ifdef INET6
		{
			struct ip6_hdr *ip6 = (struct ip6_hdr *)
			    (m_head->m_data + ether_len);
			struct tcphdr *th = (struct tcphdr *)(ip6 + 1);

			tso_info->lso_v2_xmit.ip_version =
			    RNDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
			ip6->ip6_plen = 0;
			th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
		}
#endif
		tso_info->lso_v2_xmit.tcp_header_offset = 0;
		tso_info->lso_v2_xmit.mss = m_head->m_pkthdr.tso_segsz;
	} else if (m_head->m_pkthdr.csum_flags & txr->hn_csum_assist) {
		rndis_tcp_ip_csum_info *csum_info;

		rndis_msg_size += RNDIS_CSUM_PPI_SIZE;
		rppi = hv_set_rppi_data(rndis_mesg, RNDIS_CSUM_PPI_SIZE,
		    tcpip_chksum_info);
		csum_info = (rndis_tcp_ip_csum_info *)((uint8_t *)rppi +
		    rppi->per_packet_info_offset);

		csum_info->xmit.is_ipv4 = 1;
		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
			csum_info->xmit.ip_header_csum = 1;

		if (m_head->m_pkthdr.csum_flags & CSUM_TCP) {
			csum_info->xmit.tcp_csum = 1;
			csum_info->xmit.tcp_header_offset = 0;
		} else if (m_head->m_pkthdr.csum_flags & CSUM_UDP) {
			csum_info->xmit.udp_csum = 1;
		}
	}

	rndis_mesg->msg_len = tot_data_buf_len + rndis_msg_size;
	tot_data_buf_len = rndis_mesg->msg_len;

	/*
	 * Chimney send, if the packet could fit into one chimney buffer.
	 */
	if (tot_data_buf_len < txr->hn_chim_size) {
		txr->hn_tx_chimney_tried++;
		send_buf_section_idx = hn_chim_alloc(txr->hn_sc);
		if (send_buf_section_idx != HN_NVS_CHIM_IDX_INVALID) {
			uint8_t *dest = txr->hn_sc->hn_chim +
			    (send_buf_section_idx * txr->hn_sc->hn_chim_szmax);
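			/*
			 * The chimney buffer is carved into
			 * hn_chim_szmax-byte sections and
			 * send_buf_section_idx selects this packet's
			 * section: RNDIS header first, mbuf data after.
			 */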
			memcpy(dest, rndis_mesg, rndis_msg_size);
			dest += rndis_msg_size;
			m_copydata(m_head, 0, m_head->m_pkthdr.len, dest);

			send_buf_section_size = tot_data_buf_len;
			txr->hn_gpa_cnt = 0;
			txr->hn_tx_chimney++;
			goto done;
		}
	}

	error = hn_txdesc_dmamap_load(txr, txd, &m_head, segs, &nsegs);
	if (__predict_false(error)) {
		int freed;

		/*
		 * This mbuf is not linked w/ the txd yet, so free it now.
		 */
		m_freem(m_head);
		*m_head0 = NULL;

		freed = hn_txdesc_put(txr, txd);
		KASSERT(freed != 0,
		    ("fail to free txd upon txdma error"));

		txr->hn_txdma_failed++;
		if_inc_counter(txr->hn_sc->hn_ifp, IFCOUNTER_OERRORS, 1);
		return error;
	}
	*m_head0 = m_head;

	/* +1 RNDIS packet message */
	txr->hn_gpa_cnt = nsegs + 1;

	/* send packet with page buffer */
	txr->hn_gpa[0].gpa_page = atop(txd->rndis_msg_paddr);
	txr->hn_gpa[0].gpa_ofs = txd->rndis_msg_paddr & PAGE_MASK;
	txr->hn_gpa[0].gpa_len = rndis_msg_size;

	/*
	 * Fill the page buffers with mbuf info after the page
	 * buffer for RNDIS packet message.
	 */
	for (i = 0; i < nsegs; ++i) {
		struct vmbus_gpa *gpa = &txr->hn_gpa[i + 1];

		gpa->gpa_page = atop(segs[i].ds_addr);
		gpa->gpa_ofs = segs[i].ds_addr & PAGE_MASK;
		gpa->gpa_len = segs[i].ds_len;
	}

	send_buf_section_idx = HN_NVS_CHIM_IDX_INVALID;
	send_buf_section_size = 0;
done:
	txd->m = m_head;

	/* Set the completion routine */
	hn_send_ctx_init(&txd->send_ctx, hn_tx_done, txd,
	    send_buf_section_idx, send_buf_section_size);

	return 0;
}
/*
 * NOTE:
 * If this function fails, then txd will be freed, but the mbuf
 * associated w/ the txd will _not_ be freed.
 */
static int
hn_send_pkt(struct ifnet *ifp, struct hn_tx_ring *txr, struct hn_txdesc *txd)
{
	int error, send_failed = 0;

again:
	/*
	 * Make sure that txd is not freed before ETHER_BPF_MTAP.
	 */
	hn_txdesc_hold(txd);
	error = hv_nv_on_send(txr->hn_chan, HN_NVS_RNDIS_MTYPE_DATA,
	    &txd->send_ctx, txr->hn_gpa, txr->hn_gpa_cnt);
	if (!error) {
		ETHER_BPF_MTAP(ifp, txd->m);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if (!hn_use_if_start) {
			if_inc_counter(ifp, IFCOUNTER_OBYTES,
			    txd->m->m_pkthdr.len);
			if (txd->m->m_flags & M_MCAST)
				if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
		}
		txr->hn_pkts++;
	}
	hn_txdesc_put(txr, txd);

	if (__predict_false(error)) {
		int freed;

		/*
		 * This should "really rarely" happen.
		 *
		 * XXX Too many RX to be acked or too many sideband
		 * commands to run?  Ask netvsc_channel_rollup()
		 * to kick start later.
		 */
		txr->hn_has_txeof = 1;
		if (!send_failed) {
			txr->hn_send_failed++;
			send_failed = 1;
			/*
			 * Try sending again after setting hn_has_txeof,
			 * in case we missed the last
			 * netvsc_channel_rollup().
			 */
			goto again;
		}
		if_printf(ifp, "send failed\n");

		/*
		 * Caller will perform further processing on the
		 * associated mbuf, so don't free it in hn_txdesc_put();
		 * only unload it from the DMA map in hn_txdesc_put(),
		 * if it was loaded.
		 */
		txd->m = NULL;
		freed = hn_txdesc_put(txr, txd);
		KASSERT(freed,
		    ("fail to free txd upon send error"));

		txr->hn_send_failed++;
	}
	return error;
}
/*
 * Start a transmit of one or more packets
 */
static int
hn_start_locked(struct hn_tx_ring *txr, int len)
{
	struct hn_softc *sc = txr->hn_sc;
	struct ifnet *ifp = sc->hn_ifp;

	KASSERT(hn_use_if_start,
	    ("hn_start_locked is called, when if_start is disabled"));
	KASSERT(txr == &sc->hn_tx_ring[0], ("not the first TX ring"));
	mtx_assert(&txr->hn_tx_lock, MA_OWNED);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return 0;

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		struct hn_txdesc *txd;
		struct mbuf *m_head;
		int error;

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (len > 0 && m_head->m_pkthdr.len > len) {
			/*
			 * This sending could be time consuming; let callers
			 * dispatch this packet sending (and the sending of
			 * any follow-up packets) to the tx taskqueue.
			 */
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			return 1;
		}

		txd = hn_txdesc_get(txr);
		if (txd == NULL) {
			txr->hn_no_txdescs++;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
			break;
		}

		error = hn_encap(txr, txd, &m_head);
		if (error) {
			/* Both txd and m_head are freed */
			continue;
		}

		error = hn_send_pkt(ifp, txr, txd);
		if (__predict_false(error)) {
			/* txd is freed, but m_head is not */
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
			break;
		}
	}
	return 0;
}
/*
 * Link up/down notification
 */
void
netvsc_linkstatus_callback(struct hn_softc *sc, uint32_t status)
{
	if (status == 1)
		sc->hn_carrier = 1;
	else
		sc->hn_carrier = 0;
}

/*
 * Append the specified data to the indicated mbuf chain;
 * extend the mbuf chain if the new data does not fit in
 * existing space.
 *
 * This is a minor rewrite of m_append() from sys/kern/uipc_mbuf.c.
 * There should be an equivalent in the kernel mbuf code,
 * but there does not appear to be one yet.
 *
 * Differs from m_append() in that additional mbufs are
 * allocated with cluster size MJUMPAGESIZE, and filled
 * accordingly.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
static int
hv_m_append(struct mbuf *m0, int len, c_caddr_t cp)
{
	struct mbuf *m, *n;
	int remainder, space;

	for (m = m0; m->m_next != NULL; m = m->m_next)
		;
	remainder = len;
	space = M_TRAILINGSPACE(m);
	if (space > 0) {
		/*
		 * Copy into available space.
		 */
		if (space > remainder)
			space = remainder;
		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
		m->m_len += space;
		cp += space;
		remainder -= space;
	}
	while (remainder > 0) {
		/*
		 * Allocate a new mbuf; could check space
		 * and allocate a cluster instead.
		 */
		n = m_getjcl(M_NOWAIT, m->m_type, 0, MJUMPAGESIZE);
		if (n == NULL)
			break;
		n->m_len = min(MJUMPAGESIZE, remainder);
		bcopy(cp, mtod(n, caddr_t), n->m_len);
		cp += n->m_len;
		remainder -= n->m_len;
		m->m_next = n;
		m = n;
	}
	if (m0->m_flags & M_PKTHDR)
		m0->m_pkthdr.len += len - remainder;

	return (remainder == 0);
}
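/*
 * E.g. netvsc_recv() below uses this to append 'dlen' bytes from the
 * host receive buffer onto a freshly allocated cluster mbuf.
 */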
#if defined(INET) || defined(INET6)
static __inline int
hn_lro_rx(struct lro_ctrl *lc, struct mbuf *m)
{
#if __FreeBSD_version >= 1100095
	if (hn_lro_mbufq_depth) {
		tcp_lro_queue_mbuf(lc, m);
		return 0;
	}
#endif
	return tcp_lro_rx(lc, m, 0);
}
#endif
/*
 * Called when we receive a data packet from the "wire" on the
 * specified device
 *
 * Note: This is no longer used as a callback
 */
int
netvsc_recv(struct hn_rx_ring *rxr, const void *data, int dlen,
    const struct hn_recvinfo *info)
{
	struct ifnet *ifp = rxr->hn_ifp;
	struct mbuf *m_new;
	int size, do_lro = 0, do_csum = 1;
	int hash_type = M_HASHTYPE_OPAQUE;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return (0);

	/*
	 * Bail out if packet contains more data than the configured MTU.
	 */
	if (dlen > (ifp->if_mtu + ETHER_HDR_LEN)) {
		return (0);
	} else if (dlen <= MHLEN) {
		m_new = m_gethdr(M_NOWAIT, MT_DATA);
		if (m_new == NULL) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			return (0);
		}
		memcpy(mtod(m_new, void *), data, dlen);
		m_new->m_pkthdr.len = m_new->m_len = dlen;
		rxr->hn_small_pkts++;
	} else {
		/*
		 * Get an mbuf with a cluster.  For packets 2K or less,
		 * get a standard 2K cluster.  For anything larger, get a
		 * 4K cluster.  Any buffers larger than 4K can cause problems
		 * if looped around to the Hyper-V TX channel, so avoid them.
		 */
		size = MCLBYTES;
		if (dlen > MCLBYTES) {
			/* 4096 */
			size = MJUMPAGESIZE;
		}

		m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, size);
		if (m_new == NULL) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			return (0);
		}

		hv_m_append(m_new, dlen, data);
	}
	m_new->m_pkthdr.rcvif = ifp;

	if (__predict_false((ifp->if_capenable & IFCAP_RXCSUM) == 0))
		do_csum = 0;

	/* receive side checksum offload */
	if (info->csum_info != HN_NDIS_RXCSUM_INFO_INVALID) {
		/* IP csum offload */
		if ((info->csum_info & NDIS_RXCSUM_INFO_IPCS_OK) && do_csum) {
			m_new->m_pkthdr.csum_flags |=
			    (CSUM_IP_CHECKED | CSUM_IP_VALID);
			rxr->hn_csum_ip++;
		}

		/* TCP/UDP csum offload */
		if ((info->csum_info & (NDIS_RXCSUM_INFO_UDPCS_OK |
		     NDIS_RXCSUM_INFO_TCPCS_OK)) && do_csum) {
			m_new->m_pkthdr.csum_flags |=
			    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
			m_new->m_pkthdr.csum_data = 0xffff;
			if (info->csum_info & NDIS_RXCSUM_INFO_TCPCS_OK)
				rxr->hn_csum_tcp++;
			else
				rxr->hn_csum_udp++;
		}

		if ((info->csum_info &
		     (NDIS_RXCSUM_INFO_TCPCS_OK | NDIS_RXCSUM_INFO_IPCS_OK)) ==
		    (NDIS_RXCSUM_INFO_TCPCS_OK | NDIS_RXCSUM_INFO_IPCS_OK))
			do_lro = 1;
	} else {
		const struct ether_header *eh;
		uint16_t etype;
		int hoff;

		hoff = sizeof(*eh);
		if (m_new->m_len < hoff)
			goto skip;
		eh = mtod(m_new, struct ether_header *);
		etype = ntohs(eh->ether_type);
		if (etype == ETHERTYPE_VLAN) {
			const struct ether_vlan_header *evl;

			hoff = sizeof(*evl);
			if (m_new->m_len < hoff)
				goto skip;
			evl = mtod(m_new, struct ether_vlan_header *);
			etype = ntohs(evl->evl_proto);
		}

		if (etype == ETHERTYPE_IP) {
			int pr;

			pr = hn_check_iplen(m_new, hoff);
			if (pr == IPPROTO_TCP) {
				if (do_csum &&
				    (rxr->hn_trust_hcsum &
				     HN_TRUST_HCSUM_TCP)) {
					rxr->hn_csum_trusted++;
					m_new->m_pkthdr.csum_flags |=
					   (CSUM_IP_CHECKED | CSUM_IP_VALID |
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
					m_new->m_pkthdr.csum_data = 0xffff;
				}
				do_lro = 1;
			} else if (pr == IPPROTO_UDP) {
				if (do_csum &&
				    (rxr->hn_trust_hcsum &
				     HN_TRUST_HCSUM_UDP)) {
					rxr->hn_csum_trusted++;
					m_new->m_pkthdr.csum_flags |=
					   (CSUM_IP_CHECKED | CSUM_IP_VALID |
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
					m_new->m_pkthdr.csum_data = 0xffff;
				}
			} else if (pr != IPPROTO_DONE && do_csum &&
			    (rxr->hn_trust_hcsum & HN_TRUST_HCSUM_IP)) {
				rxr->hn_csum_trusted++;
				m_new->m_pkthdr.csum_flags |=
				    (CSUM_IP_CHECKED | CSUM_IP_VALID);
			}
		}
	}
skip:
	if (info->vlan_info != HN_NDIS_VLAN_INFO_INVALID) {
		m_new->m_pkthdr.ether_vtag = EVL_MAKETAG(
		    NDIS_VLAN_INFO_ID(info->vlan_info),
		    NDIS_VLAN_INFO_PRI(info->vlan_info),
		    NDIS_VLAN_INFO_CFI(info->vlan_info));
		m_new->m_flags |= M_VLANTAG;
	}

	if (info->hash_info != HN_NDIS_HASH_INFO_INVALID) {
		rxr->hn_rss_pkts++;
		m_new->m_pkthdr.flowid = info->hash_value;
		if ((info->hash_info & NDIS_HASH_FUNCTION_MASK) ==
		    NDIS_HASH_FUNCTION_TOEPLITZ) {
			uint32_t type = (info->hash_info & NDIS_HASH_TYPE_MASK);

			switch (type) {
			case NDIS_HASH_IPV4:
				hash_type = M_HASHTYPE_RSS_IPV4;
				break;

			case NDIS_HASH_TCP_IPV4:
				hash_type = M_HASHTYPE_RSS_TCP_IPV4;
				break;

			case NDIS_HASH_IPV6:
				hash_type = M_HASHTYPE_RSS_IPV6;
				break;

			case NDIS_HASH_IPV6_EX:
				hash_type = M_HASHTYPE_RSS_IPV6_EX;
				break;

			case NDIS_HASH_TCP_IPV6:
				hash_type = M_HASHTYPE_RSS_TCP_IPV6;
				break;

			case NDIS_HASH_TCP_IPV6_EX:
				hash_type = M_HASHTYPE_RSS_TCP_IPV6_EX;
				break;
			}
		}
	} else {
		m_new->m_pkthdr.flowid = rxr->hn_rx_idx;
	}
	M_HASHTYPE_SET(m_new, hash_type);

	/*
	 * Note: Moved RX completion back to hv_nv_on_receive() so all
	 * messages (not just data messages) will trigger a response.
	 */

	if ((ifp->if_capenable & IFCAP_LRO) && do_lro) {
#if defined(INET) || defined(INET6)
		struct lro_ctrl *lro = &rxr->hn_lro;

		rxr->hn_lro_tried++;
		if (hn_lro_rx(lro, m_new) == 0) {
			/* DONE! */
			return 0;
		}
#endif
	}

	/* We're not holding the lock here, so don't release it */
	(*ifp->if_input)(ifp, m_new);

	return (0);
}
/*
 * Rules for using sc->temp_unusable:
 * 1.  sc->temp_unusable can only be read or written while holding NV_LOCK()
 * 2.  code reading sc->temp_unusable under NV_LOCK(), and finding
 *     sc->temp_unusable set, must release NV_LOCK() and exit
 * 3.  to retain exclusive control of the interface,
 *     sc->temp_unusable must be set by code before releasing NV_LOCK()
 * 4.  only code setting sc->temp_unusable can clear sc->temp_unusable
 * 5.  code setting sc->temp_unusable must eventually clear sc->temp_unusable
 */

/*
 * Standard ioctl entry point.  Called when the user wants to configure
 * the interface.
 */
static int
hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	hn_softc_t *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
#ifdef INET
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	netvsc_device_info device_info;
	int mask, error = 0, ring_cnt;
	int retry_cnt = 500;

	switch (cmd) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				hn_ifinit(sc);
			arp_ifinit(ifp, ifa);
		} else
#endif
			error = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCSIFMTU:
		/* Check MTU value change */
		if (ifp->if_mtu == ifr->ifr_mtu)
			break;

		if (ifr->ifr_mtu > NETVSC_MAX_CONFIGURABLE_MTU) {
			error = EINVAL;
			break;
		}

		/* Obtain and record requested MTU */
		ifp->if_mtu = ifr->ifr_mtu;

#if __FreeBSD_version >= 1100099
		/*
		 * Make sure that LRO aggregation length limit is still
		 * valid, after the MTU change.
		 */
		if (sc->hn_rx_ring[0].hn_lro.lro_length_lim <
		    HN_LRO_LENLIM_MIN(ifp))
			hn_set_lro_lenlim(sc, HN_LRO_LENLIM_MIN(ifp));
#endif

		do {
			NV_LOCK(sc);
			if (!sc->temp_unusable) {
				sc->temp_unusable = TRUE;
				retry_cnt = -1;
			}
			NV_UNLOCK(sc);
			if (retry_cnt > 0) {
				retry_cnt--;
				DELAY(5 * 1000);
			}
		} while (retry_cnt > 0);

		if (retry_cnt == 0) {
			error = EBUSY;
			break;
		}

		/* We must remove and add back the device to cause the new
		 * MTU to take effect.  This includes tearing down, but not
		 * deleting the channel, then bringing it back up.
		 */
		error = hv_rf_on_device_remove(sc);
		if (error) {
			NV_LOCK(sc);
			sc->temp_unusable = FALSE;
			NV_UNLOCK(sc);
			break;
		}

		/* Wait for subchannels to be destroyed */
		vmbus_subchan_drain(sc->hn_prichan);

		ring_cnt = sc->hn_rx_ring_inuse;
		error = hv_rf_on_device_add(sc, &device_info, &ring_cnt,
		    &sc->hn_rx_ring[0]);
		if (error) {
			NV_LOCK(sc);
			sc->temp_unusable = FALSE;
			NV_UNLOCK(sc);
			break;
		}
		/* # of channels can _not_ be changed */
		KASSERT(sc->hn_rx_ring_inuse == ring_cnt,
		    ("RX ring count %d and channel count %u mismatch",
		     sc->hn_rx_ring_cnt, ring_cnt));
		if (sc->hn_rx_ring_inuse > 1) {
			int r;

			/*
			 * Skip the rings on primary channel; they are
			 * handled by the hv_rf_on_device_add() above.
			 */
			for (r = 1; r < sc->hn_rx_ring_cnt; ++r) {
				sc->hn_rx_ring[r].hn_rx_flags &=
				    ~HN_RX_FLAG_ATTACHED;
			}
			for (r = 1; r < sc->hn_tx_ring_cnt; ++r) {
				sc->hn_tx_ring[r].hn_tx_flags &=
				    ~HN_TX_FLAG_ATTACHED;
			}
			hn_subchan_setup(sc);
		}

		if (sc->hn_tx_ring[0].hn_chim_size > sc->hn_chim_szmax)
			hn_set_chim_size(sc, sc->hn_chim_szmax);

		hn_ifinit_locked(sc);

		NV_LOCK(sc);
		sc->temp_unusable = FALSE;
		NV_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		do {
			NV_LOCK(sc);
			if (!sc->temp_unusable) {
				sc->temp_unusable = TRUE;
				retry_cnt = -1;
			}
			NV_UNLOCK(sc);
			if (retry_cnt > 0) {
				retry_cnt--;
				DELAY(5 * 1000);
			}
		} while (retry_cnt > 0);

		if (retry_cnt == 0) {
			error = EBUSY;
			break;
		}

		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC. Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.
			 */
#ifdef notyet
			/* Fixme:  Promiscuous mode? */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->hn_if_flags & IFF_PROMISC)) {
				/* do something here for Hyper-V */
			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->hn_if_flags & IFF_PROMISC) {
				/* do something here for Hyper-V */
			} else
#endif
				hn_ifinit_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				hn_stop(sc);
			}
		}
		NV_LOCK(sc);
		sc->temp_unusable = FALSE;
		NV_UNLOCK(sc);
		sc->hn_if_flags = ifp->if_flags;
		error = 0;
		break;

	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM) {
				ifp->if_hwassist |=
				    sc->hn_tx_ring[0].hn_csum_assist;
			} else {
				ifp->if_hwassist &=
				    ~sc->hn_tx_ring[0].hn_csum_assist;
			}
		}

		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;

		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;

		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if (ifp->if_capenable & IFCAP_TSO4)
				ifp->if_hwassist |= CSUM_IP_TSO;
			else
				ifp->if_hwassist &= ~CSUM_IP_TSO;
		}

		if (mask & IFCAP_TSO6) {
			ifp->if_capenable ^= IFCAP_TSO6;
			if (ifp->if_capenable & IFCAP_TSO6)
				ifp->if_hwassist |= CSUM_IP6_TSO;
			else
				ifp->if_hwassist &= ~CSUM_IP6_TSO;
		}
		error = 0;
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Fixme:  Multicast mode? */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {

			netvsc_setmulti(sc);
			error = 0;
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->hn_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
static void
hn_stop(hn_softc_t *sc)
{
	struct ifnet *ifp;
	int ret, i;

	ifp = sc->hn_ifp;

	if (bootverbose)
		printf(" Closing Device ...\n");

	atomic_clear_int(&ifp->if_drv_flags,
	    (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	for (i = 0; i < sc->hn_tx_ring_inuse; ++i)
		sc->hn_tx_ring[i].hn_oactive = 0;

	if_link_state_change(ifp, LINK_STATE_DOWN);
	sc->hn_initdone = 0;

	ret = hv_rf_on_close(sc);
}
/*
 * FreeBSD transmit entry point
 */
static void
hn_start(struct ifnet *ifp)
{
	struct hn_softc *sc = ifp->if_softc;
	struct hn_tx_ring *txr = &sc->hn_tx_ring[0];

	if (txr->hn_sched_tx)
		goto do_sched;

	if (mtx_trylock(&txr->hn_tx_lock)) {
		int sched;

		sched = hn_start_locked(txr, txr->hn_direct_tx_size);
		mtx_unlock(&txr->hn_tx_lock);
		if (!sched)
			return;
	}
do_sched:
	taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_tx_task);
}

static void
hn_start_txeof(struct hn_tx_ring *txr)
{
	struct hn_softc *sc = txr->hn_sc;
	struct ifnet *ifp = sc->hn_ifp;

	KASSERT(txr == &sc->hn_tx_ring[0], ("not the first TX ring"));

	if (txr->hn_sched_tx)
		goto do_sched;

	if (mtx_trylock(&txr->hn_tx_lock)) {
		int sched;

		atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
		sched = hn_start_locked(txr, txr->hn_direct_tx_size);
		mtx_unlock(&txr->hn_tx_lock);
		if (sched) {
			taskqueue_enqueue(txr->hn_tx_taskq,
			    &txr->hn_tx_task);
		}
	} else {
do_sched:
		/*
		 * Release the OACTIVE earlier, in the hope that
		 * others could catch up.  The task will clear the
		 * flag again with the hn_tx_lock to avoid possible
		 * races.
		 */
		atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
		taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_txeof_task);
	}
}
static void
hn_ifinit_locked(hn_softc_t *sc)
{
	struct ifnet *ifp;
	int ret, i;

	ifp = sc->hn_ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		return;
	}

	hv_promisc_mode = 1;

	ret = hv_rf_on_open(sc);
	if (ret != 0) {
		return;
	} else {
		sc->hn_initdone = 1;
	}

	atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
	for (i = 0; i < sc->hn_tx_ring_inuse; ++i)
		sc->hn_tx_ring[i].hn_oactive = 0;

	atomic_set_int(&ifp->if_drv_flags, IFF_DRV_RUNNING);
	if_link_state_change(ifp, LINK_STATE_UP);
}

static void
hn_ifinit(void *xsc)
{
	hn_softc_t *sc = xsc;

	NV_LOCK(sc);
	if (sc->temp_unusable) {
		NV_UNLOCK(sc);
		return;
	}
	sc->temp_unusable = TRUE;
	NV_UNLOCK(sc);

	hn_ifinit_locked(sc);

	NV_LOCK(sc);
	sc->temp_unusable = FALSE;
	NV_UNLOCK(sc);
}

static void
hn_watchdog(struct ifnet *ifp)
{
	hn_softc_t *sc;

	sc = ifp->if_softc;
	printf("hn%d: watchdog timeout -- resetting\n", sc->hn_unit);
	hn_ifinit(sc);    /*???*/
}
#if __FreeBSD_version >= 1100099

static int
hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	unsigned int lenlim;
	int error;

	lenlim = sc->hn_rx_ring[0].hn_lro.lro_length_lim;
	error = sysctl_handle_int(oidp, &lenlim, 0, req);
	if (error || req->newptr == NULL)
		return error;

	if (lenlim < HN_LRO_LENLIM_MIN(sc->hn_ifp) ||
	    lenlim > TCP_LRO_LENGTH_MAX)
		return EINVAL;

	hn_set_lro_lenlim(sc, lenlim);
	return 0;
}

static int
hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int ackcnt, error, i;

	/*
	 * lro_ackcnt_lim is the append count limit;
	 * +1 to turn it into the aggregation limit.
	 */
	ackcnt = sc->hn_rx_ring[0].hn_lro.lro_ackcnt_lim + 1;
	error = sysctl_handle_int(oidp, &ackcnt, 0, req);
	if (error || req->newptr == NULL)
		return error;

	if (ackcnt < 2 || ackcnt > (TCP_LRO_ACKCNT_MAX + 1))
		return EINVAL;

	/*
	 * Convert the aggregation limit back to the append
	 * count limit.
	 */
	--ackcnt;
	NV_LOCK(sc);
	for (i = 0; i < sc->hn_rx_ring_inuse; ++i)
		sc->hn_rx_ring[i].hn_lro.lro_ackcnt_lim = ackcnt;
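	/*
	 * E.g. writing 2 through this sysctl (aggregate at most 2 ACKs)
	 * stores lro_ackcnt_lim = 1 (append at most 1 ACK).
	 */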
	NV_UNLOCK(sc);
	return 0;
}
#endif

static int
hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int hcsum = arg2;
	int on, error, i;

	on = 0;
	if (sc->hn_rx_ring[0].hn_trust_hcsum & hcsum)
		on = 1;

	error = sysctl_handle_int(oidp, &on, 0, req);
	if (error || req->newptr == NULL)
		return error;

	NV_LOCK(sc);
	for (i = 0; i < sc->hn_rx_ring_inuse; ++i) {
		struct hn_rx_ring *rxr = &sc->hn_rx_ring[i];

		if (on)
			rxr->hn_trust_hcsum |= hcsum;
		else
			rxr->hn_trust_hcsum &= ~hcsum;
	}
	NV_UNLOCK(sc);
	return 0;
}

static int
hn_chim_size_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int chim_size, error;

	chim_size = sc->hn_tx_ring[0].hn_chim_size;
	error = sysctl_handle_int(oidp, &chim_size, 0, req);
	if (error || req->newptr == NULL)
		return error;

	if (chim_size > sc->hn_chim_szmax || chim_size <= 0)
		return EINVAL;

	hn_set_chim_size(sc, chim_size);
	return 0;
}
#if __FreeBSD_version < 1100095
static int
hn_rx_stat_int_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int ofs = arg2, i, error;
	struct hn_rx_ring *rxr;
	uint64_t stat;

	stat = 0;
	for (i = 0; i < sc->hn_rx_ring_inuse; ++i) {
		rxr = &sc->hn_rx_ring[i];
		stat += *((int *)((uint8_t *)rxr + ofs));
	}

	error = sysctl_handle_64(oidp, &stat, 0, req);
	if (error || req->newptr == NULL)
		return error;

	/* Zero out this stat. */
	for (i = 0; i < sc->hn_rx_ring_inuse; ++i) {
		rxr = &sc->hn_rx_ring[i];
		*((int *)((uint8_t *)rxr + ofs)) = 0;
	}
	return 0;
}
#else
static int
hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int ofs = arg2, i, error;
	struct hn_rx_ring *rxr;
	uint64_t stat;

	stat = 0;
	for (i = 0; i < sc->hn_rx_ring_inuse; ++i) {
		rxr = &sc->hn_rx_ring[i];
		stat += *((uint64_t *)((uint8_t *)rxr + ofs));
	}

	error = sysctl_handle_64(oidp, &stat, 0, req);
	if (error || req->newptr == NULL)
		return error;

	/* Zero out this stat. */
	for (i = 0; i < sc->hn_rx_ring_inuse; ++i) {
		rxr = &sc->hn_rx_ring[i];
		*((uint64_t *)((uint8_t *)rxr + ofs)) = 0;
	}
	return 0;
}
#endif

static int
hn_rx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int ofs = arg2, i, error;
	struct hn_rx_ring *rxr;
	u_long stat;

	stat = 0;
	for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
		rxr = &sc->hn_rx_ring[i];
		stat += *((u_long *)((uint8_t *)rxr + ofs));
	}

	error = sysctl_handle_long(oidp, &stat, 0, req);
	if (error || req->newptr == NULL)
		return error;

	/* Zero out this stat. */
	for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
		rxr = &sc->hn_rx_ring[i];
		*((u_long *)((uint8_t *)rxr + ofs)) = 0;
	}
	return 0;
}

static int
hn_tx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int ofs = arg2, i, error;
	struct hn_tx_ring *txr;
	u_long stat;

	stat = 0;
	for (i = 0; i < sc->hn_tx_ring_inuse; ++i) {
		txr = &sc->hn_tx_ring[i];
		stat += *((u_long *)((uint8_t *)txr + ofs));
	}

	error = sysctl_handle_long(oidp, &stat, 0, req);
	if (error || req->newptr == NULL)
		return error;

	/* Zero out this stat. */
	for (i = 0; i < sc->hn_tx_ring_inuse; ++i) {
		txr = &sc->hn_tx_ring[i];
		*((u_long *)((uint8_t *)txr + ofs)) = 0;
	}
	return 0;
}
static int
hn_tx_conf_int_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int ofs = arg2, i, error, conf;
	struct hn_tx_ring *txr;

	txr = &sc->hn_tx_ring[0];
	conf = *((int *)((uint8_t *)txr + ofs));

	error = sysctl_handle_int(oidp, &conf, 0, req);
	if (error || req->newptr == NULL)
		return error;

	NV_LOCK(sc);
	for (i = 0; i < sc->hn_tx_ring_inuse; ++i) {
		txr = &sc->hn_tx_ring[i];
		*((int *)((uint8_t *)txr + ofs)) = conf;
	}
	NV_UNLOCK(sc);

	return 0;
}

static int
hn_ndis_version_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	char verstr[16];

	snprintf(verstr, sizeof(verstr), "%u.%u",
	    NDIS_VERSION_MAJOR(sc->hn_ndis_ver),
	    NDIS_VERSION_MINOR(sc->hn_ndis_ver));
	return sysctl_handle_string(oidp, verstr, sizeof(verstr), req);
}
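/*
 * Returns the IP protocol (e.g. IPPROTO_TCP or IPPROTO_UDP) when the
 * complete protocol header resides in the first mbuf, or IPPROTO_DONE
 * when the packet should not be trusted; see its use in netvsc_recv().
 */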
static int
hn_check_iplen(const struct mbuf *m, int hoff)
{
	const struct ip *ip;
	int len, iphlen, iplen;
	const struct tcphdr *th;
	int thoff;				/* TCP data offset */

	len = hoff + sizeof(struct ip);

	/* The packet must be at least the size of an IP header. */
	if (m->m_pkthdr.len < len)
		return IPPROTO_DONE;

	/* The fixed IP header must reside completely in the first mbuf. */
	if (m->m_len < len)
		return IPPROTO_DONE;

	ip = mtodo(m, hoff);

	/* Bound check the packet's stated IP header length. */
	iphlen = ip->ip_hl << 2;
	if (iphlen < sizeof(struct ip))		/* minimum header length */
		return IPPROTO_DONE;

	/* The full IP header must reside completely in the one mbuf. */
	if (m->m_len < hoff + iphlen)
		return IPPROTO_DONE;

	iplen = ntohs(ip->ip_len);

	/*
	 * Check that the amount of data in the buffers is at least
	 * as much as the IP header would have us expect.
	 */
	if (m->m_pkthdr.len < hoff + iplen)
		return IPPROTO_DONE;

	/*
	 * Ignore IP fragments.
	 */
	if (ntohs(ip->ip_off) & (IP_OFFMASK | IP_MF))
		return IPPROTO_DONE;

	/*
	 * The TCP/IP or UDP/IP header must be entirely contained within
	 * the first fragment of a packet.
	 */
	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (iplen < iphlen + sizeof(struct tcphdr))
			return IPPROTO_DONE;
		if (m->m_len < hoff + iphlen + sizeof(struct tcphdr))
			return IPPROTO_DONE;
		th = (const struct tcphdr *)((const uint8_t *)ip + iphlen);
		thoff = th->th_off << 2;
		if (thoff < sizeof(struct tcphdr) || thoff + iphlen > iplen)
			return IPPROTO_DONE;
		if (m->m_len < hoff + iphlen + thoff)
			return IPPROTO_DONE;
		break;
	case IPPROTO_UDP:
		if (iplen < iphlen + sizeof(struct udphdr))
			return IPPROTO_DONE;
		if (m->m_len < hoff + iphlen + sizeof(struct udphdr))
			return IPPROTO_DONE;
		break;
	default:
		if (iplen < iphlen)
			return IPPROTO_DONE;
		break;
	}

	return ip->ip_p;
}
2238 hn_create_rx_data(struct hn_softc *sc, int ring_cnt)
2240 struct sysctl_oid_list *child;
2241 struct sysctl_ctx_list *ctx;
2242 device_t dev = sc->hn_dev;
2243 #if defined(INET) || defined(INET6)
2244 #if __FreeBSD_version >= 1100095
2251 * Create RXBUF for reception.
2254 * - It is shared by all channels.
2255 * - A large enough buffer is allocated, certain version of NVSes
2256 * may further limit the usable space.
2258 sc->hn_rxbuf = hyperv_dmamem_alloc(bus_get_dma_tag(dev),
2259 PAGE_SIZE, 0, NETVSC_RECEIVE_BUFFER_SIZE, &sc->hn_rxbuf_dma,
2260 BUS_DMA_WAITOK | BUS_DMA_ZERO);
2261 if (sc->hn_rxbuf == NULL) {
2262 device_printf(sc->hn_dev, "allocate rxbuf failed\n");
2266 sc->hn_rx_ring_cnt = ring_cnt;
2267 sc->hn_rx_ring_inuse = sc->hn_rx_ring_cnt;
2269 sc->hn_rx_ring = malloc(sizeof(struct hn_rx_ring) * sc->hn_rx_ring_cnt,
2270 M_NETVSC, M_WAITOK | M_ZERO);
2272 #if defined(INET) || defined(INET6)
2273 #if __FreeBSD_version >= 1100095
2274 lroent_cnt = hn_lro_entry_count;
2275 if (lroent_cnt < TCP_LRO_ENTRIES)
2276 lroent_cnt = TCP_LRO_ENTRIES;
2277 device_printf(dev, "LRO: entry count %d\n", lroent_cnt);
2279 #endif /* INET || INET6 */
2281 ctx = device_get_sysctl_ctx(dev);
2282 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2284 /* Create dev.hn.UNIT.rx sysctl tree */
2285 sc->hn_rx_sysctl_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "rx",
2286 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
2288 for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
2289 struct hn_rx_ring *rxr = &sc->hn_rx_ring[i];
2291 if (hn_trust_hosttcp)
2292 rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_TCP;
2293 if (hn_trust_hostudp)
2294 rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_UDP;
2295 if (hn_trust_hostip)
2296 rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_IP;
2297 rxr->hn_ifp = sc->hn_ifp;
2298 if (i < sc->hn_tx_ring_cnt)
2299 rxr->hn_txr = &sc->hn_tx_ring[i];
2300 rxr->hn_rdbuf = malloc(NETVSC_PACKET_SIZE, M_NETVSC, M_WAITOK);
2302 rxr->hn_rxbuf = sc->hn_rxbuf;
2307 #if defined(INET) || defined(INET6)
2308 #if __FreeBSD_version >= 1100095
2309 tcp_lro_init_args(&rxr->hn_lro, sc->hn_ifp, lroent_cnt,
2310 hn_lro_mbufq_depth);
2312 tcp_lro_init(&rxr->hn_lro);
2313 rxr->hn_lro.ifp = sc->hn_ifp;
2315 #if __FreeBSD_version >= 1100099
2316 rxr->hn_lro.lro_length_lim = HN_LRO_LENLIM_DEF;
2317 rxr->hn_lro.lro_ackcnt_lim = HN_LRO_ACKCNT_DEF;
2319 #endif /* INET || INET6 */
2321 if (sc->hn_rx_sysctl_tree != NULL) {
2325 * Create per RX ring sysctl tree:
2326 * dev.hn.UNIT.rx.RINGID
2328 snprintf(name, sizeof(name), "%d", i);
2329 rxr->hn_rx_sysctl_tree = SYSCTL_ADD_NODE(ctx,
2330 SYSCTL_CHILDREN(sc->hn_rx_sysctl_tree),
2331 OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
2333 if (rxr->hn_rx_sysctl_tree != NULL) {
2334 SYSCTL_ADD_ULONG(ctx,
2335 SYSCTL_CHILDREN(rxr->hn_rx_sysctl_tree),
2336 OID_AUTO, "packets", CTLFLAG_RW,
2337 &rxr->hn_pkts, "# of packets received");
2338 SYSCTL_ADD_ULONG(ctx,
2339 SYSCTL_CHILDREN(rxr->hn_rx_sysctl_tree),
2340 OID_AUTO, "rss_pkts", CTLFLAG_RW,
2342 "# of packets w/ RSS info received");
2347 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_queued",
2348 CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2349 __offsetof(struct hn_rx_ring, hn_lro.lro_queued),
2350 #if __FreeBSD_version < 1100095
2351 hn_rx_stat_int_sysctl,
2353 hn_rx_stat_u64_sysctl,
2355 "LU", "LRO queued");
2356 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_flushed",
2357 CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2358 __offsetof(struct hn_rx_ring, hn_lro.lro_flushed),
2359 #if __FreeBSD_version < 1100095
2360 hn_rx_stat_int_sysctl,
2362 hn_rx_stat_u64_sysctl,
2364 "LU", "LRO flushed");
2365 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_tried",
2366 CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2367 __offsetof(struct hn_rx_ring, hn_lro_tried),
2368 hn_rx_stat_ulong_sysctl, "LU", "# of LRO tries");
2369 #if __FreeBSD_version >= 1100099
2370 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_length_lim",
2371 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
2372 hn_lro_lenlim_sysctl, "IU",
2373 "Max # of data bytes to be aggregated by LRO");
2374 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_ackcnt_lim",
2375 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
2376 hn_lro_ackcnt_sysctl, "I",
2377 "Max # of ACKs to be aggregated by LRO");
2379 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hosttcp",
2380 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, HN_TRUST_HCSUM_TCP,
2381 hn_trust_hcsum_sysctl, "I",
2382 "Trust tcp segement verification on host side, "
2383 "when csum info is missing");
2384 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostudp",
2385 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, HN_TRUST_HCSUM_UDP,
2386 hn_trust_hcsum_sysctl, "I",
2387 "Trust udp datagram verification on host side, "
2388 "when csum info is missing");
2389 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostip",
2390 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, HN_TRUST_HCSUM_IP,
2391 hn_trust_hcsum_sysctl, "I",
2392 "Trust ip packet verification on host side, "
2393 "when csum info is missing");
2394 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_ip",
2395 CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2396 __offsetof(struct hn_rx_ring, hn_csum_ip),
2397 hn_rx_stat_ulong_sysctl, "LU", "RXCSUM IP");
2398 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_tcp",
2399 CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2400 __offsetof(struct hn_rx_ring, hn_csum_tcp),
2401 hn_rx_stat_ulong_sysctl, "LU", "RXCSUM TCP");
2402 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_udp",
2403 CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2404 __offsetof(struct hn_rx_ring, hn_csum_udp),
2405 hn_rx_stat_ulong_sysctl, "LU", "RXCSUM UDP");
2406 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_trusted",
2407 CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2408 __offsetof(struct hn_rx_ring, hn_csum_trusted),
2409 hn_rx_stat_ulong_sysctl, "LU",
2410 "# of packets that we trust host's csum verification");
2411 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "small_pkts",
2412 CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2413 __offsetof(struct hn_rx_ring, hn_small_pkts),
2414 hn_rx_stat_ulong_sysctl, "LU", "# of small packets received");
2415 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_ring_cnt",
2416 CTLFLAG_RD, &sc->hn_rx_ring_cnt, 0, "# created RX rings");
2417 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_ring_inuse",
2418 CTLFLAG_RD, &sc->hn_rx_ring_inuse, 0, "# used RX rings");
2420 return (0);
2421 }
2423 static void
2424 hn_destroy_rx_data(struct hn_softc *sc)
2425 {
2426 int i;
2428 if (sc->hn_rxbuf != NULL) {
2429 hyperv_dmamem_free(&sc->hn_rxbuf_dma, sc->hn_rxbuf);
2430 sc->hn_rxbuf = NULL;
2431 }
2433 if (sc->hn_rx_ring_cnt == 0)
2434 return;
2436 for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
2437 struct hn_rx_ring *rxr = &sc->hn_rx_ring[i];
2439 #if defined(INET) || defined(INET6)
2440 tcp_lro_free(&rxr->hn_lro);
2441 #endif
2442 free(rxr->hn_rdbuf, M_NETVSC);
2443 }
2444 free(sc->hn_rx_ring, M_NETVSC);
2445 sc->hn_rx_ring = NULL;
2447 sc->hn_rx_ring_cnt = 0;
2448 sc->hn_rx_ring_inuse = 0;
2449 }
2451 static int
2452 hn_create_tx_ring(struct hn_softc *sc, int id)
2453 {
2454 struct hn_tx_ring *txr = &sc->hn_tx_ring[id];
2455 device_t dev = sc->hn_dev;
2456 bus_dma_tag_t parent_dtag;
2457 int error, i;
2458 uint32_t version;
2460 txr->hn_sc = sc;
2461 txr->hn_tx_idx = id;
2463 #ifndef HN_USE_TXDESC_BUFRING
2464 mtx_init(&txr->hn_txlist_spin, "hn txlist", NULL, MTX_SPIN);
2465 #endif
2466 mtx_init(&txr->hn_tx_lock, "hn tx", NULL, MTX_DEF);
2468 txr->hn_txdesc_cnt = HN_TX_DESC_CNT;
2469 txr->hn_txdesc = malloc(sizeof(struct hn_txdesc) * txr->hn_txdesc_cnt,
2470 M_NETVSC, M_WAITOK | M_ZERO);
2471 #ifndef HN_USE_TXDESC_BUFRING
2472 SLIST_INIT(&txr->hn_txlist);
2473 #else
2474 txr->hn_txdesc_br = buf_ring_alloc(txr->hn_txdesc_cnt, M_NETVSC,
2475 M_WAITOK, &txr->hn_tx_lock);
2476 #endif
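/*
 * Design note (added): with HN_USE_TXDESC_BUFRING defined, the free TX
 * descriptors live in a buf_ring(9) instead of an SLIST guarded by the
 * hn_txlist_spin spinlock, which presumably reduces free-list
 * contention on the descriptor get/put paths (e.g. hn_txdesc_get()
 * used in hn_xmit() below).
 */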
2478 txr->hn_tx_taskq = sc->hn_tx_taskq;
2480 if (hn_use_if_start) {
2481 txr->hn_txeof = hn_start_txeof;
2482 TASK_INIT(&txr->hn_tx_task, 0, hn_start_taskfunc, txr);
2483 TASK_INIT(&txr->hn_txeof_task, 0, hn_start_txeof_taskfunc, txr);
2484 } else {
2485 int br_depth;
2487 txr->hn_txeof = hn_xmit_txeof;
2488 TASK_INIT(&txr->hn_tx_task, 0, hn_xmit_taskfunc, txr);
2489 TASK_INIT(&txr->hn_txeof_task, 0, hn_xmit_txeof_taskfunc, txr);
2491 br_depth = hn_get_txswq_depth(txr);
2492 txr->hn_mbuf_br = buf_ring_alloc(br_depth, M_NETVSC,
2493 M_WAITOK, &txr->hn_tx_lock);
2494 }
2496 txr->hn_direct_tx_size = hn_direct_tx_size;
2497 version = VMBUS_GET_VERSION(device_get_parent(dev), dev);
2498 if (version >= VMBUS_VERSION_WIN8_1) {
2499 txr->hn_csum_assist = HN_CSUM_ASSIST;
2500 } else {
2501 txr->hn_csum_assist = HN_CSUM_ASSIST_WIN8;
2502 if (bootverbose) {
2503 device_printf(dev, "bus version %u.%u, "
2504 "no UDP checksum offloading\n",
2505 VMBUS_VERSION_MAJOR(version),
2506 VMBUS_VERSION_MINOR(version));
2507 }
2508 }
2510 /*
2511 * Always schedule transmission instead of trying to do direct
2512 * transmission. This one gives the best performance so far.
2513 */
2514 txr->hn_sched_tx = 1;
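/*
 * Clarification (added): with hn_sched_tx set, hn_transmit() and
 * hn_xmit_txeof() below skip the mtx_trylock() direct-send fast path
 * and always defer the actual send to hn_tx_task on the per-ring
 * taskqueue.
 */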
2516 parent_dtag = bus_get_dma_tag(dev);
2518 /* DMA tag for RNDIS messages. */
2519 error = bus_dma_tag_create(parent_dtag, /* parent */
2520 HN_RNDIS_MSG_ALIGN, /* alignment */
2521 HN_RNDIS_MSG_BOUNDARY, /* boundary */
2522 BUS_SPACE_MAXADDR, /* lowaddr */
2523 BUS_SPACE_MAXADDR, /* highaddr */
2524 NULL, NULL, /* filter, filterarg */
2525 HN_RNDIS_MSG_LEN, /* maxsize */
2526 1, /* nsegments */
2527 HN_RNDIS_MSG_LEN, /* maxsegsize */
2528 0, /* flags */
2529 NULL, /* lockfunc */
2530 NULL, /* lockfuncarg */
2531 &txr->hn_tx_rndis_dtag);
2532 if (error) {
2533 device_printf(dev, "failed to create rndis dmatag\n");
2534 return error;
2535 }
2537 /* DMA tag for data. */
2538 error = bus_dma_tag_create(parent_dtag, /* parent */
2539 1, /* alignment */
2540 HN_TX_DATA_BOUNDARY, /* boundary */
2541 BUS_SPACE_MAXADDR, /* lowaddr */
2542 BUS_SPACE_MAXADDR, /* highaddr */
2543 NULL, NULL, /* filter, filterarg */
2544 HN_TX_DATA_MAXSIZE, /* maxsize */
2545 HN_TX_DATA_SEGCNT_MAX, /* nsegments */
2546 HN_TX_DATA_SEGSIZE, /* maxsegsize */
2547 0, /* flags */
2548 NULL, /* lockfunc */
2549 NULL, /* lockfuncarg */
2550 &txr->hn_tx_data_dtag);
2551 if (error) {
2552 device_printf(dev, "failed to create data dmatag\n");
2553 return error;
2554 }
2556 for (i = 0; i < txr->hn_txdesc_cnt; ++i) {
2557 struct hn_txdesc *txd = &txr->hn_txdesc[i];
2559 txd->txr = txr;
2561 /*
2562 * Allocate and load RNDIS messages.
2563 */
2564 error = bus_dmamem_alloc(txr->hn_tx_rndis_dtag,
2565 (void **)&txd->rndis_msg,
2566 BUS_DMA_WAITOK | BUS_DMA_COHERENT,
2567 &txd->rndis_msg_dmap);
2568 if (error) {
2569 device_printf(dev,
2570 "failed to allocate rndis_msg, %d\n", i);
2571 return error;
2572 }
2574 error = bus_dmamap_load(txr->hn_tx_rndis_dtag,
2575 txd->rndis_msg_dmap,
2576 txd->rndis_msg, HN_RNDIS_MSG_LEN,
2577 hyperv_dma_map_paddr, &txd->rndis_msg_paddr,
2578 BUS_DMA_NOWAIT);
2579 if (error) {
2580 device_printf(dev,
2581 "failed to load rndis_msg, %d\n", i);
2582 bus_dmamem_free(txr->hn_tx_rndis_dtag,
2583 txd->rndis_msg, txd->rndis_msg_dmap);
2584 return error;
2585 }
2587 /* DMA map for TX data. */
2588 error = bus_dmamap_create(txr->hn_tx_data_dtag, 0,
2589 &txd->data_dmap);
2590 if (error) {
2591 device_printf(dev,
2592 "failed to allocate tx data dmamap\n");
2593 bus_dmamap_unload(txr->hn_tx_rndis_dtag,
2594 txd->rndis_msg_dmap);
2595 bus_dmamem_free(txr->hn_tx_rndis_dtag,
2596 txd->rndis_msg, txd->rndis_msg_dmap);
2597 return error;
2598 }
2600 /* All set, put it to list */
2601 txd->flags |= HN_TXD_FLAG_ONLIST;
2602 #ifndef HN_USE_TXDESC_BUFRING
2603 SLIST_INSERT_HEAD(&txr->hn_txlist, txd, link);
2604 #else
2605 buf_ring_enqueue(txr->hn_txdesc_br, txd);
2606 #endif
2607 }
2608 txr->hn_txdesc_avail = txr->hn_txdesc_cnt;
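/*
 * Summary (added): each TX descriptor now owns a DMA-coherent, loaded
 * RNDIS message buffer (rndis_msg/rndis_msg_paddr) plus an unloaded
 * data DMA map (data_dmap); hn_txdesc_dmamap_destroy() below releases
 * exactly these resources when a ring is torn down.
 */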
2610 if (sc->hn_tx_sysctl_tree != NULL) {
2611 struct sysctl_oid_list *child;
2612 struct sysctl_ctx_list *ctx;
2613 char name[16];
2615 /*
2616 * Create per TX ring sysctl tree:
2617 * dev.hn.UNIT.tx.RINGID
2618 */
2619 ctx = device_get_sysctl_ctx(dev);
2620 child = SYSCTL_CHILDREN(sc->hn_tx_sysctl_tree);
2622 snprintf(name, sizeof(name), "%d", id);
2623 txr->hn_tx_sysctl_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
2624 name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
2626 if (txr->hn_tx_sysctl_tree != NULL) {
2627 child = SYSCTL_CHILDREN(txr->hn_tx_sysctl_tree);
2629 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_avail",
2630 CTLFLAG_RD, &txr->hn_txdesc_avail, 0,
2631 "# of available TX descs");
2632 if (!hn_use_if_start) {
2633 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "oactive",
2634 CTLFLAG_RD, &txr->hn_oactive, 0,
2635 "over active");
2636 }
2637 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "packets",
2638 CTLFLAG_RW, &txr->hn_pkts,
2639 "# of packets transmitted");
2640 }
2641 }
2643 return 0;
2644 }
2646 static void
2647 hn_txdesc_dmamap_destroy(struct hn_txdesc *txd)
2648 {
2649 struct hn_tx_ring *txr = txd->txr;
2651 KASSERT(txd->m == NULL, ("still has mbuf installed"));
2652 KASSERT((txd->flags & HN_TXD_FLAG_DMAMAP) == 0, ("still dma mapped"));
2654 bus_dmamap_unload(txr->hn_tx_rndis_dtag, txd->rndis_msg_dmap);
2655 bus_dmamem_free(txr->hn_tx_rndis_dtag, txd->rndis_msg,
2656 txd->rndis_msg_dmap);
2657 bus_dmamap_destroy(txr->hn_tx_data_dtag, txd->data_dmap);
2658 }
2660 static void
2661 hn_destroy_tx_ring(struct hn_tx_ring *txr)
2662 {
2663 struct hn_txdesc *txd;
2665 if (txr->hn_txdesc == NULL)
2666 return;
2668 #ifndef HN_USE_TXDESC_BUFRING
2669 while ((txd = SLIST_FIRST(&txr->hn_txlist)) != NULL) {
2670 SLIST_REMOVE_HEAD(&txr->hn_txlist, link);
2671 hn_txdesc_dmamap_destroy(txd);
2672 }
2673 #else
2674 mtx_lock(&txr->hn_tx_lock);
2675 while ((txd = buf_ring_dequeue_sc(txr->hn_txdesc_br)) != NULL)
2676 hn_txdesc_dmamap_destroy(txd);
2677 mtx_unlock(&txr->hn_tx_lock);
2678 #endif
2680 if (txr->hn_tx_data_dtag != NULL)
2681 bus_dma_tag_destroy(txr->hn_tx_data_dtag);
2682 if (txr->hn_tx_rndis_dtag != NULL)
2683 bus_dma_tag_destroy(txr->hn_tx_rndis_dtag);
2685 #ifdef HN_USE_TXDESC_BUFRING
2686 buf_ring_free(txr->hn_txdesc_br, M_NETVSC);
2687 #endif
2689 free(txr->hn_txdesc, M_NETVSC);
2690 txr->hn_txdesc = NULL;
2692 if (txr->hn_mbuf_br != NULL)
2693 buf_ring_free(txr->hn_mbuf_br, M_NETVSC);
2695 #ifndef HN_USE_TXDESC_BUFRING
2696 mtx_destroy(&txr->hn_txlist_spin);
2697 #endif
2698 mtx_destroy(&txr->hn_tx_lock);
2699 }
2701 static int
2702 hn_create_tx_data(struct hn_softc *sc, int ring_cnt)
2703 {
2704 struct sysctl_oid_list *child;
2705 struct sysctl_ctx_list *ctx;
2706 int i;
2708 /*
2709 * Create TXBUF for chimney sending.
2710 *
2711 * NOTE: It is shared by all channels.
2712 */
2713 sc->hn_chim = hyperv_dmamem_alloc(bus_get_dma_tag(sc->hn_dev),
2714 PAGE_SIZE, 0, NETVSC_SEND_BUFFER_SIZE, &sc->hn_chim_dma,
2715 BUS_DMA_WAITOK | BUS_DMA_ZERO);
2716 if (sc->hn_chim == NULL) {
2717 device_printf(sc->hn_dev, "allocate txbuf failed\n");
2718 return (ENOMEM);
2719 }
2721 sc->hn_tx_ring_cnt = ring_cnt;
2722 sc->hn_tx_ring_inuse = sc->hn_tx_ring_cnt;
2724 sc->hn_tx_ring = malloc(sizeof(struct hn_tx_ring) * sc->hn_tx_ring_cnt,
2725 M_NETVSC, M_WAITOK | M_ZERO);
2727 ctx = device_get_sysctl_ctx(sc->hn_dev);
2728 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->hn_dev));
2730 /* Create dev.hn.UNIT.tx sysctl tree */
2731 sc->hn_tx_sysctl_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "tx",
2732 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
2734 for (i = 0; i < sc->hn_tx_ring_cnt; ++i) {
2735 int error;
2737 error = hn_create_tx_ring(sc, i);
2738 if (error)
2739 return error;
2740 }
2742 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "no_txdescs",
2743 CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2744 __offsetof(struct hn_tx_ring, hn_no_txdescs),
2745 hn_tx_stat_ulong_sysctl, "LU", "# of times short of TX descs");
2746 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "send_failed",
2747 CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2748 __offsetof(struct hn_tx_ring, hn_send_failed),
2749 hn_tx_stat_ulong_sysctl, "LU", "# of hyper-v send failures");
2750 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txdma_failed",
2751 CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2752 __offsetof(struct hn_tx_ring, hn_txdma_failed),
2753 hn_tx_stat_ulong_sysctl, "LU", "# of TX DMA failures");
2754 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_collapsed",
2755 CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2756 __offsetof(struct hn_tx_ring, hn_tx_collapsed),
2757 hn_tx_stat_ulong_sysctl, "LU", "# of TX mbufs collapsed");
2758 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney",
2759 CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2760 __offsetof(struct hn_tx_ring, hn_tx_chimney),
2761 hn_tx_stat_ulong_sysctl, "LU", "# of chimney sends");
2762 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney_tried",
2763 CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2764 __offsetof(struct hn_tx_ring, hn_tx_chimney_tried),
2765 hn_tx_stat_ulong_sysctl, "LU", "# of chimney send tries");
2766 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_cnt",
2767 CTLFLAG_RD, &sc->hn_tx_ring[0].hn_txdesc_cnt, 0,
2768 "# of total TX descs");
2769 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_chimney_max",
2770 CTLFLAG_RD, &sc->hn_chim_szmax, 0,
2771 "Chimney send packet size upper boundary");
2772 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney_size",
2773 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
2774 hn_chim_size_sysctl, "I", "Chimney send packet size limit");
2775 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "direct_tx_size",
2776 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2777 __offsetof(struct hn_tx_ring, hn_direct_tx_size),
2778 hn_tx_conf_int_sysctl, "I",
2779 "Size of the packet for direct transmission");
2780 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "sched_tx",
2781 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2782 __offsetof(struct hn_tx_ring, hn_sched_tx),
2783 hn_tx_conf_int_sysctl, "I",
2784 "Always schedule transmission "
2785 "instead of doing direct transmission");
2786 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_cnt",
2787 CTLFLAG_RD, &sc->hn_tx_ring_cnt, 0, "# created TX rings");
2788 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_inuse",
2789 CTLFLAG_RD, &sc->hn_tx_ring_inuse, 0, "# used TX rings");
2791 return 0;
2792 }
2794 static void
2795 hn_set_chim_size(struct hn_softc *sc, int chim_size)
2796 {
2797 int i;
2800 for (i = 0; i < sc->hn_tx_ring_inuse; ++i)
2801 sc->hn_tx_ring[i].hn_chim_size = chim_size;
2802 }
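/*
 * Note (added): "chimney" sending copies a small packet into the
 * shared TXBUF allocated in hn_create_tx_data() instead of mapping it
 * for per-packet DMA; hn_set_chim_size() propagates the configured
 * size limit (exposed by the tx_chimney_size sysctl above) to all
 * in-use TX rings.
 */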
2805 static void
2806 hn_destroy_tx_data(struct hn_softc *sc)
2807 {
2808 int i;
2810 if (sc->hn_chim != NULL) {
2811 hyperv_dmamem_free(&sc->hn_chim_dma, sc->hn_chim);
2812 sc->hn_chim = NULL;
2813 }
2815 if (sc->hn_tx_ring_cnt == 0)
2816 return;
2818 for (i = 0; i < sc->hn_tx_ring_cnt; ++i)
2819 hn_destroy_tx_ring(&sc->hn_tx_ring[i]);
2821 free(sc->hn_tx_ring, M_NETVSC);
2822 sc->hn_tx_ring = NULL;
2824 sc->hn_tx_ring_cnt = 0;
2825 sc->hn_tx_ring_inuse = 0;
2826 }
2828 static void
2829 hn_start_taskfunc(void *xtxr, int pending __unused)
2830 {
2831 struct hn_tx_ring *txr = xtxr;
2833 mtx_lock(&txr->hn_tx_lock);
2834 hn_start_locked(txr, 0);
2835 mtx_unlock(&txr->hn_tx_lock);
2836 }
2838 static void
2839 hn_start_txeof_taskfunc(void *xtxr, int pending __unused)
2840 {
2841 struct hn_tx_ring *txr = xtxr;
2843 mtx_lock(&txr->hn_tx_lock);
2844 atomic_clear_int(&txr->hn_sc->hn_ifp->if_drv_flags, IFF_DRV_OACTIVE);
2845 hn_start_locked(txr, 0);
2846 mtx_unlock(&txr->hn_tx_lock);
2847 }
2849 static void
2850 hn_stop_tx_tasks(struct hn_softc *sc)
2851 {
2852 int i;
2854 for (i = 0; i < sc->hn_tx_ring_inuse; ++i) {
2855 struct hn_tx_ring *txr = &sc->hn_tx_ring[i];
2857 taskqueue_drain(txr->hn_tx_taskq, &txr->hn_tx_task);
2858 taskqueue_drain(txr->hn_tx_taskq, &txr->hn_txeof_task);
2859 }
2860 }
2862 static int
2863 hn_xmit(struct hn_tx_ring *txr, int len)
2864 {
2865 struct hn_softc *sc = txr->hn_sc;
2866 struct ifnet *ifp = sc->hn_ifp;
2867 struct mbuf *m_head;
2869 mtx_assert(&txr->hn_tx_lock, MA_OWNED);
2870 KASSERT(hn_use_if_start == 0,
2871 ("hn_xmit called when if_start is enabled"));
2873 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || txr->hn_oactive)
2874 return 0;
2876 while ((m_head = drbr_peek(ifp, txr->hn_mbuf_br)) != NULL) {
2877 struct hn_txdesc *txd;
2878 int error;
2880 if (len > 0 && m_head->m_pkthdr.len > len) {
2881 /*
2882 * This send could be time consuming; let callers
2883 * dispatch this packet send (and the send of any
2884 * follow-up packets) to the tx taskqueue.
2885 */
2886 drbr_putback(ifp, txr->hn_mbuf_br, m_head);
2887 return 1;
2888 }
2890 txd = hn_txdesc_get(txr);
2891 if (txd == NULL) {
2892 txr->hn_no_txdescs++;
2893 drbr_putback(ifp, txr->hn_mbuf_br, m_head);
2894 txr->hn_oactive = 1;
2895 break;
2896 }
2898 error = hn_encap(txr, txd, &m_head);
2899 if (error) {
2900 /* Both txd and m_head are freed; discard */
2901 drbr_advance(ifp, txr->hn_mbuf_br);
2902 continue;
2903 }
2905 error = hn_send_pkt(ifp, txr, txd);
2906 if (__predict_false(error)) {
2907 /* txd is freed, but m_head is not */
2908 drbr_putback(ifp, txr->hn_mbuf_br, m_head);
2909 txr->hn_oactive = 1;
2910 break;
2911 }
2914 drbr_advance(ifp, txr->hn_mbuf_br);
2915 }
2916 return 0;
2917 }
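/*
 * Contract (added for clarity): hn_xmit() returns 1 only when an
 * oversized packet (larger than 'len') was put back, i.e. the caller
 * should reschedule the send to the TX taskqueue; 0 means the software
 * queue was drained as far as descriptor availability allowed.
 */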
2919 static int
2920 hn_transmit(struct ifnet *ifp, struct mbuf *m)
2921 {
2922 struct hn_softc *sc = ifp->if_softc;
2923 struct hn_tx_ring *txr;
2924 int error, idx = 0;
2926 /*
2927 * Select the TX ring based on the packet's flowid.
2928 */
2929 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
2930 idx = m->m_pkthdr.flowid % sc->hn_tx_ring_inuse;
2931 txr = &sc->hn_tx_ring[idx];
2933 error = drbr_enqueue(ifp, txr->hn_mbuf_br, m);
2934 if (error) {
2935 if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1);
2936 return error;
2937 }
2939 if (txr->hn_oactive)
2940 return 0;
2942 if (txr->hn_sched_tx)
2943 goto do_sched;
2945 if (mtx_trylock(&txr->hn_tx_lock)) {
2946 int sched;
2948 sched = hn_xmit(txr, txr->hn_direct_tx_size);
2949 mtx_unlock(&txr->hn_tx_lock);
2950 if (!sched)
2951 return 0;
2952 }
2953 do_sched:
2954 taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_tx_task);
2955 return 0;
2956 }
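/*
 * Example (added): with 4 TX rings in use, an mbuf carrying flowid
 * 0x1235 selects ring 0x1235 % 4 == 1, keeping each flow on a stable
 * ring while spreading distinct flows across the rings.
 */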
2958 static void
2959 hn_xmit_qflush(struct ifnet *ifp)
2960 {
2961 struct hn_softc *sc = ifp->if_softc;
2962 int i;
2964 for (i = 0; i < sc->hn_tx_ring_inuse; ++i) {
2965 struct hn_tx_ring *txr = &sc->hn_tx_ring[i];
2966 struct mbuf *m;
2968 mtx_lock(&txr->hn_tx_lock);
2969 while ((m = buf_ring_dequeue_sc(txr->hn_mbuf_br)) != NULL)
2970 m_freem(m);
2971 mtx_unlock(&txr->hn_tx_lock);
2972 }
2973 if_qflush(ifp);
2974 }
2976 static void
2977 hn_xmit_txeof(struct hn_tx_ring *txr)
2978 {
2980 if (txr->hn_sched_tx)
2981 goto do_sched;
2983 if (mtx_trylock(&txr->hn_tx_lock)) {
2984 int sched;
2986 txr->hn_oactive = 0;
2987 sched = hn_xmit(txr, txr->hn_direct_tx_size);
2988 mtx_unlock(&txr->hn_tx_lock);
2989 if (sched) {
2990 taskqueue_enqueue(txr->hn_tx_taskq,
2991 &txr->hn_tx_task);
2992 }
2993 } else {
2994 do_sched:
2995 /*
2996 * Release oactive early, in the hope that others can
2997 * catch up; the task will clear oactive again while
2998 * holding hn_tx_lock, to avoid possible races.
2999 */
3000 txr->hn_oactive = 0;
3002 taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_txeof_task);
3003 }
3004 }
3006 static void
3007 hn_xmit_taskfunc(void *xtxr, int pending __unused)
3008 {
3009 struct hn_tx_ring *txr = xtxr;
3011 mtx_lock(&txr->hn_tx_lock);
3012 hn_xmit(txr, 0);
3013 mtx_unlock(&txr->hn_tx_lock);
3014 }
3016 static void
3017 hn_xmit_txeof_taskfunc(void *xtxr, int pending __unused)
3018 {
3019 struct hn_tx_ring *txr = xtxr;
3021 mtx_lock(&txr->hn_tx_lock);
3022 txr->hn_oactive = 0;
3023 hn_xmit(txr, 0);
3024 mtx_unlock(&txr->hn_tx_lock);
3025 }
3027 static void
3028 hn_channel_attach(struct hn_softc *sc, struct vmbus_channel *chan)
3029 {
3030 struct hn_rx_ring *rxr;
3031 int idx;
3033 idx = vmbus_chan_subidx(chan);
3035 KASSERT(idx >= 0 && idx < sc->hn_rx_ring_inuse,
3036 ("invalid channel index %d, should be >= 0 && < %d",
3037 idx, sc->hn_rx_ring_inuse));
3038 rxr = &sc->hn_rx_ring[idx];
3039 KASSERT((rxr->hn_rx_flags & HN_RX_FLAG_ATTACHED) == 0,
3040 ("RX ring %d already attached", idx));
3041 rxr->hn_rx_flags |= HN_RX_FLAG_ATTACHED;
3043 if (bootverbose) {
3044 if_printf(sc->hn_ifp, "link RX ring %d to channel %u\n",
3045 idx, vmbus_chan_id(chan));
3046 }
3048 if (idx < sc->hn_tx_ring_inuse) {
3049 struct hn_tx_ring *txr = &sc->hn_tx_ring[idx];
3051 KASSERT((txr->hn_tx_flags & HN_TX_FLAG_ATTACHED) == 0,
3052 ("TX ring %d already attached", idx));
3053 txr->hn_tx_flags |= HN_TX_FLAG_ATTACHED;
3055 txr->hn_chan = chan;
3056 if (bootverbose) {
3057 if_printf(sc->hn_ifp, "link TX ring %d to channel %u\n",
3058 idx, vmbus_chan_id(chan));
3059 }
3060 }
3062 /* Bind this channel to a proper CPU. */
3063 vmbus_chan_cpu_set(chan, (sc->hn_cpu + idx) % mp_ncpus);
3064 }
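/*
 * Example (added): with sc->hn_cpu == 2, mp_ncpus == 4, and channel
 * sub-index 3, the channel is bound to CPU (2 + 3) % 4 == 1, so one
 * device's channels land on consecutive CPUs, wrapping around.
 */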
3066 static void
3067 hn_subchan_attach(struct hn_softc *sc, struct vmbus_channel *chan)
3068 {
3070 KASSERT(!vmbus_chan_is_primary(chan),
3071 ("subchannel callback on primary channel"));
3072 hn_channel_attach(sc, chan);
3073 }
3075 static void
3076 hn_subchan_setup(struct hn_softc *sc)
3077 {
3078 struct vmbus_channel **subchans;
3079 int subchan_cnt = sc->hn_rx_ring_inuse - 1;
3080 int i;
3082 /* Wait for the sub-channel setup to complete. */
3083 subchans = vmbus_subchan_get(sc->hn_prichan, subchan_cnt);
3085 /* Attach the sub-channels. */
3086 for (i = 0; i < subchan_cnt; ++i) {
3087 struct vmbus_channel *subchan = subchans[i];
3089 /* NOTE: Calling order is critical. */
3090 hn_subchan_attach(sc, subchan);
3091 hv_nv_subchan_attach(subchan,
3092 &sc->hn_rx_ring[vmbus_chan_subidx(subchan)]);
3093 }
3095 /* Release the sub-channel array. */
3096 vmbus_subchan_rel(subchans, subchan_cnt);
3097 if_printf(sc->hn_ifp, "%d sub-channels setup done\n", subchan_cnt);
3098 }
3100 static void
3101 hn_tx_taskq_create(void *arg __unused)
3102 {
3103 if (!hn_share_tx_taskq)
3104 return;
3106 hn_tx_taskq = taskqueue_create("hn_tx", M_WAITOK,
3107 taskqueue_thread_enqueue, &hn_tx_taskq);
3108 taskqueue_start_threads(&hn_tx_taskq, 1, PI_NET, "hn tx");
3109 if (hn_bind_tx_taskq >= 0) {
3110 int cpu = hn_bind_tx_taskq;
3111 struct task cpuset_task;
3112 cpuset_t cpu_set;
3114 if (cpu > mp_ncpus - 1)
3115 cpu = mp_ncpus - 1;
3116 CPU_SETOF(cpu, &cpu_set);
3117 TASK_INIT(&cpuset_task, 0, hn_cpuset_setthread_task, &cpu_set);
3118 taskqueue_enqueue(hn_tx_taskq, &cpuset_task);
3119 taskqueue_drain(hn_tx_taskq, &cpuset_task);
3120 }
3121 }
3122 SYSINIT(hn_txtq_create, SI_SUB_DRIVERS, SI_ORDER_FIRST,
3123 hn_tx_taskq_create, NULL);
3125 static void
3126 hn_tx_taskq_destroy(void *arg __unused)
3127 {
3128 if (hn_tx_taskq != NULL)
3129 taskqueue_free(hn_tx_taskq);
3130 }
3131 SYSUNINIT(hn_txtq_destroy, SI_SUB_DRIVERS, SI_ORDER_FIRST,
3132 hn_tx_taskq_destroy, NULL);
3134 static device_method_t netvsc_methods[] = {
3135 /* Device interface */
3136 DEVMETHOD(device_probe, netvsc_probe),
3137 DEVMETHOD(device_attach, netvsc_attach),
3138 DEVMETHOD(device_detach, netvsc_detach),
3139 DEVMETHOD(device_shutdown, netvsc_shutdown),
3141 DEVMETHOD_END
3142 };
3144 static driver_t netvsc_driver = {
3145 NETVSC_DEVNAME,
3146 netvsc_methods,
3147 sizeof(struct hn_softc)
3148 };
3150 static devclass_t netvsc_devclass;
3152 DRIVER_MODULE(hn, vmbus, netvsc_driver, netvsc_devclass, 0, 0);
3153 MODULE_VERSION(hn, 1);
3154 MODULE_DEPEND(hn, vmbus, 1, 1, 1);