/*-
 * Copyright (c) 2010-2012 Citrix Inc.
 * Copyright (c) 2009-2012,2016 Microsoft Corp.
 * Copyright (c) 2012 NetApp Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2004-2006 Kip Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/buf_ring.h>

#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/rndis.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <netinet/ip6.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/frame.h>
#include <machine/vmparam.h>

#include <sys/rman.h>
#include <sys/mutex.h>
#include <sys/errno.h>
#include <sys/types.h>
#include <machine/atomic.h>
#include <machine/intr_machdep.h>
#include <machine/in_cksum.h>

#include <dev/hyperv/include/hyperv.h>
#include <dev/hyperv/include/hyperv_busdma.h>
#include <dev/hyperv/include/vmbus_xact.h>
#include <dev/hyperv/netvsc/hv_net_vsc.h>
#include <dev/hyperv/netvsc/hv_rndis_filter.h>
#include <dev/hyperv/netvsc/ndis.h>

#include "vmbus_if.h"
/* Short for Hyper-V network interface */
#define NETVSC_DEVNAME		"hn"

/*
 * It looks like offset 0 of buf is reserved to hold the softc pointer.
 * The sc pointer is evidently not needed, and is not presently populated.
 * The packet offset is where the netvsc_packet starts in the buffer.
 */
#define HV_NV_SC_PTR_OFFSET_IN_BUF	0
#define HV_NV_PACKET_OFFSET_IN_BUF	16
/* YYY should get it from the underlying channel */
#define HN_TX_DESC_CNT			512

#define HN_LROENT_CNT_DEF		128

#define HN_RING_CNT_DEF_MAX		8

#define HN_RNDIS_PKT_LEN					\
	(sizeof(struct rndis_packet_msg) +			\
	 HN_RNDIS_PKTINFO_SIZE(HN_NDIS_HASH_VALUE_SIZE) +	\
	 HN_RNDIS_PKTINFO_SIZE(NDIS_VLAN_INFO_SIZE) +		\
	 HN_RNDIS_PKTINFO_SIZE(NDIS_LSO2_INFO_SIZE) +		\
	 HN_RNDIS_PKTINFO_SIZE(NDIS_TXCSUM_INFO_SIZE))
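/*
 * NOTE: HN_RNDIS_PKT_LEN is the worst-case RNDIS packet message size:
 * the fixed packet message header plus every per-packet-info this
 * driver may ever attach to one packet (hash value, VLAN, LSOv2 and
 * TX checksum).  Each txdesc reserves this much space for its RNDIS
 * message, so no pktinfo append can overflow the buffer.
 */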
#define HN_RNDIS_PKT_BOUNDARY		PAGE_SIZE
#define HN_RNDIS_PKT_ALIGN		CACHE_LINE_SIZE

#define HN_TX_DATA_BOUNDARY		PAGE_SIZE
#define HN_TX_DATA_MAXSIZE		IP_MAXPACKET
#define HN_TX_DATA_SEGSIZE		PAGE_SIZE
/* -1 for RNDIS packet message */
#define HN_TX_DATA_SEGCNT_MAX		(NETVSC_PACKET_MAXPAGE - 1)

#define HN_DIRECT_TX_SIZE_DEF		128

#define HN_EARLY_TXEOF_THRESH		8
struct hn_txdesc {
#ifndef HN_USE_TXDESC_BUFRING
	SLIST_ENTRY(hn_txdesc) link;
#endif
	struct mbuf	*m;
	struct hn_tx_ring *txr;
	int		refs;
	uint32_t	flags;		/* HN_TXD_FLAG_ */
	struct hn_send_ctx send_ctx;

	bus_dmamap_t	data_dmap;

	bus_addr_t	rndis_pkt_paddr;
	struct rndis_packet_msg *rndis_pkt;
	bus_dmamap_t	rndis_pkt_dmap;
};

#define HN_TXD_FLAG_ONLIST	0x1
#define HN_TXD_FLAG_DMAMAP	0x2
/*
 * Only enable UDP checksum offloading when the host is 2012R2 or
 * later.  UDP checksum offloading doesn't work on earlier
 * Windows releases.
 */
#define HN_CSUM_ASSIST_WIN8	(CSUM_IP | CSUM_TCP)
#define HN_CSUM_ASSIST		(CSUM_IP | CSUM_UDP | CSUM_TCP)

#define HN_LRO_LENLIM_MULTIRX_DEF	(12 * ETHERMTU)
#define HN_LRO_LENLIM_DEF		(25 * ETHERMTU)
/* YYY 2*MTU is a bit rough, but should be good enough. */
#define HN_LRO_LENLIM_MIN(ifp)		(2 * (ifp)->if_mtu)

#define HN_LRO_ACKCNT_DEF		1
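/*
 * NOTE: HN_LRO_LENLIM_DEF (25 * MTU) is the aggregation limit for a
 * single RX ring; netvsc_attach() below drops the limit to
 * HN_LRO_LENLIM_MULTIRX_DEF when more than one RX ring is in use, to
 * keep ACKs timely.
 */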
/*
 * Be aware that this sleepable mutex will exhibit WITNESS errors when
 * certain TCP and ARP code paths are taken.  This appears to be a
 * well-known condition, as all other drivers checked use a sleeping
 * mutex to protect their transmit paths.
 * Also be aware that mutexes do not play well with semaphores, and there
 * is a conflicting semaphore in a certain channel code path.
 */
#define NV_LOCK_INIT(_sc, _name) \
	    mtx_init(&(_sc)->hn_lock, _name, MTX_NETWORK_LOCK, MTX_DEF)
#define NV_LOCK(_sc)		mtx_lock(&(_sc)->hn_lock)
#define NV_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->hn_lock, MA_OWNED)
#define NV_UNLOCK(_sc)		mtx_unlock(&(_sc)->hn_lock)
#define NV_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->hn_lock)
int hv_promisc_mode = 0;	/* normal mode by default */

SYSCTL_NODE(_hw, OID_AUTO, hn, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "Hyper-V network interface");
/* Trust TCP segment verification on the host side. */
static int hn_trust_hosttcp = 1;
SYSCTL_INT(_hw_hn, OID_AUTO, trust_hosttcp, CTLFLAG_RDTUN,
    &hn_trust_hosttcp, 0,
    "Trust tcp segment verification on host side, "
    "when csum info is missing (global setting)");
/* Trust UDP datagram verification on the host side. */
static int hn_trust_hostudp = 1;
SYSCTL_INT(_hw_hn, OID_AUTO, trust_hostudp, CTLFLAG_RDTUN,
    &hn_trust_hostudp, 0,
    "Trust udp datagram verification on host side, "
    "when csum info is missing (global setting)");

/* Trust IP packet verification on the host side. */
static int hn_trust_hostip = 1;
SYSCTL_INT(_hw_hn, OID_AUTO, trust_hostip, CTLFLAG_RDTUN,
    &hn_trust_hostip, 0,
    "Trust ip packet verification on host side, "
    "when csum info is missing (global setting)");
/* Limit TSO burst size */
static int hn_tso_maxlen = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, tso_maxlen, CTLFLAG_RDTUN,
    &hn_tso_maxlen, 0, "TSO burst limit");

/* Limit chimney send size */
static int hn_tx_chimney_size = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, tx_chimney_size, CTLFLAG_RDTUN,
    &hn_tx_chimney_size, 0, "Chimney send packet size limit");

/* Limit the size of packet for direct transmission */
static int hn_direct_tx_size = HN_DIRECT_TX_SIZE_DEF;
SYSCTL_INT(_hw_hn, OID_AUTO, direct_tx_size, CTLFLAG_RDTUN,
    &hn_direct_tx_size, 0, "Size of the packet for direct transmission");
#if defined(INET) || defined(INET6)
#if __FreeBSD_version >= 1100095
static int hn_lro_entry_count = HN_LROENT_CNT_DEF;
SYSCTL_INT(_hw_hn, OID_AUTO, lro_entry_count, CTLFLAG_RDTUN,
    &hn_lro_entry_count, 0, "LRO entry count");
#endif
#endif
static int hn_share_tx_taskq = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, share_tx_taskq, CTLFLAG_RDTUN,
    &hn_share_tx_taskq, 0, "Enable shared TX taskqueue");

static struct taskqueue	*hn_tx_taskq;
#ifndef HN_USE_TXDESC_BUFRING
static int hn_use_txdesc_bufring = 0;
#else
static int hn_use_txdesc_bufring = 1;
#endif
SYSCTL_INT(_hw_hn, OID_AUTO, use_txdesc_bufring, CTLFLAG_RD,
    &hn_use_txdesc_bufring, 0, "Use buf_ring for TX descriptors");
static int hn_bind_tx_taskq = -1;
SYSCTL_INT(_hw_hn, OID_AUTO, bind_tx_taskq, CTLFLAG_RDTUN,
    &hn_bind_tx_taskq, 0, "Bind TX taskqueue to the specified cpu");

static int hn_use_if_start = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, use_if_start, CTLFLAG_RDTUN,
    &hn_use_if_start, 0, "Use if_start TX method");

static int hn_chan_cnt = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, chan_cnt, CTLFLAG_RDTUN,
    &hn_chan_cnt, 0,
    "# of channels to use; each channel has one RX ring and one TX ring");
static int hn_tx_ring_cnt = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, tx_ring_cnt, CTLFLAG_RDTUN,
    &hn_tx_ring_cnt, 0, "# of TX rings to use");

static int hn_tx_swq_depth = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, tx_swq_depth, CTLFLAG_RDTUN,
    &hn_tx_swq_depth, 0, "Depth of IFQ or BUFRING");
#if __FreeBSD_version >= 1100095
static u_int hn_lro_mbufq_depth = 0;
SYSCTL_UINT(_hw_hn, OID_AUTO, lro_mbufq_depth, CTLFLAG_RDTUN,
    &hn_lro_mbufq_depth, 0, "Depth of LRO mbuf queue");
#endif

static u_int hn_cpu_index;
/*
 * Forward declarations
 */
static void hn_stop(hn_softc_t *sc);
static void hn_ifinit_locked(hn_softc_t *sc);
static void hn_ifinit(void *xsc);
static int  hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int hn_start_locked(struct hn_tx_ring *txr, int len);
static void hn_start(struct ifnet *ifp);
static void hn_start_txeof(struct hn_tx_ring *);
static int hn_ifmedia_upd(struct ifnet *ifp);
static void hn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
#if __FreeBSD_version >= 1100099
static int hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS);
#endif
static int hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_chim_size_sysctl(SYSCTL_HANDLER_ARGS);
#if __FreeBSD_version < 1100095
static int hn_rx_stat_int_sysctl(SYSCTL_HANDLER_ARGS);
#else
static int hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS);
#endif
static int hn_rx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_tx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_tx_conf_int_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_ndis_version_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_check_iplen(const struct mbuf *, int);
static int hn_create_tx_ring(struct hn_softc *, int);
static void hn_destroy_tx_ring(struct hn_tx_ring *);
static int hn_create_tx_data(struct hn_softc *, int);
static void hn_destroy_tx_data(struct hn_softc *);
static void hn_start_taskfunc(void *, int);
static void hn_start_txeof_taskfunc(void *, int);
static void hn_stop_tx_tasks(struct hn_softc *);
static int hn_encap(struct hn_tx_ring *, struct hn_txdesc *, struct mbuf **);
static int hn_create_rx_data(struct hn_softc *sc, int);
static void hn_destroy_rx_data(struct hn_softc *sc);
static void hn_set_chim_size(struct hn_softc *, int);
static int hn_chan_attach(struct hn_softc *, struct vmbus_channel *);
static int hn_attach_subchans(struct hn_softc *);
static void hn_chan_callback(struct vmbus_channel *chan, void *xrxr);

static void hn_nvs_handle_notify(struct hn_softc *sc,
    const struct vmbus_chanpkt_hdr *pkt);
static void hn_nvs_handle_comp(struct hn_softc *sc, struct vmbus_channel *chan,
    const struct vmbus_chanpkt_hdr *pkt);
static void hn_nvs_handle_rxbuf(struct hn_softc *sc, struct hn_rx_ring *rxr,
    struct vmbus_channel *chan,
    const struct vmbus_chanpkt_hdr *pkthdr);
static void hn_nvs_ack_rxbuf(struct vmbus_channel *chan, uint64_t tid);

static int hn_transmit(struct ifnet *, struct mbuf *);
static void hn_xmit_qflush(struct ifnet *);
static int hn_xmit(struct hn_tx_ring *, int);
static void hn_xmit_txeof(struct hn_tx_ring *);
static void hn_xmit_taskfunc(void *, int);
static void hn_xmit_txeof_taskfunc(void *, int);
#if __FreeBSD_version >= 1100099
static void
hn_set_lro_lenlim(struct hn_softc *sc, int lenlim)
{
	int i;

	for (i = 0; i < sc->hn_rx_ring_inuse; ++i)
		sc->hn_rx_ring[i].hn_lro.lro_length_lim = lenlim;
}
#endif
static int
hn_get_txswq_depth(const struct hn_tx_ring *txr)
{

	KASSERT(txr->hn_txdesc_cnt > 0, ("tx ring is not setup yet"));
	if (hn_tx_swq_depth < txr->hn_txdesc_cnt)
		return txr->hn_txdesc_cnt;
	return hn_tx_swq_depth;
}
static int
hn_ifmedia_upd(struct ifnet *ifp __unused)
{

	return EOPNOTSUPP;
}
static void
hn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct hn_softc *sc = ifp->if_softc;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (!sc->hn_carrier) {
		ifmr->ifm_active |= IFM_NONE;
		return;
	}
	ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
}
/* {F8615163-DF3E-46c5-913F-F2D2F965ED0E} */
static const struct hyperv_guid g_net_vsc_device_type = {
	.hv_guid = {0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
		0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E}
};
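/*
 * NOTE: The first three fields of a GUID are little-endian on the
 * wire, which is why the initializer bytes above appear byte-swapped
 * relative to the GUID string in the comment (F8615163 -> 63 51 61 F8).
 */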
/*
 * Standard probe entry point.
 */
static int
netvsc_probe(device_t dev)
{
	if (VMBUS_PROBE_GUID(device_get_parent(dev), dev,
	    &g_net_vsc_device_type) == 0) {
		device_set_desc(dev, "Hyper-V Network Interface");
		return BUS_PROBE_DEFAULT;
	}
	return ENXIO;
}
static void
hn_cpuset_setthread_task(void *xmask, int pending __unused)
{
	cpuset_t *mask = xmask;
	int error;

	error = cpuset_setthread(curthread->td_tid, mask);
	if (error) {
		panic("curthread=%ju: can't pin; error=%d",
		    (uintmax_t)curthread->td_tid, error);
	}
}
/*
 * Standard attach entry point.
 *
 * Called when the driver is loaded.  It allocates needed resources,
 * and initializes the "hardware" and software.
 */
static int
netvsc_attach(device_t dev)
{
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx;
	netvsc_device_info device_info;
	hn_softc_t *sc;
	int unit = device_get_unit(dev);
	struct ifnet *ifp = NULL;
	int error, ring_cnt, tx_ring_cnt;
	int tso_maxlen;

	sc = device_get_softc(dev);
	sc->hn_unit = unit;
	sc->hn_dev = dev;
	sc->hn_prichan = vmbus_get_channel(dev);
	if (hn_tx_taskq == NULL) {
		sc->hn_tx_taskq = taskqueue_create("hn_tx", M_WAITOK,
		    taskqueue_thread_enqueue, &sc->hn_tx_taskq);
		taskqueue_start_threads(&sc->hn_tx_taskq, 1, PI_NET, "%s tx",
		    device_get_nameunit(dev));
		if (hn_bind_tx_taskq >= 0) {
			int cpu = hn_bind_tx_taskq;
			struct task cpuset_task;
			cpuset_t cpu_set;

			if (cpu > mp_ncpus - 1)
				cpu = mp_ncpus - 1;
			CPU_SETOF(cpu, &cpu_set);
			TASK_INIT(&cpuset_task, 0, hn_cpuset_setthread_task,
			    &cpu_set);
			taskqueue_enqueue(sc->hn_tx_taskq, &cpuset_task);
			taskqueue_drain(sc->hn_tx_taskq, &cpuset_task);
		}
	} else {
		sc->hn_tx_taskq = hn_tx_taskq;
	}

	NV_LOCK_INIT(sc, "NetVSCLock");
	ifp = sc->hn_ifp = sc->arpcom.ac_ifp = if_alloc(IFT_ETHER);
	ifp->if_softc = (void *)sc;
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	/*
	 * Figure out the # of RX rings (ring_cnt) and the # of TX rings
	 * to use (tx_ring_cnt).
	 *
	 * NOTE:
	 * The # of RX rings to use is same as the # of channels to use.
	 */
	ring_cnt = hn_chan_cnt;
	if (ring_cnt <= 0) {
		ring_cnt = mp_ncpus;
		if (ring_cnt > HN_RING_CNT_DEF_MAX)
			ring_cnt = HN_RING_CNT_DEF_MAX;
	} else if (ring_cnt > mp_ncpus) {
		ring_cnt = mp_ncpus;
	}

	tx_ring_cnt = hn_tx_ring_cnt;
	if (tx_ring_cnt <= 0 || tx_ring_cnt > ring_cnt)
		tx_ring_cnt = ring_cnt;
	if (hn_use_if_start) {
		/* ifnet.if_start only needs one TX ring. */
		tx_ring_cnt = 1;
	}

	/*
	 * Set the leader CPU for channels.
	 */
	sc->hn_cpu = atomic_fetchadd_int(&hn_cpu_index, ring_cnt) % mp_ncpus;
	error = hn_create_tx_data(sc, tx_ring_cnt);
	if (error)
		goto failed;
	error = hn_create_rx_data(sc, ring_cnt);
	if (error)
		goto failed;

	/*
	 * Associate the first TX/RX ring w/ the primary channel.
	 */
	error = hn_chan_attach(sc, sc->hn_prichan);
	if (error)
		goto failed;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = hn_ioctl;
	ifp->if_init = hn_ifinit;
	ifp->if_mtu = ETHERMTU;
	if (hn_use_if_start) {
		int qdepth = hn_get_txswq_depth(&sc->hn_tx_ring[0]);

		ifp->if_start = hn_start;
		IFQ_SET_MAXLEN(&ifp->if_snd, qdepth);
		ifp->if_snd.ifq_drv_maxlen = qdepth - 1;
		IFQ_SET_READY(&ifp->if_snd);
	} else {
		ifp->if_transmit = hn_transmit;
		ifp->if_qflush = hn_xmit_qflush;
	}
	ifmedia_init(&sc->hn_media, 0, hn_ifmedia_upd, hn_ifmedia_sts);
	ifmedia_add(&sc->hn_media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&sc->hn_media, IFM_ETHER | IFM_AUTO);
	/* XXX ifmedia_set really should do this for us */
	sc->hn_media.ifm_media = sc->hn_media.ifm_cur->ifm_media;
	/*
	 * Tell upper layers that we support full VLAN capability.
	 */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
	ifp->if_capabilities |=
	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_TSO |
	    IFCAP_LRO;
	ifp->if_capenable |=
	    IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_TSO |
	    IFCAP_LRO;
	ifp->if_hwassist = sc->hn_tx_ring[0].hn_csum_assist | CSUM_TSO;
	sc->hn_xact = vmbus_xact_ctx_create(bus_get_dma_tag(dev),
	    HN_XACT_REQ_SIZE, HN_XACT_RESP_SIZE, 0);
	if (sc->hn_xact == NULL)
		goto failed;

	error = hv_rf_on_device_add(sc, &device_info, &ring_cnt, ETHERMTU);
	if (error)
		goto failed;
	KASSERT(ring_cnt > 0 && ring_cnt <= sc->hn_rx_ring_inuse,
	    ("invalid channel count %d, should be less than %d",
	     ring_cnt, sc->hn_rx_ring_inuse));
	/*
	 * Set the # of TX/RX rings that could be used according to
	 * the # of channels that host offered.
	 */
	if (sc->hn_tx_ring_inuse > ring_cnt)
		sc->hn_tx_ring_inuse = ring_cnt;
	sc->hn_rx_ring_inuse = ring_cnt;
	device_printf(dev, "%d TX ring, %d RX ring\n",
	    sc->hn_tx_ring_inuse, sc->hn_rx_ring_inuse);

	if (sc->hn_rx_ring_inuse > 1) {
		error = hn_attach_subchans(sc);
		if (error)
			goto failed;
	}
#if __FreeBSD_version >= 1100099
	if (sc->hn_rx_ring_inuse > 1) {
		/*
		 * Reduce TCP segment aggregation limit for multiple
		 * RX rings to increase ACK timeliness.
		 */
		hn_set_lro_lenlim(sc, HN_LRO_LENLIM_MULTIRX_DEF);
	}
#endif

	if (device_info.link_state == NDIS_MEDIA_STATE_CONNECTED)
		sc->hn_carrier = 1;
	tso_maxlen = hn_tso_maxlen;
	if (tso_maxlen <= 0 || tso_maxlen > IP_MAXPACKET)
		tso_maxlen = IP_MAXPACKET;

	ifp->if_hw_tsomaxsegcount = HN_TX_DATA_SEGCNT_MAX;
	ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
	ifp->if_hw_tsomax = tso_maxlen -
	    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);

	ether_ifattach(ifp, device_info.mac_addr);

	if_printf(ifp, "TSO: %u/%u/%u\n", ifp->if_hw_tsomax,
	    ifp->if_hw_tsomaxsegcount, ifp->if_hw_tsomaxsegsize);
	hn_set_chim_size(sc, sc->hn_chim_szmax);
	if (hn_tx_chimney_size > 0 &&
	    hn_tx_chimney_size < sc->hn_chim_szmax)
		hn_set_chim_size(sc, hn_tx_chimney_size);
	ctx = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "nvs_version", CTLFLAG_RD,
	    &sc->hn_nvs_ver, 0, "NVS version");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "ndis_version",
	    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, sc, 0,
	    hn_ndis_version_sysctl, "A", "NDIS version");

	return (0);
failed:
	hn_destroy_tx_data(sc);
	if (ifp != NULL)
		if_free(ifp);
	return (error);
}
/*
 * Standard detach entry point
 */
static int
netvsc_detach(device_t dev)
{
	struct hn_softc *sc = device_get_softc(dev);

	if (bootverbose)
		printf("netvsc_detach\n");

	/*
	 * XXXKYS:  Need to clean up all our
	 * driver state; this is the driver
	 * unloading.
	 */

	/*
	 * XXXKYS:  Need to stop outgoing traffic and unregister
	 * the netdevice.
	 */

	hv_rf_on_device_remove(sc);

	hn_stop_tx_tasks(sc);

	ifmedia_removeall(&sc->hn_media);
	hn_destroy_rx_data(sc);
	hn_destroy_tx_data(sc);

	if (sc->hn_tx_taskq != hn_tx_taskq)
		taskqueue_free(sc->hn_tx_taskq);

	vmbus_xact_ctx_destroy(sc->hn_xact);
	return (0);
}
/*
 * Standard shutdown entry point
 */
static int
netvsc_shutdown(device_t dev)
{

	return (0);
}
static int
hn_txdesc_dmamap_load(struct hn_tx_ring *txr, struct hn_txdesc *txd,
    struct mbuf **m_head, bus_dma_segment_t *segs, int *nsegs)
{
	struct mbuf *m = *m_head;
	int error;

	error = bus_dmamap_load_mbuf_sg(txr->hn_tx_data_dtag, txd->data_dmap,
	    m, segs, nsegs, BUS_DMA_NOWAIT);
	if (error == EFBIG) {
		struct mbuf *m_new;

		m_new = m_collapse(m, M_NOWAIT, HN_TX_DATA_SEGCNT_MAX);
		if (m_new == NULL)
			return ENOBUFS;
		else
			m = *m_head = m_new;
		txr->hn_tx_collapsed++;

		error = bus_dmamap_load_mbuf_sg(txr->hn_tx_data_dtag,
		    txd->data_dmap, m, segs, nsegs, BUS_DMA_NOWAIT);
	}
	if (!error) {
		bus_dmamap_sync(txr->hn_tx_data_dtag, txd->data_dmap,
		    BUS_DMASYNC_PREWRITE);
		txd->flags |= HN_TXD_FLAG_DMAMAP;
	}
	return error;
}
static __inline void
hn_txdesc_dmamap_unload(struct hn_tx_ring *txr, struct hn_txdesc *txd)
{

	if (txd->flags & HN_TXD_FLAG_DMAMAP) {
		bus_dmamap_sync(txr->hn_tx_data_dtag,
		    txd->data_dmap, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(txr->hn_tx_data_dtag,
		    txd->data_dmap);
		txd->flags &= ~HN_TXD_FLAG_DMAMAP;
	}
}
static __inline int
hn_txdesc_put(struct hn_tx_ring *txr, struct hn_txdesc *txd)
{

	KASSERT((txd->flags & HN_TXD_FLAG_ONLIST) == 0,
	    ("put an onlist txd %#x", txd->flags));

	KASSERT(txd->refs > 0, ("invalid txd refs %d", txd->refs));
	if (atomic_fetchadd_int(&txd->refs, -1) != 1)
		return 0;

	hn_txdesc_dmamap_unload(txr, txd);
	if (txd->m != NULL) {
		m_freem(txd->m);
		txd->m = NULL;
	}

	txd->flags |= HN_TXD_FLAG_ONLIST;

#ifndef HN_USE_TXDESC_BUFRING
	mtx_lock_spin(&txr->hn_txlist_spin);
	KASSERT(txr->hn_txdesc_avail >= 0 &&
	    txr->hn_txdesc_avail < txr->hn_txdesc_cnt,
	    ("txdesc_put: invalid txd avail %d", txr->hn_txdesc_avail));
	txr->hn_txdesc_avail++;
	SLIST_INSERT_HEAD(&txr->hn_txlist, txd, link);
	mtx_unlock_spin(&txr->hn_txlist_spin);
#else
	atomic_add_int(&txr->hn_txdesc_avail, 1);
	buf_ring_enqueue(txr->hn_txdesc_br, txd);
#endif

	return 1;
}
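/*
 * txdesc reference counting, in short: hn_txdesc_get() hands out a
 * descriptor holding one reference; hn_txdesc_hold() takes an extra
 * reference (e.g. across the BPF tap in hn_send_pkt()); the final
 * hn_txdesc_put() unloads the DMA map, frees the attached mbuf and
 * returns the descriptor to the free SLIST or buf_ring.
 */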
static __inline struct hn_txdesc *
hn_txdesc_get(struct hn_tx_ring *txr)
{
	struct hn_txdesc *txd;

#ifndef HN_USE_TXDESC_BUFRING
	mtx_lock_spin(&txr->hn_txlist_spin);
	txd = SLIST_FIRST(&txr->hn_txlist);
	if (txd != NULL) {
		KASSERT(txr->hn_txdesc_avail > 0,
		    ("txdesc_get: invalid txd avail %d", txr->hn_txdesc_avail));
		txr->hn_txdesc_avail--;
		SLIST_REMOVE_HEAD(&txr->hn_txlist, link);
	}
	mtx_unlock_spin(&txr->hn_txlist_spin);
#else
	txd = buf_ring_dequeue_sc(txr->hn_txdesc_br);
#endif

	if (txd != NULL) {
#ifdef HN_USE_TXDESC_BUFRING
		atomic_subtract_int(&txr->hn_txdesc_avail, 1);
#endif
		KASSERT(txd->m == NULL && txd->refs == 0 &&
		    (txd->flags & HN_TXD_FLAG_ONLIST), ("invalid txd"));
		txd->flags &= ~HN_TXD_FLAG_ONLIST;
		txd->refs = 1;
	}
	return txd;
}
static __inline void
hn_txdesc_hold(struct hn_txdesc *txd)
{

	/* 0->1 transition will never work */
	KASSERT(txd->refs > 0, ("invalid refs %d", txd->refs));
	atomic_add_int(&txd->refs, 1);
}

static void
hn_txeof(struct hn_tx_ring *txr)
{
	txr->hn_has_txeof = 0;
	txr->hn_txeof(txr);
}
static void
hn_tx_done(struct hn_send_ctx *sndc, struct hn_softc *sc,
    struct vmbus_channel *chan, const void *data __unused, int dlen __unused)
{
	struct hn_txdesc *txd = sndc->hn_cbarg;
	struct hn_tx_ring *txr;

	if (sndc->hn_chim_idx != HN_NVS_CHIM_IDX_INVALID)
		hn_chim_free(sc, sndc->hn_chim_idx);

	txr = txd->txr;
	KASSERT(txr->hn_chan == chan,
	    ("channel mismatch, on chan%u, should be chan%u",
	     vmbus_chan_subidx(chan), vmbus_chan_subidx(txr->hn_chan)));

	txr->hn_has_txeof = 1;
	hn_txdesc_put(txr, txd);

	++txr->hn_txdone_cnt;
	if (txr->hn_txdone_cnt >= HN_EARLY_TXEOF_THRESH) {
		txr->hn_txdone_cnt = 0;
		if (txr->hn_oactive)
			hn_txeof(txr);
	}
}
static void
hn_chan_rollup(struct hn_rx_ring *rxr, struct hn_tx_ring *txr)
{
#if defined(INET) || defined(INET6)
	struct lro_ctrl *lro = &rxr->hn_lro;
	struct lro_entry *queued;

	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}
#endif

	/*
	 * NOTE:
	 * 'txr' could be NULL, if multiple channels and
	 * ifnet.if_start method are enabled.
	 */
	if (txr == NULL || !txr->hn_has_txeof)
		return;

	txr->hn_txdone_cnt = 0;
	hn_txeof(txr);
}
static __inline uint32_t
hn_rndis_pktmsg_offset(uint32_t ofs)
{

	KASSERT(ofs >= sizeof(struct rndis_packet_msg),
	    ("invalid RNDIS packet msg offset %u", ofs));
	return (ofs - __offsetof(struct rndis_packet_msg, rm_dataoffset));
}
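/*
 * NOTE: Data and per-packet-info offsets in an RNDIS packet message
 * are not relative to the start of the message but to the position of
 * the rm_dataoffset field (i.e. past rm_type and rm_len); the
 * subtraction above converts an offset computed from the start of the
 * message into that convention before it goes on the wire.
 */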
/*
 * NOTE:
 * If this function fails, then both txd and m_head0 will be freed.
 */
static int
hn_encap(struct hn_tx_ring *txr, struct hn_txdesc *txd, struct mbuf **m_head0)
{
	bus_dma_segment_t segs[HN_TX_DATA_SEGCNT_MAX];
	int error, nsegs, i;
	struct mbuf *m_head = *m_head0;
	struct rndis_packet_msg *pkt;
	uint32_t send_buf_section_idx;
	int send_buf_section_size, pktlen;
	uint32_t *pi_data;
	/*
	 * extension points to the area reserved for the
	 * rndis_filter_packet, which is placed just after
	 * the netvsc_packet (and rppi struct, if present;
	 * length is updated later).
	 */
	pkt = txd->rndis_pkt;
	pkt->rm_type = REMOTE_NDIS_PACKET_MSG;
	pkt->rm_len = sizeof(*pkt) + m_head->m_pkthdr.len;
	pkt->rm_dataoffset = sizeof(*pkt);
	pkt->rm_datalen = m_head->m_pkthdr.len;
	pkt->rm_pktinfooffset = sizeof(*pkt);
	pkt->rm_pktinfolen = 0;
	/*
	 * Set the hash value for this packet, so that the host could
	 * dispatch the TX done event for this packet back to this TX
	 * ring's channel.
	 */
	pi_data = hn_rndis_pktinfo_append(pkt, HN_RNDIS_PKT_LEN,
	    HN_NDIS_HASH_VALUE_SIZE, HN_NDIS_PKTINFO_TYPE_HASHVAL);
	*pi_data = txr->hn_tx_idx;

	if (m_head->m_flags & M_VLANTAG) {
		pi_data = hn_rndis_pktinfo_append(pkt, HN_RNDIS_PKT_LEN,
		    NDIS_VLAN_INFO_SIZE, NDIS_PKTINFO_TYPE_VLAN);
		*pi_data = NDIS_VLAN_INFO_MAKE(
		    EVL_VLANOFTAG(m_head->m_pkthdr.ether_vtag),
		    EVL_PRIOFTAG(m_head->m_pkthdr.ether_vtag),
		    EVL_CFIOFTAG(m_head->m_pkthdr.ether_vtag));
	}
	if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
#if defined(INET6) || defined(INET)
		struct ether_vlan_header *eh;
		int ether_len;

		/*
		 * XXX need m_pullup and use mtodo
		 */
		eh = mtod(m_head, struct ether_vlan_header*);
		if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
			ether_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		else
			ether_len = ETHER_HDR_LEN;

		pi_data = hn_rndis_pktinfo_append(pkt, HN_RNDIS_PKT_LEN,
		    NDIS_LSO2_INFO_SIZE, NDIS_PKTINFO_TYPE_LSO);
#ifdef INET
		if (m_head->m_pkthdr.csum_flags & CSUM_IP_TSO) {
			struct ip *ip =
			    (struct ip *)(m_head->m_data + ether_len);
			unsigned long iph_len = ip->ip_hl << 2;
			struct tcphdr *th =
			    (struct tcphdr *)((caddr_t)ip + iph_len);

			ip->ip_len = 0;
			th->th_sum = in_pseudo(ip->ip_src.s_addr,
			    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
			*pi_data = NDIS_LSO2_INFO_MAKEIPV4(0,
			    m_head->m_pkthdr.tso_segsz);
		}
#endif
#if defined(INET6) && defined(INET)
		else
#endif
#ifdef INET6
		{
			struct ip6_hdr *ip6 = (struct ip6_hdr *)
			    (m_head->m_data + ether_len);
			struct tcphdr *th = (struct tcphdr *)(ip6 + 1);

			ip6->ip6_plen = 0;
			th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
			*pi_data = NDIS_LSO2_INFO_MAKEIPV6(0,
			    m_head->m_pkthdr.tso_segsz);
		}
#endif
#endif	/* INET6 || INET */
	} else if (m_head->m_pkthdr.csum_flags & txr->hn_csum_assist) {
		pi_data = hn_rndis_pktinfo_append(pkt, HN_RNDIS_PKT_LEN,
		    NDIS_TXCSUM_INFO_SIZE, NDIS_PKTINFO_TYPE_CSUM);
		*pi_data = NDIS_TXCSUM_INFO_IPV4;

		if (m_head->m_pkthdr.csum_flags & CSUM_IP)
			*pi_data |= NDIS_TXCSUM_INFO_IPCS;

		if (m_head->m_pkthdr.csum_flags & CSUM_TCP)
			*pi_data |= NDIS_TXCSUM_INFO_TCPCS;
		else if (m_head->m_pkthdr.csum_flags & CSUM_UDP)
			*pi_data |= NDIS_TXCSUM_INFO_UDPCS;
	}
	pktlen = pkt->rm_pktinfooffset + pkt->rm_pktinfolen;
	/* Convert RNDIS packet message offsets */
	pkt->rm_dataoffset = hn_rndis_pktmsg_offset(pkt->rm_dataoffset);
	pkt->rm_pktinfooffset = hn_rndis_pktmsg_offset(pkt->rm_pktinfooffset);
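	/*
	 * What follows is the "chimney" (NVS send buffer) fast path:
	 * a sufficiently small packet is copied whole into one section
	 * of the preallocated, host-visible send buffer instead of
	 * being handed over as a guest-physical-address page list,
	 * which spares the host a round of address translation.
	 */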
	/*
	 * Chimney send, if the packet could fit into one chimney buffer.
	 */
	if (pkt->rm_len < txr->hn_chim_size) {
		txr->hn_tx_chimney_tried++;
		send_buf_section_idx = hn_chim_alloc(txr->hn_sc);
		if (send_buf_section_idx != HN_NVS_CHIM_IDX_INVALID) {
			uint8_t *dest = txr->hn_sc->hn_chim +
			    (send_buf_section_idx * txr->hn_sc->hn_chim_szmax);

			memcpy(dest, pkt, pktlen);
			dest += pktlen;
			m_copydata(m_head, 0, m_head->m_pkthdr.len, dest);

			send_buf_section_size = pkt->rm_len;
			txr->hn_gpa_cnt = 0;
			txr->hn_tx_chimney++;
			goto done;
		}
	}

	error = hn_txdesc_dmamap_load(txr, txd, &m_head, segs, &nsegs);
	if (error) {
		int freed;

		/*
		 * This mbuf is not linked w/ the txd yet, so free it now.
		 */
		m_freem(m_head);
		*m_head0 = NULL;

		freed = hn_txdesc_put(txr, txd);
		KASSERT(freed != 0,
		    ("fail to free txd upon txdma error"));

		txr->hn_txdma_failed++;
		if_inc_counter(txr->hn_sc->hn_ifp, IFCOUNTER_OERRORS, 1);
		return error;
	}
	*m_head0 = m_head;
	/* +1 RNDIS packet message */
	txr->hn_gpa_cnt = nsegs + 1;

	/* send packet with page buffer */
	txr->hn_gpa[0].gpa_page = atop(txd->rndis_pkt_paddr);
	txr->hn_gpa[0].gpa_ofs = txd->rndis_pkt_paddr & PAGE_MASK;
	txr->hn_gpa[0].gpa_len = pktlen;

	/*
	 * Fill the page buffers with mbuf info after the page
	 * buffer for RNDIS packet message.
	 */
	for (i = 0; i < nsegs; ++i) {
		struct vmbus_gpa *gpa = &txr->hn_gpa[i + 1];

		gpa->gpa_page = atop(segs[i].ds_addr);
		gpa->gpa_ofs = segs[i].ds_addr & PAGE_MASK;
		gpa->gpa_len = segs[i].ds_len;
	}

	send_buf_section_idx = HN_NVS_CHIM_IDX_INVALID;
	send_buf_section_size = 0;
done:
	txd->m = m_head;

	/* Set the completion routine */
	hn_send_ctx_init(&txd->send_ctx, hn_tx_done, txd,
	    send_buf_section_idx, send_buf_section_size);

	return 0;
}
/*
 * NOTE:
 * If this function fails, then txd will be freed, but the mbuf
 * associated w/ the txd will _not_ be freed.
 */
static int
hn_send_pkt(struct ifnet *ifp, struct hn_tx_ring *txr, struct hn_txdesc *txd)
{
	int error, send_failed = 0;

again:
	/*
	 * Make sure that txd is not freed before ETHER_BPF_MTAP.
	 */
	hn_txdesc_hold(txd);
	error = hv_nv_on_send(txr->hn_chan, HN_NVS_RNDIS_MTYPE_DATA,
	    &txd->send_ctx, txr->hn_gpa, txr->hn_gpa_cnt);
	if (!error) {
		ETHER_BPF_MTAP(ifp, txd->m);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
		if (!hn_use_if_start) {
			if_inc_counter(ifp, IFCOUNTER_OBYTES,
			    txd->m->m_pkthdr.len);
			if (txd->m->m_flags & M_MCAST)
				if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
		}
	}
	hn_txdesc_put(txr, txd);

	if (__predict_false(error)) {
		int freed;

		/*
		 * This should "really rarely" happen.
		 *
		 * XXX Too many RX to be acked or too many sideband
		 * commands to run?  Ask netvsc_channel_rollup()
		 * to kick start later.
		 */
		txr->hn_has_txeof = 1;
		if (!send_failed) {
			txr->hn_send_failed++;
			send_failed = 1;
			/*
			 * Try sending again after set hn_has_txeof;
			 * in case that we missed the last
			 * netvsc_channel_rollup().
			 */
			goto again;
		}
		if_printf(ifp, "send failed\n");

		/*
		 * Caller will perform further processing on the
		 * associated mbuf, so don't free it in hn_txdesc_put();
		 * only unload it from the DMA map in hn_txdesc_put(),
		 * if it was loaded.
		 */
		txd->m = NULL;
		freed = hn_txdesc_put(txr, txd);
		KASSERT(freed,
		    ("fail to free txd upon send error"));

		txr->hn_send_failed++;
	}
	return error;
}
/*
 * Start a transmit of one or more packets
 */
static int
hn_start_locked(struct hn_tx_ring *txr, int len)
{
	struct hn_softc *sc = txr->hn_sc;
	struct ifnet *ifp = sc->hn_ifp;

	KASSERT(hn_use_if_start,
	    ("hn_start_locked is called, when if_start is disabled"));
	KASSERT(txr == &sc->hn_tx_ring[0], ("not the first TX ring"));
	mtx_assert(&txr->hn_tx_lock, MA_OWNED);

	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return 0;

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		struct hn_txdesc *txd;
		struct mbuf *m_head;
		int error;

		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		if (len > 0 && m_head->m_pkthdr.len > len) {
			/*
			 * This sending could be time consuming; let callers
			 * dispatch this packet sending (and sending of any
			 * following up packets) to tx taskqueue.
			 */
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			return 1;
		}

		txd = hn_txdesc_get(txr);
		if (txd == NULL) {
			txr->hn_no_txdescs++;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
			break;
		}

		error = hn_encap(txr, txd, &m_head);
		if (error) {
			/* Both txd and m_head are freed */
			continue;
		}

		error = hn_send_pkt(ifp, txr, txd);
		if (__predict_false(error)) {
			/* txd is freed, but m_head is not */
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
			break;
		}
	}
	return 0;
}
/*
 * Link up/down notification
 */
void
netvsc_linkstatus_callback(struct hn_softc *sc, uint32_t status)
{
	if (status == 1)
		sc->hn_carrier = 1;
	else
		sc->hn_carrier = 0;
}
/*
 * Append the specified data to the indicated mbuf chain.
 * Extend the mbuf chain if the new data does not fit in
 * existing space.
 *
 * This is a minor rewrite of m_append() from sys/kern/uipc_mbuf.c.
 * There should be an equivalent in the kernel mbuf code,
 * but there does not appear to be one yet.
 *
 * Differs from m_append() in that additional mbufs are
 * allocated with cluster size MJUMPAGESIZE, and filled
 * accordingly.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
static int
hv_m_append(struct mbuf *m0, int len, c_caddr_t cp)
{
	struct mbuf *m, *n;
	int remainder, space;

	for (m = m0; m->m_next != NULL; m = m->m_next)
		;
	remainder = len;
	space = M_TRAILINGSPACE(m);
	if (space > 0) {
		/*
		 * Copy into available space.
		 */
		if (space > remainder)
			space = remainder;
		bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
		m->m_len += space;
		cp += space;
		remainder -= space;
	}
	while (remainder > 0) {
		/*
		 * Allocate a new mbuf; could check space
		 * and allocate a cluster instead.
		 */
		n = m_getjcl(M_DONTWAIT, m->m_type, 0, MJUMPAGESIZE);
		if (n == NULL)
			break;
		n->m_len = min(MJUMPAGESIZE, remainder);
		bcopy(cp, mtod(n, caddr_t), n->m_len);
		cp += n->m_len;
		remainder -= n->m_len;
		m->m_next = n;
		m = n;
	}
	if (m0->m_flags & M_PKTHDR)
		m0->m_pkthdr.len += len - remainder;

	return (remainder == 0);
}
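/*
 * NOTE: hv_m_append() is used by hn_rxpkt() below to copy receive
 * data that does not fit a single cluster into a chain of
 * MJUMPAGESIZE clusters.
 */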
#if defined(INET) || defined(INET6)
static __inline int
hn_lro_rx(struct lro_ctrl *lc, struct mbuf *m)
{
#if __FreeBSD_version >= 1100095
	if (hn_lro_mbufq_depth) {
		tcp_lro_queue_mbuf(lc, m);
		return 0;
	}
#endif
	return tcp_lro_rx(lc, m, 0);
}
#endif
/*
 * Called when we receive a data packet from the "wire" on the
 * specified device
 *
 * Note:  This is no longer used as a callback
 */
int
hn_rxpkt(struct hn_rx_ring *rxr, const void *data, int dlen,
    const struct hn_recvinfo *info)
{
	struct ifnet *ifp = rxr->hn_ifp;
	struct mbuf *m_new;
	int size, do_lro = 0, do_csum = 1;
	int hash_type = M_HASHTYPE_OPAQUE;

	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
		return (0);
	/*
	 * Bail out if packet contains more data than configured MTU.
	 */
	if (dlen > (ifp->if_mtu + ETHER_HDR_LEN)) {
		return (0);
	} else if (dlen <= MHLEN) {
		m_new = m_gethdr(M_NOWAIT, MT_DATA);
		if (m_new == NULL) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			return (0);
		}
		memcpy(mtod(m_new, void *), data, dlen);
		m_new->m_pkthdr.len = m_new->m_len = dlen;
		rxr->hn_small_pkts++;
	} else {
		/*
		 * Get an mbuf with a cluster.  For packets 2K or less,
		 * get a standard 2K cluster.  For anything larger, get a
		 * 4K cluster.  Any buffers larger than 4K can cause problems
		 * if looped around to the Hyper-V TX channel, so avoid them.
		 */
		size = MCLBYTES;
		if (dlen > MCLBYTES) {
			/* 4096 */
			size = MJUMPAGESIZE;
		}

		m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, size);
		if (m_new == NULL) {
			if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
			return (0);
		}

		hv_m_append(m_new, dlen, data);
	}
	m_new->m_pkthdr.rcvif = ifp;

	if (__predict_false((ifp->if_capenable & IFCAP_RXCSUM) == 0))
		do_csum = 0;
	/* receive side checksum offload */
	if (info->csum_info != HN_NDIS_RXCSUM_INFO_INVALID) {
		/* IP csum offload */
		if ((info->csum_info & NDIS_RXCSUM_INFO_IPCS_OK) && do_csum) {
			m_new->m_pkthdr.csum_flags |=
			    (CSUM_IP_CHECKED | CSUM_IP_VALID);
			rxr->hn_csum_ip++;
		}

		/* TCP/UDP csum offload */
		if ((info->csum_info & (NDIS_RXCSUM_INFO_UDPCS_OK |
		     NDIS_RXCSUM_INFO_TCPCS_OK)) && do_csum) {
			m_new->m_pkthdr.csum_flags |=
			    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
			m_new->m_pkthdr.csum_data = 0xffff;
			if (info->csum_info & NDIS_RXCSUM_INFO_TCPCS_OK)
				rxr->hn_csum_tcp++;
			else
				rxr->hn_csum_udp++;
		}

		if ((info->csum_info &
		     (NDIS_RXCSUM_INFO_TCPCS_OK | NDIS_RXCSUM_INFO_IPCS_OK)) ==
		    (NDIS_RXCSUM_INFO_TCPCS_OK | NDIS_RXCSUM_INFO_IPCS_OK))
			do_lro = 1;
	} else {
		const struct ether_header *eh;
		uint16_t etype;
		int hoff;

		hoff = sizeof(*eh);
		if (m_new->m_len < hoff)
			goto skip;
		eh = mtod(m_new, struct ether_header *);
		etype = ntohs(eh->ether_type);
		if (etype == ETHERTYPE_VLAN) {
			const struct ether_vlan_header *evl;

			hoff = sizeof(*evl);
			if (m_new->m_len < hoff)
				goto skip;
			evl = mtod(m_new, struct ether_vlan_header *);
			etype = ntohs(evl->evl_proto);
		}

		if (etype == ETHERTYPE_IP) {
			int pr;

			pr = hn_check_iplen(m_new, hoff);
			if (pr == IPPROTO_TCP) {
				if (do_csum &&
				    (rxr->hn_trust_hcsum &
				     HN_TRUST_HCSUM_TCP)) {
					rxr->hn_csum_trusted++;
					m_new->m_pkthdr.csum_flags |=
					   (CSUM_IP_CHECKED | CSUM_IP_VALID |
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
					m_new->m_pkthdr.csum_data = 0xffff;
				}
				do_lro = 1;
			} else if (pr == IPPROTO_UDP) {
				if (do_csum &&
				    (rxr->hn_trust_hcsum &
				     HN_TRUST_HCSUM_UDP)) {
					rxr->hn_csum_trusted++;
					m_new->m_pkthdr.csum_flags |=
					   (CSUM_IP_CHECKED | CSUM_IP_VALID |
					    CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
					m_new->m_pkthdr.csum_data = 0xffff;
				}
			} else if (pr != IPPROTO_DONE && do_csum &&
			    (rxr->hn_trust_hcsum & HN_TRUST_HCSUM_IP)) {
				rxr->hn_csum_trusted++;
				m_new->m_pkthdr.csum_flags |=
				    (CSUM_IP_CHECKED | CSUM_IP_VALID);
			}
		}
	}
skip:
	if (info->vlan_info != HN_NDIS_VLAN_INFO_INVALID) {
		m_new->m_pkthdr.ether_vtag = EVL_MAKETAG(
		    NDIS_VLAN_INFO_ID(info->vlan_info),
		    NDIS_VLAN_INFO_PRI(info->vlan_info),
		    NDIS_VLAN_INFO_CFI(info->vlan_info));
		m_new->m_flags |= M_VLANTAG;
	}
	if (info->hash_info != HN_NDIS_HASH_INFO_INVALID) {
		rxr->hn_rss_pkts++;
		m_new->m_pkthdr.flowid = info->hash_value;
		if ((info->hash_info & NDIS_HASH_FUNCTION_MASK) ==
		    NDIS_HASH_FUNCTION_TOEPLITZ) {
			uint32_t type = (info->hash_info & NDIS_HASH_TYPE_MASK);

			switch (type) {
			case NDIS_HASH_IPV4:
				hash_type = M_HASHTYPE_RSS_IPV4;
				break;

			case NDIS_HASH_TCP_IPV4:
				hash_type = M_HASHTYPE_RSS_TCP_IPV4;
				break;

			case NDIS_HASH_IPV6:
				hash_type = M_HASHTYPE_RSS_IPV6;
				break;

			case NDIS_HASH_IPV6_EX:
				hash_type = M_HASHTYPE_RSS_IPV6_EX;
				break;

			case NDIS_HASH_TCP_IPV6:
				hash_type = M_HASHTYPE_RSS_TCP_IPV6;
				break;

			case NDIS_HASH_TCP_IPV6_EX:
				hash_type = M_HASHTYPE_RSS_TCP_IPV6_EX;
				break;
			}
		}
	} else {
		m_new->m_pkthdr.flowid = rxr->hn_rx_idx;
	}
	M_HASHTYPE_SET(m_new, hash_type);
	/*
	 * Note:  Moved RX completion back to hv_nv_on_receive() so all
	 * messages (not just data messages) will trigger a response.
	 */

	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
	rxr->hn_pkts++;

	if ((ifp->if_capenable & IFCAP_LRO) && do_lro) {
#if defined(INET) || defined(INET6)
		struct lro_ctrl *lro = &rxr->hn_lro;

		if (lro->lro_cnt) {
			rxr->hn_lro_tried++;
			if (hn_lro_rx(lro, m_new) == 0) {
				/* DONE! */
				return 0;
			}
		}
#endif
	}

	/* We're not holding the lock here, so don't release it */
	(*ifp->if_input)(ifp, m_new);

	return (0);
}
/*
 * Rules for using sc->temp_unusable:
 * 1.  sc->temp_unusable can only be read or written while holding NV_LOCK()
 * 2.  code reading sc->temp_unusable under NV_LOCK(), and finding
 *     sc->temp_unusable set, must release NV_LOCK() and exit
 * 3.  to retain exclusive control of the interface,
 *     sc->temp_unusable must be set by code before releasing NV_LOCK()
 * 4.  only code setting sc->temp_unusable can clear sc->temp_unusable
 * 5.  code setting sc->temp_unusable must eventually clear sc->temp_unusable
 */
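/*
 * The typical pattern (see hn_ifinit() below), roughly:
 *
 *	NV_LOCK(sc);
 *	if (sc->temp_unusable) {
 *		NV_UNLOCK(sc);
 *		return;				(rule 2)
 *	}
 *	sc->temp_unusable = TRUE;		(rule 3)
 *	NV_UNLOCK(sc);
 *	... exclusive work on the interface ...
 *	NV_LOCK(sc);
 *	sc->temp_unusable = FALSE;		(rules 4 and 5)
 *	NV_UNLOCK(sc);
 */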
/*
 * Standard ioctl entry point.  Called when the user wants to configure
 * the interface.
 */
static int
hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	hn_softc_t *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
#ifdef INET
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	netvsc_device_info device_info;
	int mask, error = 0, ring_cnt;
	int retry_cnt = 500;
	switch (cmd) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET) {
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
				hn_ifinit(sc);
			arp_ifinit(ifp, ifa);
		} else
#endif
		error = ether_ioctl(ifp, cmd, data);
		break;
	case SIOCSIFMTU:
		/* Check MTU value change */
		if (ifp->if_mtu == ifr->ifr_mtu)
			break;

		if (ifr->ifr_mtu > NETVSC_MAX_CONFIGURABLE_MTU) {
			error = EINVAL;
			break;
		}

		/* Obtain and record requested MTU */
		ifp->if_mtu = ifr->ifr_mtu;
#if __FreeBSD_version >= 1100099
		/*
		 * Make sure that LRO aggregation length limit is still
		 * valid, after the MTU change.
		 */
		if (sc->hn_rx_ring[0].hn_lro.lro_length_lim <
		    HN_LRO_LENLIM_MIN(ifp))
			hn_set_lro_lenlim(sc, HN_LRO_LENLIM_MIN(ifp));
#endif

		do {
			NV_LOCK(sc);
			if (!sc->temp_unusable) {
				sc->temp_unusable = TRUE;
				retry_cnt = -1;
			}
			NV_UNLOCK(sc);
			if (retry_cnt > 0) {
				retry_cnt--;
				DELAY(5 * 1000);
			}
		} while (retry_cnt > 0);

		if (retry_cnt == 0) {
			error = EINVAL;
			break;
		}
		/* We must remove and add back the device to cause the new
		 * MTU to take effect.  This includes tearing down, but not
		 * deleting the channel, then bringing it back up.
		 */
		error = hv_rf_on_device_remove(sc);
		if (error) {
			NV_LOCK(sc);
			sc->temp_unusable = FALSE;
			NV_UNLOCK(sc);
			break;
		}

		/* Wait for subchannels to be destroyed */
		vmbus_subchan_drain(sc->hn_prichan);

		sc->hn_rx_ring[0].hn_rx_flags &= ~HN_RX_FLAG_ATTACHED;
		sc->hn_tx_ring[0].hn_tx_flags &= ~HN_TX_FLAG_ATTACHED;
		hn_chan_attach(sc, sc->hn_prichan); /* XXX check error */

		ring_cnt = sc->hn_rx_ring_inuse;
		error = hv_rf_on_device_add(sc, &device_info, &ring_cnt,
		    ifr->ifr_mtu);
		if (error) {
			NV_LOCK(sc);
			sc->temp_unusable = FALSE;
			NV_UNLOCK(sc);
			break;
		}

		/* # of channels can _not_ be changed */
		KASSERT(sc->hn_rx_ring_inuse == ring_cnt,
		    ("RX ring count %d and channel count %u mismatch",
		     sc->hn_rx_ring_cnt, ring_cnt));
		if (sc->hn_rx_ring_inuse > 1) {
			int r;

			/*
			 * Skip the rings on primary channel; they are
			 * handled by the hv_rf_on_device_add() above.
			 */
			for (r = 1; r < sc->hn_rx_ring_cnt; ++r) {
				sc->hn_rx_ring[r].hn_rx_flags &=
				    ~HN_RX_FLAG_ATTACHED;
			}
			for (r = 1; r < sc->hn_tx_ring_cnt; ++r) {
				sc->hn_tx_ring[r].hn_tx_flags &=
				    ~HN_TX_FLAG_ATTACHED;
			}
			hn_attach_subchans(sc); /* XXX check error */
		}

		if (sc->hn_tx_ring[0].hn_chim_size > sc->hn_chim_szmax)
			hn_set_chim_size(sc, sc->hn_chim_szmax);

		hn_ifinit_locked(sc);

		NV_LOCK(sc);
		sc->temp_unusable = FALSE;
		NV_UNLOCK(sc);
		break;
	case SIOCSIFFLAGS:
		do {
			NV_LOCK(sc);
			if (!sc->temp_unusable) {
				sc->temp_unusable = TRUE;
				retry_cnt = -1;
			}
			NV_UNLOCK(sc);
			if (retry_cnt > 0) {
				retry_cnt--;
				DELAY(5 * 1000);
			}
		} while (retry_cnt > 0);

		if (retry_cnt == 0) {
			error = EINVAL;
			break;
		}

		if (ifp->if_flags & IFF_UP) {
			/*
			 * If only the state of the PROMISC flag changed,
			 * then just use the 'set promisc mode' command
			 * instead of reinitializing the entire NIC. Doing
			 * a full re-init means reloading the firmware and
			 * waiting for it to start up, which may take a
			 * second or two.
			 */
#ifdef notyet
			/* Fixme:  Promiscuous mode? */
			if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    ifp->if_flags & IFF_PROMISC &&
			    !(sc->hn_if_flags & IFF_PROMISC)) {
				/* do something here for Hyper-V */
			} else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
			    !(ifp->if_flags & IFF_PROMISC) &&
			    sc->hn_if_flags & IFF_PROMISC) {
				/* do something here for Hyper-V */
			} else
#endif
				hn_ifinit_locked(sc);
		} else {
			if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
				hn_stop(sc);
			}
		}
		NV_LOCK(sc);
		sc->temp_unusable = FALSE;
		NV_UNLOCK(sc);
		sc->hn_if_flags = ifp->if_flags;
		error = 0;
		break;
	case SIOCSIFCAP:
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			ifp->if_capenable ^= IFCAP_TXCSUM;
			if (ifp->if_capenable & IFCAP_TXCSUM) {
				ifp->if_hwassist |=
				    sc->hn_tx_ring[0].hn_csum_assist;
			} else {
				ifp->if_hwassist &=
				    ~sc->hn_tx_ring[0].hn_csum_assist;
			}
		}

		if (mask & IFCAP_RXCSUM)
			ifp->if_capenable ^= IFCAP_RXCSUM;

		if (mask & IFCAP_LRO)
			ifp->if_capenable ^= IFCAP_LRO;

		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			if (ifp->if_capenable & IFCAP_TSO4)
				ifp->if_hwassist |= CSUM_IP_TSO;
			else
				ifp->if_hwassist &= ~CSUM_IP_TSO;
		}

		if (mask & IFCAP_TSO6) {
			ifp->if_capenable ^= IFCAP_TSO6;
			if (ifp->if_capenable & IFCAP_TSO6)
				ifp->if_hwassist |= CSUM_IP6_TSO;
			else
				ifp->if_hwassist &= ~CSUM_IP6_TSO;
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
#ifdef notyet
		/* Fixme:  Multicast mode? */
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			NV_LOCK(sc);
			netvsc_setmulti(sc);
			NV_UNLOCK(sc);
		}
#endif
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->hn_media, cmd);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
static void
hn_stop(hn_softc_t *sc)
{
	struct ifnet *ifp;
	int ret, i;

	ifp = sc->hn_ifp;

	if (bootverbose)
		printf(" Closing Device ...\n");

	atomic_clear_int(&ifp->if_drv_flags,
	    (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
	for (i = 0; i < sc->hn_tx_ring_inuse; ++i)
		sc->hn_tx_ring[i].hn_oactive = 0;

	if_link_state_change(ifp, LINK_STATE_DOWN);
	sc->hn_initdone = 0;

	ret = hv_rf_on_close(sc);
}
/*
 * FreeBSD transmit entry point
 */
static void
hn_start(struct ifnet *ifp)
{
	struct hn_softc *sc = ifp->if_softc;
	struct hn_tx_ring *txr = &sc->hn_tx_ring[0];

	if (txr->hn_sched_tx)
		goto do_sched;

	if (mtx_trylock(&txr->hn_tx_lock)) {
		int sched;

		sched = hn_start_locked(txr, txr->hn_direct_tx_size);
		mtx_unlock(&txr->hn_tx_lock);
		if (!sched)
			return;
	}
do_sched:
	taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_tx_task);
}
static void
hn_start_txeof(struct hn_tx_ring *txr)
{
	struct hn_softc *sc = txr->hn_sc;
	struct ifnet *ifp = sc->hn_ifp;

	KASSERT(txr == &sc->hn_tx_ring[0], ("not the first TX ring"));

	if (txr->hn_sched_tx)
		goto do_sched;

	if (mtx_trylock(&txr->hn_tx_lock)) {
		int sched;

		atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
		sched = hn_start_locked(txr, txr->hn_direct_tx_size);
		mtx_unlock(&txr->hn_tx_lock);
		if (sched) {
			taskqueue_enqueue(txr->hn_tx_taskq,
			    &txr->hn_tx_task);
		}
	} else {
do_sched:
		/*
		 * Release the OACTIVE earlier, with the hope that
		 * others could catch up.  The task will clear the
		 * flag again with the hn_tx_lock to avoid possible
		 * races.
		 */
		atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
		taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_txeof_task);
	}
}
static void
hn_ifinit_locked(hn_softc_t *sc)
{
	struct ifnet *ifp;
	int ret, i;

	ifp = sc->hn_ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	hv_promisc_mode = 1;

	ret = hv_rf_on_open(sc);
	if (ret != 0)
		return;
	else
		sc->hn_initdone = 1;

	atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
	for (i = 0; i < sc->hn_tx_ring_inuse; ++i)
		sc->hn_tx_ring[i].hn_oactive = 0;

	atomic_set_int(&ifp->if_drv_flags, IFF_DRV_RUNNING);
	if_link_state_change(ifp, LINK_STATE_UP);
}
static void
hn_ifinit(void *xsc)
{
	hn_softc_t *sc = xsc;

	NV_LOCK(sc);
	if (sc->temp_unusable) {
		NV_UNLOCK(sc);
		return;
	}
	sc->temp_unusable = TRUE;
	NV_UNLOCK(sc);

	hn_ifinit_locked(sc);

	NV_LOCK(sc);
	sc->temp_unusable = FALSE;
	NV_UNLOCK(sc);
}
#ifdef LATER
/*
 * Watchdog routine
 */
static void
hn_watchdog(struct ifnet *ifp)
{
	hn_softc_t *sc;
	sc = ifp->if_softc;

	printf("hn%d: watchdog timeout -- resetting\n", sc->hn_unit);
	hn_ifinit(sc);    /*???*/
	ifp->if_oerrors++;
}
#endif
#if __FreeBSD_version >= 1100099
static int
hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	unsigned int lenlim;
	int error;

	lenlim = sc->hn_rx_ring[0].hn_lro.lro_length_lim;
	error = sysctl_handle_int(oidp, &lenlim, 0, req);
	if (error || req->newptr == NULL)
		return error;

	if (lenlim < HN_LRO_LENLIM_MIN(sc->hn_ifp) ||
	    lenlim > TCP_LRO_LENGTH_MAX)
		return EINVAL;

	hn_set_lro_lenlim(sc, lenlim);
	return 0;
}
static int
hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int ackcnt, error, i;

	/*
	 * lro_ackcnt_lim is append count limit,
	 * +1 to turn it into aggregation limit.
	 */
	ackcnt = sc->hn_rx_ring[0].hn_lro.lro_ackcnt_lim + 1;
	error = sysctl_handle_int(oidp, &ackcnt, 0, req);
	if (error || req->newptr == NULL)
		return error;

	if (ackcnt < 2 || ackcnt > (TCP_LRO_ACKCNT_MAX + 1))
		return EINVAL;

	/*
	 * Convert aggregation limit back to append
	 * count limit.
	 */
	--ackcnt;
	NV_LOCK(sc);
	for (i = 0; i < sc->hn_rx_ring_inuse; ++i)
		sc->hn_rx_ring[i].hn_lro.lro_ackcnt_lim = ackcnt;
	NV_UNLOCK(sc);
	return 0;
}
#endif
static int
hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int hcsum = arg2;
	int on, error, i;

	on = 0;
	if (sc->hn_rx_ring[0].hn_trust_hcsum & hcsum)
		on = 1;

	error = sysctl_handle_int(oidp, &on, 0, req);
	if (error || req->newptr == NULL)
		return error;

	NV_LOCK(sc);
	for (i = 0; i < sc->hn_rx_ring_inuse; ++i) {
		struct hn_rx_ring *rxr = &sc->hn_rx_ring[i];

		if (on)
			rxr->hn_trust_hcsum |= hcsum;
		else
			rxr->hn_trust_hcsum &= ~hcsum;
	}
	NV_UNLOCK(sc);
	return 0;
}
static int
hn_chim_size_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int chim_size, error;

	chim_size = sc->hn_tx_ring[0].hn_chim_size;
	error = sysctl_handle_int(oidp, &chim_size, 0, req);
	if (error || req->newptr == NULL)
		return error;

	if (chim_size > sc->hn_chim_szmax || chim_size <= 0)
		return EINVAL;

	hn_set_chim_size(sc, chim_size);
	return 0;
}
#if __FreeBSD_version < 1100095
static int
hn_rx_stat_int_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int ofs = arg2, i, error;
	struct hn_rx_ring *rxr;
	uint64_t stat;

	stat = 0;
	for (i = 0; i < sc->hn_rx_ring_inuse; ++i) {
		rxr = &sc->hn_rx_ring[i];
		stat += *((int *)((uint8_t *)rxr + ofs));
	}

	error = sysctl_handle_64(oidp, &stat, 0, req);
	if (error || req->newptr == NULL)
		return error;

	/* Zero out this stat. */
	for (i = 0; i < sc->hn_rx_ring_inuse; ++i) {
		rxr = &sc->hn_rx_ring[i];
		*((int *)((uint8_t *)rxr + ofs)) = 0;
	}
	return 0;
}
#else
static int
hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int ofs = arg2, i, error;
	struct hn_rx_ring *rxr;
	uint64_t stat;

	stat = 0;
	for (i = 0; i < sc->hn_rx_ring_inuse; ++i) {
		rxr = &sc->hn_rx_ring[i];
		stat += *((uint64_t *)((uint8_t *)rxr + ofs));
	}

	error = sysctl_handle_64(oidp, &stat, 0, req);
	if (error || req->newptr == NULL)
		return error;

	/* Zero out this stat. */
	for (i = 0; i < sc->hn_rx_ring_inuse; ++i) {
		rxr = &sc->hn_rx_ring[i];
		*((uint64_t *)((uint8_t *)rxr + ofs)) = 0;
	}
	return 0;
}
#endif
static int
hn_rx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int ofs = arg2, i, error;
	struct hn_rx_ring *rxr;
	u_long stat;

	stat = 0;
	for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
		rxr = &sc->hn_rx_ring[i];
		stat += *((u_long *)((uint8_t *)rxr + ofs));
	}

	error = sysctl_handle_long(oidp, &stat, 0, req);
	if (error || req->newptr == NULL)
		return error;

	/* Zero out this stat. */
	for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
		rxr = &sc->hn_rx_ring[i];
		*((u_long *)((uint8_t *)rxr + ofs)) = 0;
	}
	return 0;
}
static int
hn_tx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int ofs = arg2, i, error;
	struct hn_tx_ring *txr;
	u_long stat;

	stat = 0;
	for (i = 0; i < sc->hn_tx_ring_inuse; ++i) {
		txr = &sc->hn_tx_ring[i];
		stat += *((u_long *)((uint8_t *)txr + ofs));
	}

	error = sysctl_handle_long(oidp, &stat, 0, req);
	if (error || req->newptr == NULL)
		return error;

	/* Zero out this stat. */
	for (i = 0; i < sc->hn_tx_ring_inuse; ++i) {
		txr = &sc->hn_tx_ring[i];
		*((u_long *)((uint8_t *)txr + ofs)) = 0;
	}
	return 0;
}
static int
hn_tx_conf_int_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	int ofs = arg2, i, error, conf;
	struct hn_tx_ring *txr;

	txr = &sc->hn_tx_ring[0];
	conf = *((int *)((uint8_t *)txr + ofs));

	error = sysctl_handle_int(oidp, &conf, 0, req);
	if (error || req->newptr == NULL)
		return error;

	NV_LOCK(sc);
	for (i = 0; i < sc->hn_tx_ring_inuse; ++i) {
		txr = &sc->hn_tx_ring[i];
		*((int *)((uint8_t *)txr + ofs)) = conf;
	}
	NV_UNLOCK(sc);
	return 0;
}
static int
hn_ndis_version_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct hn_softc *sc = arg1;
	char verstr[16];

	snprintf(verstr, sizeof(verstr), "%u.%u",
	    HN_NDIS_VERSION_MAJOR(sc->hn_ndis_ver),
	    HN_NDIS_VERSION_MINOR(sc->hn_ndis_ver));
	return sysctl_handle_string(oidp, verstr, sizeof(verstr), req);
}
static int
hn_check_iplen(const struct mbuf *m, int hoff)
{
	const struct ip *ip;
	int len, iphlen, iplen;
	const struct tcphdr *th;
	int thoff;				/* TCP data offset */

	len = hoff + sizeof(struct ip);

	/* The packet must be at least the size of an IP header. */
	if (m->m_pkthdr.len < len)
		return IPPROTO_DONE;

	/* The fixed IP header must reside completely in the first mbuf. */
	if (m->m_len < len)
		return IPPROTO_DONE;

	ip = mtodo(m, hoff);

	/* Bound check the packet's stated IP header length. */
	iphlen = ip->ip_hl << 2;
	if (iphlen < sizeof(struct ip))		/* minimum header length */
		return IPPROTO_DONE;

	/* The full IP header must reside completely in the one mbuf. */
	if (m->m_len < hoff + iphlen)
		return IPPROTO_DONE;

	iplen = ntohs(ip->ip_len);

	/*
	 * Check that the amount of data in the buffers is at least
	 * as much as the IP header would have us expect.
	 */
	if (m->m_pkthdr.len < hoff + iplen)
		return IPPROTO_DONE;

	/*
	 * Ignore IP fragments.
	 */
	if (ntohs(ip->ip_off) & (IP_OFFMASK | IP_MF))
		return IPPROTO_DONE;

	/*
	 * The TCP/IP or UDP/IP header must be entirely contained within
	 * the first fragment of a packet.
	 */
	switch (ip->ip_p) {
	case IPPROTO_TCP:
		if (iplen < iphlen + sizeof(struct tcphdr))
			return IPPROTO_DONE;
		if (m->m_len < hoff + iphlen + sizeof(struct tcphdr))
			return IPPROTO_DONE;
		th = (const struct tcphdr *)((const uint8_t *)ip + iphlen);
		thoff = th->th_off << 2;
		if (thoff < sizeof(struct tcphdr) || thoff + iphlen > iplen)
			return IPPROTO_DONE;
		if (m->m_len < hoff + iphlen + thoff)
			return IPPROTO_DONE;
		break;
	case IPPROTO_UDP:
		if (iplen < iphlen + sizeof(struct udphdr))
			return IPPROTO_DONE;
		if (m->m_len < hoff + iphlen + sizeof(struct udphdr))
			return IPPROTO_DONE;
		break;
	default:
		if (iplen < iphlen)
			return IPPROTO_DONE;
		break;
	}
	return ip->ip_p;
}
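/*
 * NOTE: hn_check_iplen() returns the IP protocol (e.g. IPPROTO_TCP or
 * IPPROTO_UDP) when the packet's headers are plausible and entirely
 * contained in the first mbuf, and IPPROTO_DONE otherwise; hn_rxpkt()
 * uses this to decide whether the host's checksum verification may be
 * trusted for a packet that carries no checksum info.
 */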
static int
hn_create_rx_data(struct hn_softc *sc, int ring_cnt)
{
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx;
	device_t dev = sc->hn_dev;
#if defined(INET) || defined(INET6)
#if __FreeBSD_version >= 1100095
	int lroent_cnt;
#endif
#endif
	int i;

	/*
	 * Create RXBUF for reception.
	 *
	 * NOTE:
	 * - It is shared by all channels.
	 * - A large enough buffer is allocated, certain versions of NVS
	 *   may further limit the usable space.
	 */
	sc->hn_rxbuf = hyperv_dmamem_alloc(bus_get_dma_tag(dev),
	    PAGE_SIZE, 0, NETVSC_RECEIVE_BUFFER_SIZE, &sc->hn_rxbuf_dma,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO);
	if (sc->hn_rxbuf == NULL) {
		device_printf(sc->hn_dev, "allocate rxbuf failed\n");
		return (ENOMEM);
	}

	sc->hn_rx_ring_cnt = ring_cnt;
	sc->hn_rx_ring_inuse = sc->hn_rx_ring_cnt;

	sc->hn_rx_ring = malloc(sizeof(struct hn_rx_ring) * sc->hn_rx_ring_cnt,
	    M_NETVSC, M_WAITOK | M_ZERO);

#if defined(INET) || defined(INET6)
#if __FreeBSD_version >= 1100095
	lroent_cnt = hn_lro_entry_count;
	if (lroent_cnt < TCP_LRO_ENTRIES)
		lroent_cnt = TCP_LRO_ENTRIES;
	device_printf(dev, "LRO: entry count %d\n", lroent_cnt);
#endif
#endif	/* INET || INET6 */
	ctx = device_get_sysctl_ctx(dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

	/* Create dev.hn.UNIT.rx sysctl tree */
	sc->hn_rx_sysctl_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "rx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");

	for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
		struct hn_rx_ring *rxr = &sc->hn_rx_ring[i];

		if (hn_trust_hosttcp)
			rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_TCP;
		if (hn_trust_hostudp)
			rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_UDP;
		if (hn_trust_hostip)
			rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_IP;
		rxr->hn_ifp = sc->hn_ifp;
		if (i < sc->hn_tx_ring_cnt)
			rxr->hn_txr = &sc->hn_tx_ring[i];
		rxr->hn_rdbuf = malloc(NETVSC_PACKET_SIZE, M_NETVSC, M_WAITOK);
		rxr->hn_rx_idx = i;
		rxr->hn_rxbuf = sc->hn_rxbuf;

		/*
		 * Initialize LRO.
		 */
#if defined(INET) || defined(INET6)
#if __FreeBSD_version >= 1100095
		tcp_lro_init_args(&rxr->hn_lro, sc->hn_ifp, lroent_cnt,
		    hn_lro_mbufq_depth);
#else
		tcp_lro_init(&rxr->hn_lro);
		rxr->hn_lro.ifp = sc->hn_ifp;
#endif
#if __FreeBSD_version >= 1100099
		rxr->hn_lro.lro_length_lim = HN_LRO_LENLIM_DEF;
		rxr->hn_lro.lro_ackcnt_lim = HN_LRO_ACKCNT_DEF;
#endif
#endif	/* INET || INET6 */

		if (sc->hn_rx_sysctl_tree != NULL) {
			char name[16];

			/*
			 * Create per RX ring sysctl tree:
			 * dev.hn.UNIT.rx.RINGID
			 */
			snprintf(name, sizeof(name), "%d", i);
			rxr->hn_rx_sysctl_tree = SYSCTL_ADD_NODE(ctx,
			    SYSCTL_CHILDREN(sc->hn_rx_sysctl_tree),
			    OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");

			if (rxr->hn_rx_sysctl_tree != NULL) {
				SYSCTL_ADD_ULONG(ctx,
				    SYSCTL_CHILDREN(rxr->hn_rx_sysctl_tree),
				    OID_AUTO, "packets", CTLFLAG_RW,
				    &rxr->hn_pkts, "# of packets received");
				SYSCTL_ADD_ULONG(ctx,
				    SYSCTL_CHILDREN(rxr->hn_rx_sysctl_tree),
				    OID_AUTO, "rss_pkts", CTLFLAG_RW,
				    &rxr->hn_rss_pkts,
				    "# of packets w/ RSS info received");
			}
		}
	}
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_queued",
	    CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_rx_ring, hn_lro.lro_queued),
#if __FreeBSD_version < 1100095
	    hn_rx_stat_int_sysctl,
#else
	    hn_rx_stat_u64_sysctl,
#endif
	    "LU", "LRO queued");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_flushed",
	    CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_rx_ring, hn_lro.lro_flushed),
#if __FreeBSD_version < 1100095
	    hn_rx_stat_int_sysctl,
#else
	    hn_rx_stat_u64_sysctl,
#endif
	    "LU", "LRO flushed");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_tried",
	    CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_rx_ring, hn_lro_tried),
	    hn_rx_stat_ulong_sysctl, "LU", "# of LRO tries");
#if __FreeBSD_version >= 1100099
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_length_lim",
	    CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
	    hn_lro_lenlim_sysctl, "IU",
	    "Max # of data bytes to be aggregated by LRO");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_ackcnt_lim",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
	    hn_lro_ackcnt_sysctl, "I",
	    "Max # of ACKs to be aggregated by LRO");
#endif
2374 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hosttcp",
2375 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, HN_TRUST_HCSUM_TCP,
2376 hn_trust_hcsum_sysctl, "I",
2377 "Trust tcp segement verification on host side, "
2378 "when csum info is missing");
2379 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostudp",
2380 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, HN_TRUST_HCSUM_UDP,
2381 hn_trust_hcsum_sysctl, "I",
2382 "Trust udp datagram verification on host side, "
2383 "when csum info is missing");
2384 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostip",
2385 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, HN_TRUST_HCSUM_IP,
2386 hn_trust_hcsum_sysctl, "I",
2387 "Trust ip packet verification on host side, "
2388 "when csum info is missing");
2389 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_ip",
2390 CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2391 __offsetof(struct hn_rx_ring, hn_csum_ip),
2392 hn_rx_stat_ulong_sysctl, "LU", "RXCSUM IP");
2393 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_tcp",
2394 CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2395 __offsetof(struct hn_rx_ring, hn_csum_tcp),
2396 hn_rx_stat_ulong_sysctl, "LU", "RXCSUM TCP");
2397 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_udp",
2398 CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2399 __offsetof(struct hn_rx_ring, hn_csum_udp),
2400 hn_rx_stat_ulong_sysctl, "LU", "RXCSUM UDP");
2401 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_trusted",
2402 CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2403 __offsetof(struct hn_rx_ring, hn_csum_trusted),
2404 hn_rx_stat_ulong_sysctl, "LU",
2405 "# of packets that we trust host's csum verification");
2406 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "small_pkts",
2407 CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2408 __offsetof(struct hn_rx_ring, hn_small_pkts),
2409 hn_rx_stat_ulong_sysctl, "LU", "# of small packets received");
2410 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_ring_cnt",
2411 CTLFLAG_RD, &sc->hn_rx_ring_cnt, 0, "# of created RX rings");
2412 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_ring_inuse",
2413 CTLFLAG_RD, &sc->hn_rx_ring_inuse, 0, "# of used RX rings");
2415 return 0;
2416 }
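/*
 * Tear down what hn_create_rx_data set up: the shared RX buffer
 * first, then each ring's LRO context and read buffer, and finally
 * the ring array itself.
 */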
2418 static void
2419 hn_destroy_rx_data(struct hn_softc *sc)
2420 {
2421 int i;
2423 if (sc->hn_rxbuf != NULL) {
2424 hyperv_dmamem_free(&sc->hn_rxbuf_dma, sc->hn_rxbuf);
2425 sc->hn_rxbuf = NULL;
2426 }
2428 if (sc->hn_rx_ring_cnt == 0)
2429 return;
2431 for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
2432 struct hn_rx_ring *rxr = &sc->hn_rx_ring[i];
2434 #if defined(INET) || defined(INET6)
2435 tcp_lro_free(&rxr->hn_lro);
2436 #endif
2437 free(rxr->hn_rdbuf, M_NETVSC);
2438 }
2439 free(sc->hn_rx_ring, M_NETVSC);
2440 sc->hn_rx_ring = NULL;
2442 sc->hn_rx_ring_cnt = 0;
2443 sc->hn_rx_ring_inuse = 0;
2444 }
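/*
 * Set up a single TX ring: its locks, the TX descriptor pool and
 * free list (or buf_ring), the per-ring tasks, the DMA tags/maps for
 * RNDIS packet messages and packet data, and the per-ring sysctls.
 */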
2446 static int
2447 hn_create_tx_ring(struct hn_softc *sc, int id)
2448 {
2449 struct hn_tx_ring *txr = &sc->hn_tx_ring[id];
2450 device_t dev = sc->hn_dev;
2451 bus_dma_tag_t parent_dtag;
2452 int error, i;
2453 uint32_t version;
2455 txr->hn_sc = sc;
2456 txr->hn_tx_idx = id;
2458 #ifndef HN_USE_TXDESC_BUFRING
2459 mtx_init(&txr->hn_txlist_spin, "hn txlist", NULL, MTX_SPIN);
2460 #endif
2461 mtx_init(&txr->hn_tx_lock, "hn tx", NULL, MTX_DEF);
2463 txr->hn_txdesc_cnt = HN_TX_DESC_CNT;
2464 txr->hn_txdesc = malloc(sizeof(struct hn_txdesc) * txr->hn_txdesc_cnt,
2465 M_NETVSC, M_WAITOK | M_ZERO);
2466 #ifndef HN_USE_TXDESC_BUFRING
2467 SLIST_INIT(&txr->hn_txlist);
2468 #else
2469 txr->hn_txdesc_br = buf_ring_alloc(txr->hn_txdesc_cnt, M_NETVSC,
2470 M_WAITOK, &txr->hn_tx_lock);
2471 #endif
2473 txr->hn_tx_taskq = sc->hn_tx_taskq;
2475 if (hn_use_if_start) {
2476 txr->hn_txeof = hn_start_txeof;
2477 TASK_INIT(&txr->hn_tx_task, 0, hn_start_taskfunc, txr);
2478 TASK_INIT(&txr->hn_txeof_task, 0, hn_start_txeof_taskfunc, txr);
2479 } else {
2480 int br_depth;
2482 txr->hn_txeof = hn_xmit_txeof;
2483 TASK_INIT(&txr->hn_tx_task, 0, hn_xmit_taskfunc, txr);
2484 TASK_INIT(&txr->hn_txeof_task, 0, hn_xmit_txeof_taskfunc, txr);
2486 br_depth = hn_get_txswq_depth(txr);
2487 txr->hn_mbuf_br = buf_ring_alloc(br_depth, M_NETVSC,
2488 M_WAITOK, &txr->hn_tx_lock);
2489 }
2491 txr->hn_direct_tx_size = hn_direct_tx_size;
2492 version = VMBUS_GET_VERSION(device_get_parent(dev), dev);
2493 if (version >= VMBUS_VERSION_WIN8_1) {
2494 txr->hn_csum_assist = HN_CSUM_ASSIST;
2495 } else {
2496 txr->hn_csum_assist = HN_CSUM_ASSIST_WIN8;
2497 if (bootverbose) {
2498 device_printf(dev, "bus version %u.%u, "
2499 "no UDP checksum offloading\n",
2500 VMBUS_VERSION_MAJOR(version),
2501 VMBUS_VERSION_MINOR(version));
2502 }
2503 }
2505 /*
2506 * Always schedule transmission instead of trying to do direct
2507 * transmission. This one gives the best performance so far.
2508 */
2509 txr->hn_sched_tx = 1;
2511 parent_dtag = bus_get_dma_tag(dev);
2513 /* DMA tag for RNDIS packet messages. */
2514 error = bus_dma_tag_create(parent_dtag, /* parent */
2515 HN_RNDIS_PKT_ALIGN, /* alignment */
2516 HN_RNDIS_PKT_BOUNDARY, /* boundary */
2517 BUS_SPACE_MAXADDR, /* lowaddr */
2518 BUS_SPACE_MAXADDR, /* highaddr */
2519 NULL, NULL, /* filter, filterarg */
2520 HN_RNDIS_PKT_LEN, /* maxsize */
2521 1, /* nsegments */
2522 HN_RNDIS_PKT_LEN, /* maxsegsize */
2523 0, /* flags */
2524 NULL, /* lockfunc */
2525 NULL, /* lockfuncarg */
2526 &txr->hn_tx_rndis_dtag);
2528 device_printf(dev, "failed to create rndis dmatag\n");
2532 /* DMA tag for data. */
2533 error = bus_dma_tag_create(parent_dtag, /* parent */
2534 1, /* alignment */
2535 HN_TX_DATA_BOUNDARY, /* boundary */
2536 BUS_SPACE_MAXADDR, /* lowaddr */
2537 BUS_SPACE_MAXADDR, /* highaddr */
2538 NULL, NULL, /* filter, filterarg */
2539 HN_TX_DATA_MAXSIZE, /* maxsize */
2540 HN_TX_DATA_SEGCNT_MAX, /* nsegments */
2541 HN_TX_DATA_SEGSIZE, /* maxsegsize */
2542 0, /* flags */
2543 NULL, /* lockfunc */
2544 NULL, /* lockfuncarg */
2545 &txr->hn_tx_data_dtag);
2547 device_printf(dev, "failed to create data dmatag\n");
2551 for (i = 0; i < txr->hn_txdesc_cnt; ++i) {
2552 struct hn_txdesc *txd = &txr->hn_txdesc[i];
2554 txd->txr = txr;
2556 /*
2557 * Allocate and load RNDIS packet message.
2558 */
2559 error = bus_dmamem_alloc(txr->hn_tx_rndis_dtag,
2560 (void **)&txd->rndis_pkt,
2561 BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
2562 &txd->rndis_pkt_dmap);
2565 "failed to allocate rndis_packet_msg, %d\n", i);
2569 error = bus_dmamap_load(txr->hn_tx_rndis_dtag,
2570 txd->rndis_pkt_dmap,
2571 txd->rndis_pkt, HN_RNDIS_PKT_LEN,
2572 hyperv_dma_map_paddr, &txd->rndis_pkt_paddr,
2576 "failed to load rndis_packet_msg, %d\n", i);
2577 bus_dmamem_free(txr->hn_tx_rndis_dtag,
2578 txd->rndis_pkt, txd->rndis_pkt_dmap);
2579 return error;
2580 }
2582 /* DMA map for TX data. */
2583 error = bus_dmamap_create(txr->hn_tx_data_dtag, 0,
2587 "failed to allocate tx data dmamap\n");
2588 bus_dmamap_unload(txr->hn_tx_rndis_dtag,
2589 txd->rndis_pkt_dmap);
2590 bus_dmamem_free(txr->hn_tx_rndis_dtag,
2591 txd->rndis_pkt, txd->rndis_pkt_dmap);
2592 return error;
2593 }
2595 /* All set, put it to list */
2596 txd->flags |= HN_TXD_FLAG_ONLIST;
2597 #ifndef HN_USE_TXDESC_BUFRING
2598 SLIST_INSERT_HEAD(&txr->hn_txlist, txd, link);
2599 #else
2600 buf_ring_enqueue(txr->hn_txdesc_br, txd);
2601 #endif
2602 }
2603 txr->hn_txdesc_avail = txr->hn_txdesc_cnt;
2605 if (sc->hn_tx_sysctl_tree != NULL) {
2606 struct sysctl_oid_list *child;
2607 struct sysctl_ctx_list *ctx;
2608 char name[16];
2610 /*
2611 * Create per TX ring sysctl tree:
2612 * dev.hn.UNIT.tx.RINGID
2613 */
2614 ctx = device_get_sysctl_ctx(dev);
2615 child = SYSCTL_CHILDREN(sc->hn_tx_sysctl_tree);
2617 snprintf(name, sizeof(name), "%d", id);
2618 txr->hn_tx_sysctl_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
2619 name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
2621 if (txr->hn_tx_sysctl_tree != NULL) {
2622 child = SYSCTL_CHILDREN(txr->hn_tx_sysctl_tree);
2624 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_avail",
2625 CTLFLAG_RD, &txr->hn_txdesc_avail, 0,
2626 "# of available TX descs");
2627 if (!hn_use_if_start) {
2628 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "oactive",
2629 CTLFLAG_RD, &txr->hn_oactive, 0,
2632 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "packets",
2633 CTLFLAG_RW, &txr->hn_pkts,
2634 "# of packets transmitted");
2641 static void
2642 hn_txdesc_dmamap_destroy(struct hn_txdesc *txd)
2643 {
2644 struct hn_tx_ring *txr = txd->txr;
2646 KASSERT(txd->m == NULL, ("still has mbuf installed"));
2647 KASSERT((txd->flags & HN_TXD_FLAG_DMAMAP) == 0, ("still dma mapped"));
2649 bus_dmamap_unload(txr->hn_tx_rndis_dtag, txd->rndis_pkt_dmap);
2650 bus_dmamem_free(txr->hn_tx_rndis_dtag, txd->rndis_pkt,
2651 txd->rndis_pkt_dmap);
2652 bus_dmamap_destroy(txr->hn_tx_data_dtag, txd->data_dmap);
2653 }
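/*
 * Drain every TX descriptor back from the free list (or buf_ring),
 * destroy its DMA state, then release the ring's DMA tags, the
 * descriptor array and the locks.
 */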
2655 static void
2656 hn_destroy_tx_ring(struct hn_tx_ring *txr)
2657 {
2658 struct hn_txdesc *txd;
2660 if (txr->hn_txdesc == NULL)
2661 return;
2663 #ifndef HN_USE_TXDESC_BUFRING
2664 while ((txd = SLIST_FIRST(&txr->hn_txlist)) != NULL) {
2665 SLIST_REMOVE_HEAD(&txr->hn_txlist, link);
2666 hn_txdesc_dmamap_destroy(txd);
2667 }
2668 #else
2669 mtx_lock(&txr->hn_tx_lock);
2670 while ((txd = buf_ring_dequeue_sc(txr->hn_txdesc_br)) != NULL)
2671 hn_txdesc_dmamap_destroy(txd);
2672 mtx_unlock(&txr->hn_tx_lock);
2673 #endif
2675 if (txr->hn_tx_data_dtag != NULL)
2676 bus_dma_tag_destroy(txr->hn_tx_data_dtag);
2677 if (txr->hn_tx_rndis_dtag != NULL)
2678 bus_dma_tag_destroy(txr->hn_tx_rndis_dtag);
2680 #ifdef HN_USE_TXDESC_BUFRING
2681 buf_ring_free(txr->hn_txdesc_br, M_NETVSC);
2682 #endif
2684 free(txr->hn_txdesc, M_NETVSC);
2685 txr->hn_txdesc = NULL;
2687 if (txr->hn_mbuf_br != NULL)
2688 buf_ring_free(txr->hn_mbuf_br, M_NETVSC);
2690 #ifndef HN_USE_TXDESC_BUFRING
2691 mtx_destroy(&txr->hn_txlist_spin);
2692 #endif
2693 mtx_destroy(&txr->hn_tx_lock);
2694 }
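/*
 * Allocate the chimney TXBUF shared by all channels, create all TX
 * rings, and publish the aggregate TX sysctls under dev.hn.UNIT.
 */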
2696 static int
2697 hn_create_tx_data(struct hn_softc *sc, int ring_cnt)
2698 {
2699 struct sysctl_oid_list *child;
2700 struct sysctl_ctx_list *ctx;
2701 int i;
2703 /*
2704 * Create TXBUF for chimney sending.
2705 *
2706 * NOTE: It is shared by all channels.
2707 */
2708 sc->hn_chim = hyperv_dmamem_alloc(bus_get_dma_tag(sc->hn_dev),
2709 PAGE_SIZE, 0, NETVSC_SEND_BUFFER_SIZE, &sc->hn_chim_dma,
2710 BUS_DMA_WAITOK | BUS_DMA_ZERO);
2711 if (sc->hn_chim == NULL) {
2712 device_printf(sc->hn_dev, "allocate txbuf failed\n");
2716 sc->hn_tx_ring_cnt = ring_cnt;
2717 sc->hn_tx_ring_inuse = sc->hn_tx_ring_cnt;
2719 sc->hn_tx_ring = malloc(sizeof(struct hn_tx_ring) * sc->hn_tx_ring_cnt,
2720 M_NETVSC, M_WAITOK | M_ZERO);
2722 ctx = device_get_sysctl_ctx(sc->hn_dev);
2723 child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->hn_dev));
2725 /* Create dev.hn.UNIT.tx sysctl tree */
2726 sc->hn_tx_sysctl_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "tx",
2727 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
2729 for (i = 0; i < sc->hn_tx_ring_cnt; ++i) {
2730 int error;
2732 error = hn_create_tx_ring(sc, i);
2733 if (error)
2734 return error;
2735 }
2737 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "no_txdescs",
2738 CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2739 __offsetof(struct hn_tx_ring, hn_no_txdescs),
2740 hn_tx_stat_ulong_sysctl, "LU", "# of times short of TX descs");
2741 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "send_failed",
2742 CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2743 __offsetof(struct hn_tx_ring, hn_send_failed),
2744 hn_tx_stat_ulong_sysctl, "LU", "# of hyper-v sending failure");
2745 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txdma_failed",
2746 CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2747 __offsetof(struct hn_tx_ring, hn_txdma_failed),
2748 hn_tx_stat_ulong_sysctl, "LU", "# of TX DMA failure");
2749 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_collapsed",
2750 CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2751 __offsetof(struct hn_tx_ring, hn_tx_collapsed),
2752 hn_tx_stat_ulong_sysctl, "LU", "# of TX mbuf collapsed");
2753 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney",
2754 CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2755 __offsetof(struct hn_tx_ring, hn_tx_chimney),
2756 hn_tx_stat_ulong_sysctl, "LU", "# of chimney send");
2757 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney_tried",
2758 CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2759 __offsetof(struct hn_tx_ring, hn_tx_chimney_tried),
2760 hn_tx_stat_ulong_sysctl, "LU", "# of chimney send tries");
2761 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_cnt",
2762 CTLFLAG_RD, &sc->hn_tx_ring[0].hn_txdesc_cnt, 0,
2763 "# of total TX descs");
2764 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_chimney_max",
2765 CTLFLAG_RD, &sc->hn_chim_szmax, 0,
2766 "Chimney send packet size upper boundary");
2767 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney_size",
2768 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
2769 hn_chim_size_sysctl, "I", "Chimney send packet size limit");
2770 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "direct_tx_size",
2771 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2772 __offsetof(struct hn_tx_ring, hn_direct_tx_size),
2773 hn_tx_conf_int_sysctl, "I",
2774 "Size of the packet for direct transmission");
2775 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "sched_tx",
2776 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2777 __offsetof(struct hn_tx_ring, hn_sched_tx),
2778 hn_tx_conf_int_sysctl, "I",
2779 "Always schedule transmission "
2780 "instead of doing direct transmission");
2781 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_cnt",
2782 CTLFLAG_RD, &sc->hn_tx_ring_cnt, 0, "# of created TX rings");
2783 SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_inuse",
2784 CTLFLAG_RD, &sc->hn_tx_ring_inuse, 0, "# of used TX rings");
2786 return 0;
2787 }
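/*
 * Propagate a new chimney (copy-into-TXBUF) send size limit to all
 * TX rings currently in use.
 */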
2789 static void
2790 hn_set_chim_size(struct hn_softc *sc, int chim_size)
2791 {
2792 int i;
2795 for (i = 0; i < sc->hn_tx_ring_inuse; ++i)
2796 sc->hn_tx_ring[i].hn_chim_size = chim_size;
2797 }
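/*
 * Reverse of hn_create_tx_data: free the chimney TXBUF and all TX
 * rings, then reset the ring accounting.
 */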
2800 static void
2801 hn_destroy_tx_data(struct hn_softc *sc)
2802 {
2803 int i;
2805 if (sc->hn_chim != NULL) {
2806 hyperv_dmamem_free(&sc->hn_chim_dma, sc->hn_chim);
2807 sc->hn_chim = NULL;
2808 }
2810 if (sc->hn_tx_ring_cnt == 0)
2811 return;
2813 for (i = 0; i < sc->hn_tx_ring_cnt; ++i)
2814 hn_destroy_tx_ring(&sc->hn_tx_ring[i]);
2816 free(sc->hn_tx_ring, M_NETVSC);
2817 sc->hn_tx_ring = NULL;
2819 sc->hn_tx_ring_cnt = 0;
2820 sc->hn_tx_ring_inuse = 0;
2821 }
2823 static void
2824 hn_start_taskfunc(void *xtxr, int pending __unused)
2825 {
2826 struct hn_tx_ring *txr = xtxr;
2828 mtx_lock(&txr->hn_tx_lock);
2829 hn_start_locked(txr, 0);
2830 mtx_unlock(&txr->hn_tx_lock);
2831 }
2833 static void
2834 hn_start_txeof_taskfunc(void *xtxr, int pending __unused)
2835 {
2836 struct hn_tx_ring *txr = xtxr;
2838 mtx_lock(&txr->hn_tx_lock);
2839 atomic_clear_int(&txr->hn_sc->hn_ifp->if_drv_flags, IFF_DRV_OACTIVE);
2840 hn_start_locked(txr, 0);
2841 mtx_unlock(&txr->hn_tx_lock);
2842 }
2844 static void
2845 hn_stop_tx_tasks(struct hn_softc *sc)
2846 {
2847 int i;
2849 for (i = 0; i < sc->hn_tx_ring_inuse; ++i) {
2850 struct hn_tx_ring *txr = &sc->hn_tx_ring[i];
2852 taskqueue_drain(txr->hn_tx_taskq, &txr->hn_tx_task);
2853 taskqueue_drain(txr->hn_tx_taskq, &txr->hn_txeof_task);
2854 }
2855 }
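/*
 * Drain the ring's mbuf buf_ring and hand each packet to the host.
 * A non-zero 'len' caps the packet size sent from this context;
 * anything larger is put back and a non-zero return asks the caller
 * to reschedule onto the TX taskqueue.
 */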
2857 static int
2858 hn_xmit(struct hn_tx_ring *txr, int len)
2859 {
2860 struct hn_softc *sc = txr->hn_sc;
2861 struct ifnet *ifp = sc->hn_ifp;
2862 struct mbuf *m_head;
2864 mtx_assert(&txr->hn_tx_lock, MA_OWNED);
2865 KASSERT(hn_use_if_start == 0,
2866 ("hn_xmit is called when if_start is enabled"));
2868 if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || txr->hn_oactive)
2869 return 0;
2871 while ((m_head = drbr_peek(ifp, txr->hn_mbuf_br)) != NULL) {
2872 struct hn_txdesc *txd;
2873 int error;
2875 if (len > 0 && m_head->m_pkthdr.len > len) {
2876 /*
2877 * This send could be time consuming; let callers
2878 * dispatch this packet (and any follow-up packets)
2879 * to the tx taskqueue.
2880 */
2881 drbr_putback(ifp, txr->hn_mbuf_br, m_head);
2882 return 1;
2883 }
2885 txd = hn_txdesc_get(txr);
2886 if (txd == NULL) {
2887 txr->hn_no_txdescs++;
2888 drbr_putback(ifp, txr->hn_mbuf_br, m_head);
2889 txr->hn_oactive = 1;
2890 break;
2891 }
2893 error = hn_encap(txr, txd, &m_head);
2894 if (__predict_false(error)) {
2895 /* Both txd and m_head are freed; discard */
2896 drbr_advance(ifp, txr->hn_mbuf_br);
2897 continue;
2898 }
2900 error = hn_send_pkt(ifp, txr, txd);
2901 if (__predict_false(error)) {
2902 /* txd is freed, but m_head is not */
2903 drbr_putback(ifp, txr->hn_mbuf_br, m_head);
2904 txr->hn_oactive = 1;
2905 break;
2906 }
2908 /* Sent */
2909 drbr_advance(ifp, txr->hn_mbuf_br);
2910 }
2911 return 0;
2912 }
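/*
 * if_transmit method: pick a TX ring from the mbuf's RSS flowid,
 * enqueue the packet, then either transmit directly (when the TX
 * lock can be taken) or defer to the ring's TX task.
 */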
2914 static int
2915 hn_transmit(struct ifnet *ifp, struct mbuf *m)
2916 {
2917 struct hn_softc *sc = ifp->if_softc;
2918 struct hn_tx_ring *txr;
2919 int error, idx = 0;
2921 /*
2922 * Select the TX ring based on flowid
2923 */
2924 if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
2925 idx = m->m_pkthdr.flowid % sc->hn_tx_ring_inuse;
2926 txr = &sc->hn_tx_ring[idx];
2928 error = drbr_enqueue(ifp, txr->hn_mbuf_br, m);
2929 if (error) {
2930 if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1);
2931 return error;
2932 }
2934 if (txr->hn_oactive)
2935 return 0;
2937 if (txr->hn_sched_tx)
2938 goto do_sched;
2940 if (mtx_trylock(&txr->hn_tx_lock)) {
2941 int sched;
2943 sched = hn_xmit(txr, txr->hn_direct_tx_size);
2944 mtx_unlock(&txr->hn_tx_lock);
2945 if (!sched)
2946 return 0;
2947 }
2948 do_sched:
2949 taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_tx_task);
2950 return 0;
2951 }
2953 static void
2954 hn_xmit_qflush(struct ifnet *ifp)
2955 {
2956 struct hn_softc *sc = ifp->if_softc;
2957 int i;
2959 for (i = 0; i < sc->hn_tx_ring_inuse; ++i) {
2960 struct hn_tx_ring *txr = &sc->hn_tx_ring[i];
2961 struct mbuf *m;
2963 mtx_lock(&txr->hn_tx_lock);
2964 while ((m = buf_ring_dequeue_sc(txr->hn_mbuf_br)) != NULL)
2965 m_freem(m);
2966 mtx_unlock(&txr->hn_tx_lock);
2967 }
2968 if_qflush(ifp);
2969 }
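/*
 * TX completion for the multiqueue path: clear oactive and restart
 * transmission, either inline when the TX lock is available or via
 * the txeof task.
 */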
2971 static void
2972 hn_xmit_txeof(struct hn_tx_ring *txr)
2973 {
2975 if (txr->hn_sched_tx)
2976 goto do_sched;
2978 if (mtx_trylock(&txr->hn_tx_lock)) {
2979 int sched;
2981 txr->hn_oactive = 0;
2982 sched = hn_xmit(txr, txr->hn_direct_tx_size);
2983 mtx_unlock(&txr->hn_tx_lock);
2984 if (sched) {
2985 taskqueue_enqueue(txr->hn_tx_taskq,
2986 &txr->hn_tx_task);
2987 }
2988 } else {
2989 do_sched:
2990 /*
2991 * Release oactive earlier, in the hope that
2992 * others could catch up. The task will clear
2993 * oactive again, with the hn_tx_lock held, to avoid
2994 * possible races.
2995 */
2996 txr->hn_oactive = 0;
2997 taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_txeof_task);
2998 }
2999 }
3001 static void
3002 hn_xmit_taskfunc(void *xtxr, int pending __unused)
3003 {
3004 struct hn_tx_ring *txr = xtxr;
3006 mtx_lock(&txr->hn_tx_lock);
3007 hn_xmit(txr, 0);
3008 mtx_unlock(&txr->hn_tx_lock);
3009 }
3011 static void
3012 hn_xmit_txeof_taskfunc(void *xtxr, int pending __unused)
3013 {
3014 struct hn_tx_ring *txr = xtxr;
3016 mtx_lock(&txr->hn_tx_lock);
3017 txr->hn_oactive = 0;
3018 hn_xmit(txr, 0);
3019 mtx_unlock(&txr->hn_tx_lock);
3020 }
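/*
 * Link a VMBus (sub-)channel to its RX/TX ring pair, bind it to a
 * CPU, and open it with hn_chan_callback as the channel callback.
 */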
3022 static int
3023 hn_chan_attach(struct hn_softc *sc, struct vmbus_channel *chan)
3024 {
3025 struct hn_rx_ring *rxr;
3026 struct hn_tx_ring *txr = NULL;
3027 int idx, error;
3029 idx = vmbus_chan_subidx(chan);
3031 /*
3032 * Link this channel to RX/TX ring.
3033 */
3034 KASSERT(idx >= 0 && idx < sc->hn_rx_ring_inuse,
3035 ("invalid channel index %d, should > 0 && < %d",
3036 idx, sc->hn_rx_ring_inuse));
3037 rxr = &sc->hn_rx_ring[idx];
3038 KASSERT((rxr->hn_rx_flags & HN_RX_FLAG_ATTACHED) == 0,
3039 ("RX ring %d already attached", idx));
3040 rxr->hn_rx_flags |= HN_RX_FLAG_ATTACHED;
3043 if_printf(sc->hn_ifp, "link RX ring %d to chan%u\n",
3044 idx, vmbus_chan_id(chan));
3047 if (idx < sc->hn_tx_ring_inuse) {
3048 txr = &sc->hn_tx_ring[idx];
3049 KASSERT((txr->hn_tx_flags & HN_TX_FLAG_ATTACHED) == 0,
3050 ("TX ring %d already attached", idx));
3051 txr->hn_tx_flags |= HN_TX_FLAG_ATTACHED;
3053 txr->hn_chan = chan;
3055 if_printf(sc->hn_ifp, "link TX ring %d to chan%u\n",
3056 idx, vmbus_chan_id(chan));
3060 /* Bind this channel to a proper CPU. */
3061 vmbus_chan_cpu_set(chan, (sc->hn_cpu + idx) % mp_ncpus);
3063 /* Open this channel */
3064 error = vmbus_chan_open(chan, NETVSC_DEVICE_RING_BUFFER_SIZE,
3065 NETVSC_DEVICE_RING_BUFFER_SIZE, NULL, 0, hn_chan_callback, rxr);
3067 if_printf(sc->hn_ifp, "open chan%u failed: %d\n",
3068 vmbus_chan_id(chan), error);
3069 rxr->hn_rx_flags &= ~HN_RX_FLAG_ATTACHED;
3070 if (txr != NULL)
3071 txr->hn_tx_flags &= ~HN_TX_FLAG_ATTACHED;
3072 }
3073 return error;
3074 }
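/*
 * Attach all sub-channels; there is one sub-channel per RX ring
 * beyond the one served by the primary channel.
 */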
3076 static int
3077 hn_attach_subchans(struct hn_softc *sc)
3078 {
3079 struct vmbus_channel **subchans;
3080 int subchan_cnt = sc->hn_rx_ring_inuse - 1;
3081 int i, error = 0;
3083 /* Wait for sub-channels setup to complete. */
3084 subchans = vmbus_subchan_get(sc->hn_prichan, subchan_cnt);
3086 /* Attach the sub-channels. */
3087 for (i = 0; i < subchan_cnt; ++i) {
3088 error = hn_chan_attach(sc, subchans[i]);
3089 if (error)
3090 break;
3091 }
3093 /* Release the sub-channels */
3094 vmbus_subchan_rel(subchans, subchan_cnt);
3097 if_printf(sc->hn_ifp, "sub-channels attach failed: %d\n", error);
3100 if_printf(sc->hn_ifp, "%d sub-channels attached\n",
3107 static void
3108 hn_nvs_handle_notify(struct hn_softc *sc, const struct vmbus_chanpkt_hdr *pkt)
3109 {
3110 const struct hn_nvs_hdr *hdr;
3112 if (VMBUS_CHANPKT_DATALEN(pkt) < sizeof(*hdr)) {
3113 if_printf(sc->hn_ifp, "invalid nvs notify\n");
3116 hdr = VMBUS_CHANPKT_CONST_DATA(pkt);
3118 if (hdr->nvs_type == HN_NVS_TYPE_TXTBL_NOTE) {
3119 /* Useless; ignore */
3120 return;
3121 }
3122 if_printf(sc->hn_ifp, "got notify, nvs type %u\n", hdr->nvs_type);
3125 static void
3126 hn_nvs_handle_comp(struct hn_softc *sc, struct vmbus_channel *chan,
3127 const struct vmbus_chanpkt_hdr *pkt)
3128 {
3129 struct hn_send_ctx *sndc;
3131 sndc = (struct hn_send_ctx *)(uintptr_t)pkt->cph_xactid;
3132 sndc->hn_cb(sndc, sc, chan, VMBUS_CHANPKT_CONST_DATA(pkt),
3133 VMBUS_CHANPKT_DATALEN(pkt));
3135 /*
3136 * 'sndc' CANNOT be accessed anymore, since it could have been
3137 * freed by its callback.
3138 */
3139 }
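/*
 * An RXBUF channel packet carries one or more RNDIS messages, each
 * described by an (offset, length) pair into the shared RX buffer.
 * Every range is validated before being handed to hv_rf_on_receive(),
 * and the host is always acked so it can reclaim the buffer space.
 */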
3141 static void
3142 hn_nvs_handle_rxbuf(struct hn_softc *sc, struct hn_rx_ring *rxr,
3143 struct vmbus_channel *chan, const struct vmbus_chanpkt_hdr *pkthdr)
3144 {
3145 const struct vmbus_chanpkt_rxbuf *pkt;
3146 const struct hn_nvs_hdr *nvs_hdr;
3147 int count, i, hlen;
3149 if (__predict_false(VMBUS_CHANPKT_DATALEN(pkthdr) < sizeof(*nvs_hdr))) {
3150 if_printf(rxr->hn_ifp, "invalid nvs RNDIS\n");
3153 nvs_hdr = VMBUS_CHANPKT_CONST_DATA(pkthdr);
3155 /* Make sure that this is a RNDIS message. */
3156 if (__predict_false(nvs_hdr->nvs_type != HN_NVS_TYPE_RNDIS)) {
3157 if_printf(rxr->hn_ifp, "nvs type %u, not RNDIS\n",
3162 hlen = VMBUS_CHANPKT_GETLEN(pkthdr->cph_hlen);
3163 if (__predict_false(hlen < sizeof(*pkt))) {
3164 if_printf(rxr->hn_ifp, "invalid rxbuf chanpkt\n");
3167 pkt = (const struct vmbus_chanpkt_rxbuf *)pkthdr;
3169 if (__predict_false(pkt->cp_rxbuf_id != HN_NVS_RXBUF_SIG)) {
3170 if_printf(rxr->hn_ifp, "invalid rxbuf_id 0x%08x\n",
3175 count = pkt->cp_rxbuf_cnt;
3176 if (__predict_false(hlen <
3177 __offsetof(struct vmbus_chanpkt_rxbuf, cp_rxbuf[count]))) {
3178 if_printf(rxr->hn_ifp, "invalid rxbuf_cnt %d\n", count);
3182 /* Each range represents 1 RNDIS pkt that contains 1 Ethernet frame */
3183 for (i = 0; i < count; ++i) {
3184 int ofs, len;
3186 ofs = pkt->cp_rxbuf[i].rb_ofs;
3187 len = pkt->cp_rxbuf[i].rb_len;
3188 if (__predict_false(ofs + len > NETVSC_RECEIVE_BUFFER_SIZE)) {
3189 if_printf(rxr->hn_ifp, "%dth RNDIS msg overflow rxbuf, "
3190 "ofs %d, len %d\n", i, ofs, len);
3193 hv_rf_on_receive(sc, rxr, rxr->hn_rxbuf + ofs, len);
3194 }
3196 /*
3197 * Moved completion call back here so that all received
3198 * messages (not just data messages) will trigger a response
3199 * message back to the host.
3200 */
3201 hn_nvs_ack_rxbuf(chan, pkt->cp_hdr.cph_xactid);
3202 }
3204 /*
3205 * Net VSC on receive completion
3206 *
3207 * Send a receive completion packet to the RNDIS device (i.e., NetVSP).
3208 */
3209 static void
3210 hn_nvs_ack_rxbuf(struct vmbus_channel *chan, uint64_t tid)
3211 {
3212 struct hn_nvs_rndis_ack ack;
3213 int retries = 0;
3214 int ret = 0;
3216 ack.nvs_type = HN_NVS_TYPE_RNDIS_ACK;
3217 ack.nvs_status = HN_NVS_STATUS_OK;
3219 retry_send_cmplt:
3220 /* Send the completion */
3221 ret = vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_COMP,
3222 VMBUS_CHANPKT_FLAG_NONE, &ack, sizeof(ack), tid);
3223 if (ret == 0) {
3224 /* success */
3226 } else if (ret == EAGAIN) {
3227 /* no more room... wait a bit and attempt to retry 3 times */
3228 retries++;
3230 if (retries < 4) {
3231 DELAY(100);
3232 goto retry_send_cmplt;
3233 }
3234 }
3235 }
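/*
 * Per-channel callback: pull channel packets into hn_rdbuf (growing
 * a temporary buffer on ENOBUFS) and dispatch them by type to the
 * completion, RXBUF and inband-notify handlers above.
 */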
3237 static void
3238 hn_chan_callback(struct vmbus_channel *chan, void *xrxr)
3239 {
3240 struct hn_rx_ring *rxr = xrxr;
3241 struct hn_softc *sc = rxr->hn_ifp->if_softc;
3242 void *buffer;
3243 int bufferlen = NETVSC_PACKET_SIZE;
3245 buffer = rxr->hn_rdbuf;
3246 do {
3247 struct vmbus_chanpkt_hdr *pkt = buffer;
3248 uint32_t bytes_rxed;
3249 int ret;
3251 bytes_rxed = bufferlen;
3252 ret = vmbus_chan_recv_pkt(chan, pkt, &bytes_rxed);
3253 if (ret == 0) {
3254 switch (pkt->cph_type) {
3255 case VMBUS_CHANPKT_TYPE_COMP:
3256 hn_nvs_handle_comp(sc, chan, pkt);
3257 break;
3258 case VMBUS_CHANPKT_TYPE_RXBUF:
3259 hn_nvs_handle_rxbuf(sc, rxr, chan, pkt);
3260 break;
3261 case VMBUS_CHANPKT_TYPE_INBAND:
3262 hn_nvs_handle_notify(sc, pkt);
3263 break;
3264 default:
3265 if_printf(rxr->hn_ifp,
3266 "unknown chan pkt %u\n",
3270 } else if (ret == ENOBUFS) {
3271 /* Handle large packet */
3272 if (bufferlen > NETVSC_PACKET_SIZE) {
3273 free(buffer, M_NETVSC);
3274 buffer = NULL;
3275 }
3277 /* alloc new buffer */
3278 buffer = malloc(bytes_rxed, M_NETVSC, M_NOWAIT);
3279 if (buffer == NULL) {
3280 if_printf(rxr->hn_ifp,
3281 "hv_cb malloc buffer failed, len=%u\n",
3286 bufferlen = bytes_rxed;
3287 } else {
3288 /* No more packets */
3289 break;
3290 }
3291 } while (1);
3293 if (bufferlen > NETVSC_PACKET_SIZE)
3294 free(buffer, M_NETVSC);
3296 hv_rf_channel_rollup(rxr, rxr->hn_txr);
3297 }
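/*
 * Optionally create one TX taskqueue shared by all hn(4) instances
 * (hn_share_tx_taskq), pinning its thread to a CPU when
 * hn_bind_tx_taskq is set.
 */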
3299 static void
3300 hn_tx_taskq_create(void *arg __unused)
3301 {
3302 if (!hn_share_tx_taskq)
3303 return;
3305 hn_tx_taskq = taskqueue_create("hn_tx", M_WAITOK,
3306 taskqueue_thread_enqueue, &hn_tx_taskq);
3307 taskqueue_start_threads(&hn_tx_taskq, 1, PI_NET, "hn tx");
3308 if (hn_bind_tx_taskq >= 0) {
3309 int cpu = hn_bind_tx_taskq;
3310 struct task cpuset_task;
3311 cpuset_t cpu_set;
3313 if (cpu > mp_ncpus - 1)
3314 cpu = mp_ncpus - 1;
3315 CPU_SETOF(cpu, &cpu_set);
3316 TASK_INIT(&cpuset_task, 0, hn_cpuset_setthread_task, &cpu_set);
3317 taskqueue_enqueue(hn_tx_taskq, &cpuset_task);
3318 taskqueue_drain(hn_tx_taskq, &cpuset_task);
3319 }
3320 }
3321 SYSINIT(hn_txtq_create, SI_SUB_DRIVERS, SI_ORDER_FIRST,
3322 hn_tx_taskq_create, NULL);
3324 static void
3325 hn_tx_taskq_destroy(void *arg __unused)
3326 {
3327 if (hn_tx_taskq != NULL)
3328 taskqueue_free(hn_tx_taskq);
3329 }
3330 SYSUNINIT(hn_txtq_destroy, SI_SUB_DRIVERS, SI_ORDER_FIRST,
3331 hn_tx_taskq_destroy, NULL);
3333 static device_method_t netvsc_methods[] = {
3334 /* Device interface */
3335 DEVMETHOD(device_probe, netvsc_probe),
3336 DEVMETHOD(device_attach, netvsc_attach),
3337 DEVMETHOD(device_detach, netvsc_detach),
3338 DEVMETHOD(device_shutdown, netvsc_shutdown),
3339 DEVMETHOD_END
3340 };
3343 static driver_t netvsc_driver = {
3344 NETVSC_DEVNAME,
3345 netvsc_methods,
3346 sizeof(struct hn_softc)
3347 };
3349 static devclass_t netvsc_devclass;
3351 DRIVER_MODULE(hn, vmbus, netvsc_driver, netvsc_devclass, 0, 0);
3352 MODULE_VERSION(hn, 1);
3353 MODULE_DEPEND(hn, vmbus, 1, 1, 1);