/*-
 * Copyright (c) 2010-2012 Citrix Inc.
 * Copyright (c) 2009-2012 Microsoft Corp.
 * Copyright (c) 2012 NetApp Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2004-2006 Kip Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/buf_ring.h>

#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <netinet/ip6.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/frame.h>
#include <machine/vmparam.h>

#include <sys/rman.h>
#include <sys/mutex.h>
#include <sys/errno.h>
#include <sys/types.h>
#include <machine/atomic.h>

#include <machine/intr_machdep.h>

#include <machine/in_cksum.h>

#include <dev/hyperv/include/hyperv.h>
#include "hv_net_vsc.h"
#include "hv_rndis.h"
#include "hv_rndis_filter.h"
#define hv_chan_rxr	hv_chan_priv1
#define hv_chan_txr	hv_chan_priv2
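/*
 * The VMBus channel's private pointers are repurposed to link each channel
 * back to its RX and TX rings, so channel callbacks such as
 * netvsc_channel_rollup() and netvsc_recv() can locate their rings without
 * a lookup; see the assignments in netvsc_attach().
 */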
/* Short for Hyper-V network interface */
#define NETVSC_DEVNAME		"hn"
/*
 * It looks like offset 0 of buf is reserved to hold the softc pointer.
 * The sc pointer is evidently not needed, and is not presently populated.
 * The packet offset is where the netvsc_packet starts in the buffer.
 */
#define HV_NV_SC_PTR_OFFSET_IN_BUF	0
#define HV_NV_PACKET_OFFSET_IN_BUF	16
/* YYY should get it from the underlying channel */
#define HN_TX_DESC_CNT			512

#define HN_LROENT_CNT_DEF		128
#define HN_RNDIS_MSG_LEN		\
    (sizeof(rndis_msg) +		\
     RNDIS_HASH_PPI_SIZE +		\
     RNDIS_VLAN_PPI_SIZE +		\
     RNDIS_TSO_PPI_SIZE +		\
     RNDIS_CSUM_PPI_SIZE)
#define HN_RNDIS_MSG_BOUNDARY		PAGE_SIZE
#define HN_RNDIS_MSG_ALIGN		CACHE_LINE_SIZE

#define HN_TX_DATA_BOUNDARY		PAGE_SIZE
#define HN_TX_DATA_MAXSIZE		IP_MAXPACKET
#define HN_TX_DATA_SEGSIZE		PAGE_SIZE
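/*
 * Upper bound on mbuf-backed page buffers per packet: the channel-wide
 * page buffer limit, minus the page buffers reserved up front for the
 * leading RNDIS message (see hn_encap()).
 */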
#define HN_TX_DATA_SEGCNT_MAX		\
    (NETVSC_PACKET_MAXPAGE - HV_RF_NUM_TX_RESERVED_PAGE_BUFS)

#define HN_DIRECT_TX_SIZE_DEF		128
struct hn_txdesc {
#ifndef HN_USE_TXDESC_BUFRING
    SLIST_ENTRY(hn_txdesc) link;
#endif
    struct mbuf *m;
    struct hn_tx_ring *txr;
    int refs;
    uint32_t flags;			/* HN_TXD_FLAG_ */
    netvsc_packet netvsc_pkt;		/* XXX to be removed */

    bus_dmamap_t data_dmap;

    bus_addr_t rndis_msg_paddr;
    rndis_msg *rndis_msg;
    bus_dmamap_t rndis_msg_dmap;
};

#define HN_TXD_FLAG_ONLIST	0x1
#define HN_TXD_FLAG_DMAMAP	0x2
/*
 * Only enable UDP checksum offloading when it is on 2012R2 or
 * later.  UDP checksum offloading doesn't work on earlier
 * Windows releases.
 */
#define HN_CSUM_ASSIST_WIN8	(CSUM_IP | CSUM_TCP)
#define HN_CSUM_ASSIST		(CSUM_IP | CSUM_UDP | CSUM_TCP)
#define HN_LRO_LENLIM_DEF		(25 * ETHERMTU)
/* YYY 2*MTU is a bit rough, but should be good enough. */
#define HN_LRO_LENLIM_MIN(ifp)		(2 * (ifp)->if_mtu)

#define HN_LRO_ACKCNT_DEF		1
/*
 * Be aware that this sleepable mutex will exhibit WITNESS errors when
 * certain TCP and ARP code paths are taken.  This appears to be a
 * well-known condition, as all other drivers checked use a sleeping
 * mutex to protect their transmit paths.
 * Also be aware that mutexes do not play well with semaphores, and there
 * is a conflicting semaphore in a certain channel code path.
 */
#define NV_LOCK_INIT(_sc, _name) \
    mtx_init(&(_sc)->hn_lock, _name, MTX_NETWORK_LOCK, MTX_DEF)
#define NV_LOCK(_sc)		mtx_lock(&(_sc)->hn_lock)
#define NV_LOCK_ASSERT(_sc)	mtx_assert(&(_sc)->hn_lock, MA_OWNED)
#define NV_UNLOCK(_sc)		mtx_unlock(&(_sc)->hn_lock)
#define NV_LOCK_DESTROY(_sc)	mtx_destroy(&(_sc)->hn_lock)
int hv_promisc_mode = 0;	/* normal mode by default */

SYSCTL_NODE(_hw, OID_AUTO, hn, CTLFLAG_RD, NULL, "Hyper-V network interface");
/* Trust TCP segment verification on host side. */
static int hn_trust_hosttcp = 1;
SYSCTL_INT(_hw_hn, OID_AUTO, trust_hosttcp, CTLFLAG_RDTUN,
    &hn_trust_hosttcp, 0,
    "Trust tcp segment verification on host side, "
    "when csum info is missing (global setting)");
/* Trust UDP datagram verification on host side. */
static int hn_trust_hostudp = 1;
SYSCTL_INT(_hw_hn, OID_AUTO, trust_hostudp, CTLFLAG_RDTUN,
    &hn_trust_hostudp, 0,
    "Trust udp datagram verification on host side, "
    "when csum info is missing (global setting)");

/* Trust IP packet verification on host side. */
static int hn_trust_hostip = 1;
SYSCTL_INT(_hw_hn, OID_AUTO, trust_hostip, CTLFLAG_RDTUN,
    &hn_trust_hostip, 0,
    "Trust ip packet verification on host side, "
    "when csum info is missing (global setting)");
#if __FreeBSD_version >= 1100045
/* Limit TSO burst size */
static int hn_tso_maxlen = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, tso_maxlen, CTLFLAG_RDTUN,
    &hn_tso_maxlen, 0, "TSO burst limit");
#endif

/* Limit chimney send size */
static int hn_tx_chimney_size = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, tx_chimney_size, CTLFLAG_RDTUN,
    &hn_tx_chimney_size, 0, "Chimney send packet size limit");

/* Limit the size of packet for direct transmission */
static int hn_direct_tx_size = HN_DIRECT_TX_SIZE_DEF;
SYSCTL_INT(_hw_hn, OID_AUTO, direct_tx_size, CTLFLAG_RDTUN,
    &hn_direct_tx_size, 0, "Size of the packet for direct transmission");

#if defined(INET) || defined(INET6)
#if __FreeBSD_version >= 1100095
static int hn_lro_entry_count = HN_LROENT_CNT_DEF;
SYSCTL_INT(_hw_hn, OID_AUTO, lro_entry_count, CTLFLAG_RDTUN,
    &hn_lro_entry_count, 0, "LRO entry count");
#endif
#endif

static int hn_share_tx_taskq = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, share_tx_taskq, CTLFLAG_RDTUN,
    &hn_share_tx_taskq, 0, "Enable shared TX taskqueue");

static struct taskqueue *hn_tx_taskq;

#ifndef HN_USE_TXDESC_BUFRING
static int hn_use_txdesc_bufring = 0;
#else
static int hn_use_txdesc_bufring = 1;
#endif
SYSCTL_INT(_hw_hn, OID_AUTO, use_txdesc_bufring, CTLFLAG_RD,
    &hn_use_txdesc_bufring, 0, "Use buf_ring for TX descriptors");

static int hn_bind_tx_taskq = -1;
SYSCTL_INT(_hw_hn, OID_AUTO, bind_tx_taskq, CTLFLAG_RDTUN,
    &hn_bind_tx_taskq, 0, "Bind TX taskqueue to the specified cpu");

static int hn_use_if_start = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, use_if_start, CTLFLAG_RDTUN,
    &hn_use_if_start, 0, "Use if_start TX method");
/*
 * Forward declarations
 */
static void hn_stop(hn_softc_t *sc);
static void hn_ifinit_locked(hn_softc_t *sc);
static void hn_ifinit(void *xsc);
static int hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int hn_start_locked(struct hn_tx_ring *txr, int len);
static void hn_start(struct ifnet *ifp);
static void hn_start_txeof(struct hn_tx_ring *);
static int hn_ifmedia_upd(struct ifnet *ifp);
static void hn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
#if __FreeBSD_version >= 1100099
static int hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS);
#endif
static int hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS);
#if __FreeBSD_version < 1100095
static int hn_rx_stat_int_sysctl(SYSCTL_HANDLER_ARGS);
#else
static int hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS);
#endif
static int hn_rx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_tx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_tx_conf_int_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_check_iplen(const struct mbuf *, int);
static int hn_create_tx_ring(struct hn_softc *, int);
static void hn_destroy_tx_ring(struct hn_tx_ring *);
static int hn_create_tx_data(struct hn_softc *);
static void hn_destroy_tx_data(struct hn_softc *);
static void hn_start_taskfunc(void *, int);
static void hn_start_txeof_taskfunc(void *, int);
static void hn_stop_tx_tasks(struct hn_softc *);
static int hn_encap(struct hn_tx_ring *, struct hn_txdesc *, struct mbuf **);
static void hn_create_rx_data(struct hn_softc *sc);
static void hn_destroy_rx_data(struct hn_softc *sc);
static void hn_set_tx_chimney_size(struct hn_softc *, int);

static int hn_transmit(struct ifnet *, struct mbuf *);
static void hn_xmit_qflush(struct ifnet *);
static int hn_xmit(struct hn_tx_ring *, int);
static void hn_xmit_txeof(struct hn_tx_ring *);
static void hn_xmit_taskfunc(void *, int);
static void hn_xmit_txeof_taskfunc(void *, int);
static int
hn_ifmedia_upd(struct ifnet *ifp __unused)
{
    return 0;
}

static void
hn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
    struct hn_softc *sc = ifp->if_softc;

    ifmr->ifm_status = IFM_AVALID;
    ifmr->ifm_active = IFM_ETHER;

    if (!sc->hn_carrier) {
        ifmr->ifm_active |= IFM_NONE;
        return;
    }
    ifmr->ifm_status |= IFM_ACTIVE;
    ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
}
/* {F8615163-DF3E-46c5-913F-F2D2F965ED0E} */
static const hv_guid g_net_vsc_device_type = {
    .data = {0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
             0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E}
};
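/*
 * Note: the first three fields of the GUID are stored little-endian on
 * the wire, which is why the byte order above differs from the textual
 * form of the GUID in the comment.
 */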
/*
 * Standard probe entry point.
 */
static int
netvsc_probe(device_t dev)
{
    const char *p;

    p = vmbus_get_type(dev);
    if (!memcmp(p, &g_net_vsc_device_type.data, sizeof(hv_guid))) {
        device_set_desc(dev, "Synthetic Network Interface");
        printf("Netvsc probe... DONE \n");
        return (BUS_PROBE_DEFAULT);
    }

    return (ENXIO);
}

static void
hn_cpuset_setthread_task(void *xmask, int pending __unused)
{
    cpuset_t *mask = xmask;
    int error;

    error = cpuset_setthread(curthread->td_tid, mask);
    if (error) {
        panic("curthread=%ju: can't pin; error=%d",
            (uintmax_t)curthread->td_tid, error);
    }
}
/*
 * Standard attach entry point.
 *
 * Called when the driver is loaded.  It allocates needed resources,
 * and initializes the "hardware" and software.
 */
static int
netvsc_attach(device_t dev)
{
    struct hv_device *device_ctx = vmbus_get_devctx(dev);
    struct hv_vmbus_channel *chan;
    netvsc_device_info device_info;
    hn_softc_t *sc;
    int unit = device_get_unit(dev);
    struct ifnet *ifp = NULL;
    int error;
#if __FreeBSD_version >= 1100045
    int tso_maxlen;
#endif

    sc = device_get_softc(dev);
    if (sc == NULL)
        return (ENOMEM);

    bzero(sc, sizeof(hn_softc_t));
    sc->hn_unit = unit;
    sc->hn_dev = dev;
    if (hn_tx_taskq == NULL) {
        sc->hn_tx_taskq = taskqueue_create("hn_tx", M_WAITOK,
            taskqueue_thread_enqueue, &sc->hn_tx_taskq);
        taskqueue_start_threads(&sc->hn_tx_taskq, 1, PI_NET, "%s tx",
            device_get_nameunit(dev));
        if (hn_bind_tx_taskq >= 0) {
            int cpu = hn_bind_tx_taskq;
            struct task cpuset_task;
            cpuset_t cpu_set;

            if (cpu > mp_ncpus - 1)
                cpu = mp_ncpus - 1;
            CPU_SETOF(cpu, &cpu_set);
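            /*
             * Run hn_cpuset_setthread_task() on the TX taskqueue
             * thread itself: enqueueing the one-shot task and then
             * draining it synchronously pins that thread to the
             * requested CPU.
             */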
            TASK_INIT(&cpuset_task, 0, hn_cpuset_setthread_task,
                &cpu_set);
            taskqueue_enqueue(sc->hn_tx_taskq, &cpuset_task);
            taskqueue_drain(sc->hn_tx_taskq, &cpuset_task);
        }
    } else {
        sc->hn_tx_taskq = hn_tx_taskq;
    }

    NV_LOCK_INIT(sc, "NetVSCLock");

    sc->hn_dev_obj = device_ctx;

    ifp = sc->hn_ifp = sc->arpcom.ac_ifp = if_alloc(IFT_ETHER);
    ifp->if_softc = sc;
    error = hn_create_tx_data(sc);
    if (error)
        goto failed;

    hn_create_rx_data(sc);

    /*
     * Associate the first TX/RX ring w/ the primary channel.
     */
    chan = device_ctx->channel;
    chan->hv_chan_rxr = &sc->hn_rx_ring[0];
    chan->hv_chan_txr = &sc->hn_tx_ring[0];
    sc->hn_tx_ring[0].hn_chan = chan;

    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    ifp->if_dunit = unit;
    ifp->if_dname = NETVSC_DEVNAME;

    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_ioctl = hn_ioctl;
    ifp->if_init = hn_ifinit;
    /* needed by hv_rf_on_device_add() code */
    ifp->if_mtu = ETHERMTU;
    if (hn_use_if_start) {
        ifp->if_start = hn_start;
        IFQ_SET_MAXLEN(&ifp->if_snd, 512);
        ifp->if_snd.ifq_drv_maxlen = 511;
        IFQ_SET_READY(&ifp->if_snd);
    } else {
        ifp->if_transmit = hn_transmit;
        ifp->if_qflush = hn_xmit_qflush;
    }

    ifmedia_init(&sc->hn_media, 0, hn_ifmedia_upd, hn_ifmedia_sts);
    ifmedia_add(&sc->hn_media, IFM_ETHER | IFM_AUTO, 0, NULL);
    ifmedia_set(&sc->hn_media, IFM_ETHER | IFM_AUTO);
    /* XXX ifmedia_set really should do this for us */
    sc->hn_media.ifm_media = sc->hn_media.ifm_cur->ifm_media;

    /*
     * Tell upper layers that we support full VLAN capability.
     */
    ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
    ifp->if_capabilities |=
        IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_TSO |
        IFCAP_LRO;
    ifp->if_capenable |=
        IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_TSO |
        IFCAP_LRO;
    ifp->if_hwassist = sc->hn_tx_ring[0].hn_csum_assist | CSUM_TSO;
    error = hv_rf_on_device_add(device_ctx, &device_info);
    if (error)
        goto failed;

    if (device_info.link_state == 0) {
        sc->hn_carrier = 1;
    }

#if __FreeBSD_version >= 1100045
    tso_maxlen = hn_tso_maxlen;
    if (tso_maxlen <= 0 || tso_maxlen > IP_MAXPACKET)
        tso_maxlen = IP_MAXPACKET;

    ifp->if_hw_tsomaxsegcount = HN_TX_DATA_SEGCNT_MAX;
    ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
    ifp->if_hw_tsomax = tso_maxlen -
        (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);
#endif

    ether_ifattach(ifp, device_info.mac_addr);

#if __FreeBSD_version >= 1100045
    if_printf(ifp, "TSO: %u/%u/%u\n", ifp->if_hw_tsomax,
        ifp->if_hw_tsomaxsegcount, ifp->if_hw_tsomaxsegsize);
#endif

    sc->hn_tx_chimney_max = sc->net_dev->send_section_size;
    hn_set_tx_chimney_size(sc, sc->hn_tx_chimney_max);
    if (hn_tx_chimney_size > 0 &&
        hn_tx_chimney_size < sc->hn_tx_chimney_max)
        hn_set_tx_chimney_size(sc, hn_tx_chimney_size);

    return (0);

failed:
    hn_destroy_tx_data(sc);
    if (ifp != NULL)
        if_free(ifp);
    return (error);
}
/*
 * Standard detach entry point
 */
static int
netvsc_detach(device_t dev)
{
    struct hn_softc *sc = device_get_softc(dev);
    struct hv_device *hv_device = vmbus_get_devctx(dev);

    printf("netvsc_detach\n");

    /*
     * XXXKYS: Need to clean up all our
     * driver state; this is the driver
     * unloading.
     */

    /*
     * XXXKYS: Need to stop outgoing traffic and unregister
     * the netdevice.
     */

    hv_rf_on_device_remove(hv_device, HV_RF_NV_DESTROY_CHANNEL);

    hn_stop_tx_tasks(sc);

    ifmedia_removeall(&sc->hn_media);
    hn_destroy_rx_data(sc);
    hn_destroy_tx_data(sc);

    if (sc->hn_tx_taskq != hn_tx_taskq)
        taskqueue_free(sc->hn_tx_taskq);

    return (0);
}

/*
 * Standard shutdown entry point
 */
static int
netvsc_shutdown(device_t dev)
{
    return (0);
}
static int
hn_txdesc_dmamap_load(struct hn_tx_ring *txr, struct hn_txdesc *txd,
    struct mbuf **m_head, bus_dma_segment_t *segs, int *nsegs)
{
    struct mbuf *m = *m_head;
    int error;

    error = bus_dmamap_load_mbuf_sg(txr->hn_tx_data_dtag, txd->data_dmap,
        m, segs, nsegs, BUS_DMA_NOWAIT);
    if (error == EFBIG) {
        struct mbuf *m_new;

        m_new = m_collapse(m, M_NOWAIT, HN_TX_DATA_SEGCNT_MAX);
        if (m_new == NULL)
            return ENOBUFS;
        else
            m = *m_head = m_new;
        txr->hn_tx_collapsed++;

        error = bus_dmamap_load_mbuf_sg(txr->hn_tx_data_dtag,
            txd->data_dmap, m, segs, nsegs, BUS_DMA_NOWAIT);
    }
    if (!error) {
        bus_dmamap_sync(txr->hn_tx_data_dtag, txd->data_dmap,
            BUS_DMASYNC_PREWRITE);
        txd->flags |= HN_TXD_FLAG_DMAMAP;
    }
    return error;
}
static __inline void
hn_txdesc_dmamap_unload(struct hn_tx_ring *txr, struct hn_txdesc *txd)
{
    if (txd->flags & HN_TXD_FLAG_DMAMAP) {
        bus_dmamap_sync(txr->hn_tx_data_dtag,
            txd->data_dmap, BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(txr->hn_tx_data_dtag,
            txd->data_dmap);
        txd->flags &= ~HN_TXD_FLAG_DMAMAP;
    }
}
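
/*
 * Drop a reference on the TX descriptor.  Only when the last reference
 * goes away is the descriptor unloaded from its DMA map, its mbuf (if
 * any) freed, and the descriptor itself returned to the per-ring free
 * list or buf_ring.  Returns 1 when the descriptor was actually freed,
 * 0 otherwise.
 */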
static __inline int
hn_txdesc_put(struct hn_tx_ring *txr, struct hn_txdesc *txd)
{
    KASSERT((txd->flags & HN_TXD_FLAG_ONLIST) == 0,
        ("put an onlist txd %#x", txd->flags));

    KASSERT(txd->refs > 0, ("invalid txd refs %d", txd->refs));
    if (atomic_fetchadd_int(&txd->refs, -1) != 1)
        return 0;

    hn_txdesc_dmamap_unload(txr, txd);
    if (txd->m != NULL) {
        m_freem(txd->m);
        txd->m = NULL;
    }

    txd->flags |= HN_TXD_FLAG_ONLIST;

#ifndef HN_USE_TXDESC_BUFRING
    mtx_lock_spin(&txr->hn_txlist_spin);
    KASSERT(txr->hn_txdesc_avail >= 0 &&
        txr->hn_txdesc_avail < txr->hn_txdesc_cnt,
        ("txdesc_put: invalid txd avail %d", txr->hn_txdesc_avail));
    txr->hn_txdesc_avail++;
    SLIST_INSERT_HEAD(&txr->hn_txlist, txd, link);
    mtx_unlock_spin(&txr->hn_txlist_spin);
#else
    atomic_add_int(&txr->hn_txdesc_avail, 1);
    buf_ring_enqueue(txr->hn_txdesc_br, txd);
#endif

    return 1;
}
static __inline struct hn_txdesc *
hn_txdesc_get(struct hn_tx_ring *txr)
{
    struct hn_txdesc *txd;

#ifndef HN_USE_TXDESC_BUFRING
    mtx_lock_spin(&txr->hn_txlist_spin);
    txd = SLIST_FIRST(&txr->hn_txlist);
    if (txd != NULL) {
        KASSERT(txr->hn_txdesc_avail > 0,
            ("txdesc_get: invalid txd avail %d", txr->hn_txdesc_avail));
        txr->hn_txdesc_avail--;
        SLIST_REMOVE_HEAD(&txr->hn_txlist, link);
    }
    mtx_unlock_spin(&txr->hn_txlist_spin);
#else
    txd = buf_ring_dequeue_sc(txr->hn_txdesc_br);
#endif

    if (txd != NULL) {
#ifdef HN_USE_TXDESC_BUFRING
        atomic_subtract_int(&txr->hn_txdesc_avail, 1);
#endif
        KASSERT(txd->m == NULL && txd->refs == 0 &&
            (txd->flags & HN_TXD_FLAG_ONLIST), ("invalid txd"));
        txd->flags &= ~HN_TXD_FLAG_ONLIST;
        txd->refs = 1;
    }
    return txd;
}

static __inline void
hn_txdesc_hold(struct hn_txdesc *txd)
{
    /* 0->1 transition will never work */
    KASSERT(txd->refs > 0, ("invalid refs %d", txd->refs));
    atomic_add_int(&txd->refs, 1);
}
static void
hn_tx_done(void *xpkt)
{
    netvsc_packet *packet = xpkt;
    struct hn_txdesc *txd;
    struct hn_tx_ring *txr;

    txd = (struct hn_txdesc *)(uintptr_t)
        packet->compl.send.send_completion_tid;

    txr = txd->txr;
    txr->hn_has_txeof = 1;
    hn_txdesc_put(txr, txd);
}
void
netvsc_channel_rollup(struct hv_vmbus_channel *chan)
{
    struct hn_tx_ring *txr = chan->hv_chan_txr;
#if defined(INET) || defined(INET6)
    struct hn_rx_ring *rxr = chan->hv_chan_rxr;
    struct lro_ctrl *lro = &rxr->hn_lro;
    struct lro_entry *queued;
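
    /*
     * Flush all LRO aggregations accumulated during this channel
     * batch, now that the host has no more packets pending.
     */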
    while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
        SLIST_REMOVE_HEAD(&lro->lro_active, next);
        tcp_lro_flush(lro, queued);
    }
#endif

    /*
     * 'txr' could be NULL, if multiple channels are used and the
     * ifnet.if_start method is enabled.
     */
    if (txr == NULL || !txr->hn_has_txeof)
        return;

    txr->hn_has_txeof = 0;
    txr->hn_txeof(txr);
}
/*
 * NOTE:
 * If this function fails, then both txd and m_head0 will be freed.
 */
static int
hn_encap(struct hn_tx_ring *txr, struct hn_txdesc *txd, struct mbuf **m_head0)
{
    bus_dma_segment_t segs[HN_TX_DATA_SEGCNT_MAX];
    int error, nsegs, i;
    struct mbuf *m_head = *m_head0;
    netvsc_packet *packet;
    rndis_msg *rndis_mesg;
    rndis_packet *rndis_pkt;
    rndis_per_packet_info *rppi;
    struct ndis_hash_info *hash_info;
    uint32_t rndis_msg_size;

    packet = &txd->netvsc_pkt;
    packet->is_data_pkt = TRUE;
    packet->tot_data_buf_len = m_head->m_pkthdr.len;

    /*
     * extension points to the area reserved for the
     * rndis_filter_packet, which is placed just after
     * the netvsc_packet (and rppi struct, if present;
     * length is updated later).
     */
    rndis_mesg = txd->rndis_msg;

    /* XXX not necessary */
    memset(rndis_mesg, 0, HN_RNDIS_MSG_LEN);

    rndis_mesg->ndis_msg_type = REMOTE_NDIS_PACKET_MSG;

    rndis_pkt = &rndis_mesg->msg.packet;
    rndis_pkt->data_offset = sizeof(rndis_packet);
    rndis_pkt->data_length = packet->tot_data_buf_len;
    rndis_pkt->per_pkt_info_offset = sizeof(rndis_packet);

    rndis_msg_size = RNDIS_MESSAGE_SIZE(rndis_packet);

    /*
     * Set the hash info for this packet, so that the host could
     * dispatch the TX done event for this packet back to this TX
     * ring's channel.
     */
    rndis_msg_size += RNDIS_HASH_PPI_SIZE;
    rppi = hv_set_rppi_data(rndis_mesg, RNDIS_HASH_PPI_SIZE,
        nbl_hash_value);
    hash_info = (struct ndis_hash_info *)((uint8_t *)rppi +
        rppi->per_packet_info_offset);
    hash_info->hash = txr->hn_tx_idx;
    if (m_head->m_flags & M_VLANTAG) {
        ndis_8021q_info *rppi_vlan_info;

        rndis_msg_size += RNDIS_VLAN_PPI_SIZE;
        rppi = hv_set_rppi_data(rndis_mesg, RNDIS_VLAN_PPI_SIZE,
            ieee_8021q_info);

        rppi_vlan_info = (ndis_8021q_info *)((uint8_t *)rppi +
            rppi->per_packet_info_offset);
        rppi_vlan_info->u1.s1.vlan_id =
            m_head->m_pkthdr.ether_vtag & 0xfff;
    }
    if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
        rndis_tcp_tso_info *tso_info;
        struct ether_vlan_header *eh;
        int ether_len;

        /*
         * XXX need m_pullup and use mtodo
         */
        eh = mtod(m_head, struct ether_vlan_header*);
        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
            ether_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
        else
            ether_len = ETHER_HDR_LEN;

        rndis_msg_size += RNDIS_TSO_PPI_SIZE;
        rppi = hv_set_rppi_data(rndis_mesg, RNDIS_TSO_PPI_SIZE,
            tcp_large_send_info);

        tso_info = (rndis_tcp_tso_info *)((uint8_t *)rppi +
            rppi->per_packet_info_offset);
        tso_info->lso_v2_xmit.type =
            RNDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;

#ifdef INET
        if (m_head->m_pkthdr.csum_flags & CSUM_IP_TSO) {
            struct ip *ip =
                (struct ip *)(m_head->m_data + ether_len);
            unsigned long iph_len = ip->ip_hl << 2;
            struct tcphdr *th =
                (struct tcphdr *)((caddr_t)ip + iph_len);

            tso_info->lso_v2_xmit.ip_version =
                RNDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
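            /*
             * Zero ip_len/ip_sum and prime th_sum with the pseudo
             * header checksum, as NDIS LSOv2 expects; the host fills
             * in the final sums while segmenting.
             */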
            ip->ip_len = 0;
            ip->ip_sum = 0;
            th->th_sum = in_pseudo(ip->ip_src.s_addr,
                ip->ip_dst.s_addr, htons(IPPROTO_TCP));
        }
#endif
#if defined(INET6) && defined(INET)
        else
#endif
#ifdef INET6
        {
            struct ip6_hdr *ip6 = (struct ip6_hdr *)
                (m_head->m_data + ether_len);
            struct tcphdr *th = (struct tcphdr *)(ip6 + 1);

            tso_info->lso_v2_xmit.ip_version =
                RNDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
            ip6->ip6_plen = 0;
            th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
        }
#endif
        tso_info->lso_v2_xmit.tcp_header_offset = 0;
        tso_info->lso_v2_xmit.mss = m_head->m_pkthdr.tso_segsz;
    } else if (m_head->m_pkthdr.csum_flags & txr->hn_csum_assist) {
        rndis_tcp_ip_csum_info *csum_info;

        rndis_msg_size += RNDIS_CSUM_PPI_SIZE;
        rppi = hv_set_rppi_data(rndis_mesg, RNDIS_CSUM_PPI_SIZE,
            tcpip_chksum_info);
        csum_info = (rndis_tcp_ip_csum_info *)((uint8_t *)rppi +
            rppi->per_packet_info_offset);

        csum_info->xmit.is_ipv4 = 1;
        if (m_head->m_pkthdr.csum_flags & CSUM_IP)
            csum_info->xmit.ip_header_csum = 1;

        if (m_head->m_pkthdr.csum_flags & CSUM_TCP) {
            csum_info->xmit.tcp_csum = 1;
            csum_info->xmit.tcp_header_offset = 0;
        } else if (m_head->m_pkthdr.csum_flags & CSUM_UDP) {
            csum_info->xmit.udp_csum = 1;
        }
    }

    rndis_mesg->msg_len = packet->tot_data_buf_len + rndis_msg_size;
    packet->tot_data_buf_len = rndis_mesg->msg_len;
    /*
     * Chimney send, if the packet could fit into one chimney buffer.
     *
     * TODO: vRSS, chimney buffer should be per-channel.
     */
    if (packet->tot_data_buf_len < txr->hn_tx_chimney_size) {
        netvsc_dev *net_dev = txr->hn_sc->net_dev;
        uint32_t send_buf_section_idx;

        send_buf_section_idx =
            hv_nv_get_next_send_section(net_dev);
        if (send_buf_section_idx !=
            NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX) {
            uint8_t *dest = ((uint8_t *)net_dev->send_buf +
                (send_buf_section_idx *
                 net_dev->send_section_size));

            memcpy(dest, rndis_mesg, rndis_msg_size);
            dest += rndis_msg_size;
            m_copydata(m_head, 0, m_head->m_pkthdr.len, dest);

            packet->send_buf_section_idx = send_buf_section_idx;
            packet->send_buf_section_size =
                packet->tot_data_buf_len;
            packet->page_buf_count = 0;
            txr->hn_tx_chimney++;
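            /*
             * The RNDIS message and the payload were copied into a
             * pre-shared chimney send buffer section above, so no
             * DMA load or page buffers are needed; otherwise fall
             * through to the page buffer path below.
             */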
            goto done;
        }
    }

    error = hn_txdesc_dmamap_load(txr, txd, &m_head, segs, &nsegs);
    if (__predict_false(error)) {
        int freed;

        /*
         * This mbuf is not linked w/ the txd yet, so free it now.
         */
        m_freem(m_head);
        *m_head0 = NULL;

        freed = hn_txdesc_put(txr, txd);
        KASSERT(freed != 0,
            ("fail to free txd upon txdma error"));

        txr->hn_txdma_failed++;
        if_inc_counter(txr->hn_sc->hn_ifp, IFCOUNTER_OERRORS, 1);
        return error;
    }
    *m_head0 = m_head;

    packet->page_buf_count = nsegs + HV_RF_NUM_TX_RESERVED_PAGE_BUFS;

    /* send packet with page buffer */
    packet->page_buffers[0].pfn = atop(txd->rndis_msg_paddr);
    packet->page_buffers[0].offset = txd->rndis_msg_paddr & PAGE_MASK;
    packet->page_buffers[0].length = rndis_msg_size;

    /*
     * Fill the page buffers with mbuf info starting at index
     * HV_RF_NUM_TX_RESERVED_PAGE_BUFS.
     */
    for (i = 0; i < nsegs; ++i) {
        hv_vmbus_page_buffer *pb = &packet->page_buffers[
            i + HV_RF_NUM_TX_RESERVED_PAGE_BUFS];

        pb->pfn = atop(segs[i].ds_addr);
        pb->offset = segs[i].ds_addr & PAGE_MASK;
        pb->length = segs[i].ds_len;
    }

    packet->send_buf_section_idx =
        NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX;
    packet->send_buf_section_size = 0;

done:
    txd->m = m_head;

    /* Set the completion routine */
    packet->compl.send.on_send_completion = hn_tx_done;
    packet->compl.send.send_completion_context = packet;
    packet->compl.send.send_completion_tid = (uint64_t)(uintptr_t)txd;
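    /*
     * The completion tid smuggles the txdesc pointer through the host
     * round trip; hn_tx_done() casts it back to release the descriptor.
     */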
    return 0;
}

/*
 * NOTE:
 * If this function fails, then txd will be freed, but the mbuf
 * associated w/ the txd will _not_ be freed.
 */
static int
hn_send_pkt(struct ifnet *ifp, struct hn_tx_ring *txr, struct hn_txdesc *txd)
{
    int error, send_failed = 0;

again:
    /*
     * Make sure that txd is not freed before ETHER_BPF_MTAP.
     */
    hn_txdesc_hold(txd);
    error = hv_nv_on_send(txr->hn_chan, &txd->netvsc_pkt);
    if (!error) {
        ETHER_BPF_MTAP(ifp, txd->m);
        if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
        if (!hn_use_if_start) {
            if_inc_counter(ifp, IFCOUNTER_OBYTES,
                txd->m->m_pkthdr.len);
            if (txd->m->m_flags & M_MCAST)
                if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
        }
    }
    hn_txdesc_put(txr, txd);
    if (__predict_false(error)) {
        int freed;

        /*
         * This should "really rarely" happen.
         *
         * XXX Too many RX to be acked or too many sideband
         * commands to run?  Ask netvsc_channel_rollup()
         * to kick start later.
         */
        txr->hn_has_txeof = 1;
        if (!send_failed) {
            txr->hn_send_failed++;
            send_failed = 1;
            /*
             * Try sending again after set hn_has_txeof;
             * in case that we missed the last
             * netvsc_channel_rollup().
             */
            goto again;
        }
        if_printf(ifp, "send failed\n");

        /*
         * Caller will perform further processing on the
         * associated mbuf, so don't free it in hn_txdesc_put();
         * only unload it from the DMA map in hn_txdesc_put(),
         * if it was loaded.
         */
        txd->m = NULL;
        freed = hn_txdesc_put(txr, txd);
        KASSERT(freed != 0,
            ("fail to free txd upon send error"));

        txr->hn_send_failed++;
    }

    return error;
}
/*
 * Start a transmit of one or more packets
 */
static int
hn_start_locked(struct hn_tx_ring *txr, int len)
{
    struct hn_softc *sc = txr->hn_sc;
    struct ifnet *ifp = sc->hn_ifp;

    KASSERT(hn_use_if_start,
        ("hn_start_locked is called, when if_start is disabled"));
    KASSERT(txr == &sc->hn_tx_ring[0], ("not the first TX ring"));
    mtx_assert(&txr->hn_tx_lock, MA_OWNED);

    if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
        IFF_DRV_RUNNING)
        return 0;

    while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
        struct hn_txdesc *txd;
        struct mbuf *m_head;
        int error;

        IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
        if (m_head == NULL)
            break;

        if (len > 0 && m_head->m_pkthdr.len > len) {
            /*
             * This sending could be time consuming; let callers
             * dispatch this packet sending (and sending of any
             * following packets) to the tx taskqueue.
             */
            IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
            return 1;
        }

        txd = hn_txdesc_get(txr);
        if (txd == NULL) {
            txr->hn_no_txdescs++;
            IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
            atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
            break;
        }

        error = hn_encap(txr, txd, &m_head);
        if (__predict_false(error)) {
            /* Both txd and m_head are freed */
            continue;
        }

        error = hn_send_pkt(ifp, txr, txd);
        if (__predict_false(error)) {
            /* txd is freed, but m_head is not */
            IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
            atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
            break;
        }
    }
    return 0;
}
/*
 * Link up/down notification
 */
void
netvsc_linkstatus_callback(struct hv_device *device_obj, uint32_t status)
{
    hn_softc_t *sc = device_get_softc(device_obj->device);

    if (status == 1) {
        sc->hn_carrier = 1;
    } else {
        sc->hn_carrier = 0;
    }
}
/*
 * Append the specified data to the indicated mbuf chain.
 * Extend the mbuf chain if the new data does not fit in
 * existing space.
 *
 * This is a minor rewrite of m_append() from sys/kern/uipc_mbuf.c.
 * There should be an equivalent in the kernel mbuf code,
 * but there does not appear to be one yet.
 *
 * Differs from m_append() in that additional mbufs are
 * allocated with cluster size MJUMPAGESIZE, and filled
 * accordingly.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
static int
hv_m_append(struct mbuf *m0, int len, c_caddr_t cp)
{
    struct mbuf *m, *n;
    int remainder, space;

    for (m = m0; m->m_next != NULL; m = m->m_next)
        ;
    remainder = len;
    space = M_TRAILINGSPACE(m);
    if (space > 0) {
        /*
         * Copy into available space.
         */
        if (space > remainder)
            space = remainder;
        bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
        m->m_len += space;
        cp += space;
        remainder -= space;
    }
    while (remainder > 0) {
        /*
         * Allocate a new mbuf; could check space
         * and allocate a cluster instead.
         */
        n = m_getjcl(M_DONTWAIT, m->m_type, 0, MJUMPAGESIZE);
        if (n == NULL)
            break;
        n->m_len = min(MJUMPAGESIZE, remainder);
        bcopy(cp, mtod(n, caddr_t), n->m_len);
        cp += n->m_len;
        remainder -= n->m_len;
        m->m_next = n;
        m = n;
    }
    if (m0->m_flags & M_PKTHDR)
        m0->m_pkthdr.len += len - remainder;

    return (remainder == 0);
}
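
/*
 * A minimal usage sketch (with a hypothetical buffer "data" of "len"
 * bytes): allocate an mbuf and append into it, checking the return
 * value for completion:
 *
 *     m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE);
 *     if (m == NULL || !hv_m_append(m, len, data))
 *             ... handle the allocation/copy failure ...
 *
 * This loosely mirrors the large-packet path in netvsc_recv() below.
 */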
/*
 * Called when we receive a data packet from the "wire" on the
 * specified device.
 *
 * Note: This is no longer used as a callback.
 */
int
netvsc_recv(struct hv_vmbus_channel *chan, netvsc_packet *packet,
    rndis_tcp_ip_csum_info *csum_info)
{
    struct hn_rx_ring *rxr = chan->hv_chan_rxr;
    struct ifnet *ifp = rxr->hn_ifp;
    struct mbuf *m_new;
    int size, do_lro = 0, do_csum = 1;

    if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
        return (0);

    /*
     * Bail out if packet contains more data than configured MTU.
     */
    if (packet->tot_data_buf_len > (ifp->if_mtu + ETHER_HDR_LEN)) {
        return (0);
    } else if (packet->tot_data_buf_len <= MHLEN) {
        m_new = m_gethdr(M_NOWAIT, MT_DATA);
        if (m_new == NULL)
            return (0);
        memcpy(mtod(m_new, void *), packet->data,
            packet->tot_data_buf_len);
        m_new->m_pkthdr.len = m_new->m_len = packet->tot_data_buf_len;
        rxr->hn_small_pkts++;
    } else {
        /*
         * Get an mbuf with a cluster.  For packets 2K or less,
         * get a standard 2K cluster.  For anything larger, get a
         * 4K cluster.  Any buffers larger than 4K can cause problems
         * if looped around to the Hyper-V TX channel, so avoid them.
         */
        size = MCLBYTES;
        if (packet->tot_data_buf_len > MCLBYTES) {
            /* 4096 */
            size = MJUMPAGESIZE;
        }

        m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, size);
        if (m_new == NULL) {
            if_printf(ifp, "alloc mbuf failed.\n");
            return (0);
        }

        hv_m_append(m_new, packet->tot_data_buf_len, packet->data);
    }
    m_new->m_pkthdr.rcvif = ifp;
    if (__predict_false((ifp->if_capenable & IFCAP_RXCSUM) == 0))
        do_csum = 0;

    /* receive side checksum offload */
    if (csum_info != NULL) {
        /* IP csum offload */
        if (csum_info->receive.ip_csum_succeeded && do_csum) {
            m_new->m_pkthdr.csum_flags |=
                (CSUM_IP_CHECKED | CSUM_IP_VALID);
            rxr->hn_csum_ip++;
        }

        /* TCP/UDP csum offload */
        if ((csum_info->receive.tcp_csum_succeeded ||
             csum_info->receive.udp_csum_succeeded) && do_csum) {
            m_new->m_pkthdr.csum_flags |=
                (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
            m_new->m_pkthdr.csum_data = 0xffff;
            if (csum_info->receive.tcp_csum_succeeded)
                rxr->hn_csum_tcp++;
            else
                rxr->hn_csum_udp++;
        }

        if (csum_info->receive.ip_csum_succeeded &&
            csum_info->receive.tcp_csum_succeeded)
            do_lro = 1;
    } else {
        const struct ether_header *eh;
        uint16_t etype;
        int hoff;

        hoff = sizeof(*eh);
        if (m_new->m_len < hoff)
            goto skip;
        eh = mtod(m_new, struct ether_header *);
        etype = ntohs(eh->ether_type);
        if (etype == ETHERTYPE_VLAN) {
            const struct ether_vlan_header *evl;

            hoff = sizeof(*evl);
            if (m_new->m_len < hoff)
                goto skip;
            evl = mtod(m_new, struct ether_vlan_header *);
            etype = ntohs(evl->evl_proto);
        }

        if (etype == ETHERTYPE_IP) {
            int pr;

            pr = hn_check_iplen(m_new, hoff);
            if (pr == IPPROTO_TCP) {
                if (do_csum &&
                    (rxr->hn_trust_hcsum &
                     HN_TRUST_HCSUM_TCP)) {
                    rxr->hn_csum_trusted++;
                    m_new->m_pkthdr.csum_flags |=
                        (CSUM_IP_CHECKED | CSUM_IP_VALID |
                         CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
                    m_new->m_pkthdr.csum_data = 0xffff;
                }
                /* Rely on SW csum verification though... */
                do_lro = 1;
            } else if (pr == IPPROTO_UDP) {
                if (do_csum &&
                    (rxr->hn_trust_hcsum &
                     HN_TRUST_HCSUM_UDP)) {
                    rxr->hn_csum_trusted++;
                    m_new->m_pkthdr.csum_flags |=
                        (CSUM_IP_CHECKED | CSUM_IP_VALID |
                         CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
                    m_new->m_pkthdr.csum_data = 0xffff;
                }
            } else if (pr != IPPROTO_DONE && do_csum &&
                (rxr->hn_trust_hcsum & HN_TRUST_HCSUM_IP)) {
                rxr->hn_csum_trusted++;
                m_new->m_pkthdr.csum_flags |=
                    (CSUM_IP_CHECKED | CSUM_IP_VALID);
            }
        }
    }
skip:
    if ((packet->vlan_tci != 0) &&
        (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
        m_new->m_pkthdr.ether_vtag = packet->vlan_tci;
        m_new->m_flags |= M_VLANTAG;
    }

    m_new->m_pkthdr.flowid = rxr->hn_rx_idx;
    M_HASHTYPE_SET(m_new, M_HASHTYPE_OPAQUE);

    /*
     * Note: Moved RX completion back to hv_nv_on_receive() so all
     * messages (not just data messages) will trigger a response.
     */

    if ((ifp->if_capenable & IFCAP_LRO) && do_lro) {
#if defined(INET) || defined(INET6)
        struct lro_ctrl *lro = &rxr->hn_lro;

        if (lro->lro_cnt) {
            rxr->hn_lro_tried++;
            if (tcp_lro_rx(lro, m_new, 0) == 0) {
                /* DONE! */
                return 0;
            }
        }
#endif
    }

    /* We're not holding the lock here, so don't release it */
    (*ifp->if_input)(ifp, m_new);

    return (0);
}
/*
 * Rules for using sc->temp_unusable:
 * 1. sc->temp_unusable can only be read or written while holding NV_LOCK()
 * 2. code reading sc->temp_unusable under NV_LOCK(), and finding
 *    sc->temp_unusable set, must release NV_LOCK() and exit
 * 3. to retain exclusive control of the interface,
 *    sc->temp_unusable must be set by code before releasing NV_LOCK()
 * 4. only code setting sc->temp_unusable can clear sc->temp_unusable
 * 5. code setting sc->temp_unusable must eventually clear sc->temp_unusable
 */
/*
 * Standard ioctl entry point.  Called when the user wants to configure
 * the interface.
 */
static int
hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
    hn_softc_t *sc = ifp->if_softc;
    struct ifreq *ifr = (struct ifreq *)data;
#ifdef INET
    struct ifaddr *ifa = (struct ifaddr *)data;
#endif
    netvsc_device_info device_info;
    struct hv_device *hn_dev;
    int mask, error = 0;
    int retry_cnt = 500;

    switch (cmd) {
    case SIOCSIFADDR:
#ifdef INET
        if (ifa->ifa_addr->sa_family == AF_INET) {
            ifp->if_flags |= IFF_UP;
            if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                hn_ifinit(sc);
            arp_ifinit(ifp, ifa);
        } else
#endif
            error = ether_ioctl(ifp, cmd, data);
        break;
    case SIOCSIFMTU:
        hn_dev = vmbus_get_devctx(sc->hn_dev);
1396 hn_dev = vmbus_get_devctx(sc->hn_dev);
1398 /* Check MTU value change */
1399 if (ifp->if_mtu == ifr->ifr_mtu)
1402 if (ifr->ifr_mtu > NETVSC_MAX_CONFIGURABLE_MTU) {
1407 /* Obtain and record requested MTU */
1408 ifp->if_mtu = ifr->ifr_mtu;
1410 #if __FreeBSD_version >= 1100099
1412 * Make sure that LRO aggregation length limit is still
1413 * valid, after the MTU change.
1416 if (sc->hn_rx_ring[0].hn_lro.lro_length_lim <
1417 HN_LRO_LENLIM_MIN(ifp)) {
1419 for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
1420 sc->hn_rx_ring[i].hn_lro.lro_length_lim =
1421 HN_LRO_LENLIM_MIN(ifp);
        do {
            NV_LOCK(sc);
            if (!sc->temp_unusable) {
                sc->temp_unusable = TRUE;
                retry_cnt = -1;
            }
            NV_UNLOCK(sc);
            if (retry_cnt > 0) {
                retry_cnt--;
                DELAY(5 * 1000);
            }
        } while (retry_cnt > 0);

        if (retry_cnt == 0) {
            error = EBUSY;
            break;
        }

        /* We must remove and add back the device to cause the new
         * MTU to take effect.  This includes tearing down, but not
         * deleting the channel, then bringing it back up.
         */
        error = hv_rf_on_device_remove(hn_dev, HV_RF_NV_RETAIN_CHANNEL);
        if (error) {
            NV_LOCK(sc);
            sc->temp_unusable = FALSE;
            NV_UNLOCK(sc);
            break;
        }
        error = hv_rf_on_device_add(hn_dev, &device_info);
        if (error) {
            NV_LOCK(sc);
            sc->temp_unusable = FALSE;
            NV_UNLOCK(sc);
            break;
        }

        sc->hn_tx_chimney_max = sc->net_dev->send_section_size;
        if (sc->hn_tx_ring[0].hn_tx_chimney_size >
            sc->hn_tx_chimney_max)
            hn_set_tx_chimney_size(sc, sc->hn_tx_chimney_max);

        hn_ifinit_locked(sc);

        NV_LOCK(sc);
        sc->temp_unusable = FALSE;
        NV_UNLOCK(sc);
        break;
    case SIOCSIFFLAGS:
        do {
            NV_LOCK(sc);
            if (!sc->temp_unusable) {
                sc->temp_unusable = TRUE;
                retry_cnt = -1;
            }
            NV_UNLOCK(sc);
            if (retry_cnt > 0) {
                retry_cnt--;
                DELAY(5 * 1000);
            }
        } while (retry_cnt > 0);

        if (retry_cnt == 0) {
            error = EBUSY;
            break;
        }

        if (ifp->if_flags & IFF_UP) {
            /*
             * If only the state of the PROMISC flag changed,
             * then just use the 'set promisc mode' command
             * instead of reinitializing the entire NIC. Doing
             * a full re-init means reloading the firmware and
             * waiting for it to start up, which may take a
             * second or two.
             */

            /* Fixme:  Promiscuous mode? */
            if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
                ifp->if_flags & IFF_PROMISC &&
                !(sc->hn_if_flags & IFF_PROMISC)) {
                /* do something here for Hyper-V */
            } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
                !(ifp->if_flags & IFF_PROMISC) &&
                sc->hn_if_flags & IFF_PROMISC) {
                /* do something here for Hyper-V */
            } else
                hn_ifinit_locked(sc);
        } else {
            if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                hn_stop(sc);
            }
        }
        NV_LOCK(sc);
        sc->temp_unusable = FALSE;
        NV_UNLOCK(sc);
        sc->hn_if_flags = ifp->if_flags;
        error = 0;
        break;
    case SIOCSIFCAP:
        mask = ifr->ifr_reqcap ^ ifp->if_capenable;
        if (mask & IFCAP_TXCSUM) {
            ifp->if_capenable ^= IFCAP_TXCSUM;
            if (ifp->if_capenable & IFCAP_TXCSUM) {
                ifp->if_hwassist |=
                    sc->hn_tx_ring[0].hn_csum_assist;
            } else {
                ifp->if_hwassist &=
                    ~sc->hn_tx_ring[0].hn_csum_assist;
            }
        }

        if (mask & IFCAP_RXCSUM)
            ifp->if_capenable ^= IFCAP_RXCSUM;

        if (mask & IFCAP_LRO)
            ifp->if_capenable ^= IFCAP_LRO;

        if (mask & IFCAP_TSO4) {
            ifp->if_capenable ^= IFCAP_TSO4;
            if (ifp->if_capenable & IFCAP_TSO4)
                ifp->if_hwassist |= CSUM_IP_TSO;
            else
                ifp->if_hwassist &= ~CSUM_IP_TSO;
        }

        if (mask & IFCAP_TSO6) {
            ifp->if_capenable ^= IFCAP_TSO6;
            if (ifp->if_capenable & IFCAP_TSO6)
                ifp->if_hwassist |= CSUM_IP6_TSO;
            else
                ifp->if_hwassist &= ~CSUM_IP6_TSO;
        }

        error = 0;
        break;
    case SIOCADDMULTI:
    case SIOCDELMULTI:
#ifdef notyet
        /* Fixme:  Multicast mode? */
        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
            NV_LOCK(sc);
            netvsc_setmulti(sc);
            NV_UNLOCK(sc);
        }
#endif
        error = 0;
        break;
    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        error = ifmedia_ioctl(ifp, ifr, &sc->hn_media, cmd);
        break;
    default:
        error = ether_ioctl(ifp, cmd, data);
        break;
    }

    return (error);
}
/*
 * hn_stop
 */
static void
hn_stop(hn_softc_t *sc)
{
    struct ifnet *ifp;
    int ret, i;
    struct hv_device *device_ctx = vmbus_get_devctx(sc->hn_dev);

    ifp = sc->hn_ifp;

    if (bootverbose)
        printf(" Closing Device ...\n");

    atomic_clear_int(&ifp->if_drv_flags,
        (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
    for (i = 0; i < sc->hn_tx_ring_cnt; ++i)
        sc->hn_tx_ring[i].hn_oactive = 0;

    if_link_state_change(ifp, LINK_STATE_DOWN);
    sc->hn_initdone = 0;

    ret = hv_rf_on_close(device_ctx);
}
/*
 * FreeBSD transmit entry point
 */
static void
hn_start(struct ifnet *ifp)
{
    struct hn_softc *sc = ifp->if_softc;
    struct hn_tx_ring *txr = &sc->hn_tx_ring[0];

    if (txr->hn_sched_tx)
        goto do_sched;

    if (mtx_trylock(&txr->hn_tx_lock)) {
        int sched;

        sched = hn_start_locked(txr, txr->hn_direct_tx_size);
        mtx_unlock(&txr->hn_tx_lock);
        if (!sched)
            return;
    }
do_sched:
    taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_tx_task);
}
static void
hn_start_txeof(struct hn_tx_ring *txr)
{
    struct hn_softc *sc = txr->hn_sc;
    struct ifnet *ifp = sc->hn_ifp;

    KASSERT(txr == &sc->hn_tx_ring[0], ("not the first TX ring"));

    if (txr->hn_sched_tx)
        goto do_sched;

    if (mtx_trylock(&txr->hn_tx_lock)) {
        int sched;

        atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
        sched = hn_start_locked(txr, txr->hn_direct_tx_size);
        mtx_unlock(&txr->hn_tx_lock);
        if (sched) {
            taskqueue_enqueue(txr->hn_tx_taskq,
                &txr->hn_tx_task);
        }
    } else {
do_sched:
        /*
         * Release the OACTIVE earlier, with the hope, that
         * others could catch up.  The task will clear the
         * flag again with the hn_tx_lock to avoid possible
         * races.
         */
        atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
        taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_txeof_task);
    }
}
/*
 * hn_ifinit_locked
 */
static void
hn_ifinit_locked(hn_softc_t *sc)
{
    struct ifnet *ifp;
    struct hv_device *device_ctx = vmbus_get_devctx(sc->hn_dev);
    int ret, i;

    ifp = sc->hn_ifp;

    if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
        return;
    }

    hv_promisc_mode = 1;

    ret = hv_rf_on_open(device_ctx);
    if (ret != 0) {
        return;
    } else {
        sc->hn_initdone = 1;
    }

    atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
    for (i = 0; i < sc->hn_tx_ring_cnt; ++i)
        sc->hn_tx_ring[i].hn_oactive = 0;

    atomic_set_int(&ifp->if_drv_flags, IFF_DRV_RUNNING);
    if_link_state_change(ifp, LINK_STATE_UP);
}
/*
 * hn_ifinit
 */
static void
hn_ifinit(void *xsc)
{
    hn_softc_t *sc = xsc;

    NV_LOCK(sc);
    if (sc->temp_unusable) {
        NV_UNLOCK(sc);
        return;
    }
    sc->temp_unusable = TRUE;
    NV_UNLOCK(sc);

    hn_ifinit_locked(sc);

    NV_LOCK(sc);
    sc->temp_unusable = FALSE;
    NV_UNLOCK(sc);
}
#ifdef LATER
static void
hn_watchdog(struct ifnet *ifp)
{
    hn_softc_t *sc;
    sc = ifp->if_softc;

    printf("hn%d: watchdog timeout -- resetting\n", sc->hn_unit);
    hn_ifinit(sc);    /*???*/
    ifp->if_oerrors++;
}
#endif
#if __FreeBSD_version >= 1100099

static int
hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct hn_softc *sc = arg1;
    unsigned int lenlim;
    int error, i;

    lenlim = sc->hn_rx_ring[0].hn_lro.lro_length_lim;
    error = sysctl_handle_int(oidp, &lenlim, 0, req);
    if (error || req->newptr == NULL)
        return error;

    if (lenlim < HN_LRO_LENLIM_MIN(sc->hn_ifp) ||
        lenlim > TCP_LRO_LENGTH_MAX)
        return EINVAL;

    NV_LOCK(sc);
    for (i = 0; i < sc->hn_rx_ring_cnt; ++i)
        sc->hn_rx_ring[i].hn_lro.lro_length_lim = lenlim;
    NV_UNLOCK(sc);
    return 0;
}
static int
hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct hn_softc *sc = arg1;
    int ackcnt, error, i;

    /*
     * lro_ackcnt_lim is append count limit,
     * +1 to turn it into aggregation limit.
     */
    ackcnt = sc->hn_rx_ring[0].hn_lro.lro_ackcnt_lim + 1;
    error = sysctl_handle_int(oidp, &ackcnt, 0, req);
    if (error || req->newptr == NULL)
        return error;

    if (ackcnt < 2 || ackcnt > (TCP_LRO_ACKCNT_MAX + 1))
        return EINVAL;

    /*
     * Convert aggregation limit back to append
     * count limit.
     */
    --ackcnt;
    NV_LOCK(sc);
    for (i = 0; i < sc->hn_rx_ring_cnt; ++i)
        sc->hn_rx_ring[i].hn_lro.lro_ackcnt_lim = ackcnt;
    NV_UNLOCK(sc);
    return 0;
}

#endif
static int
hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct hn_softc *sc = arg1;
    int hcsum = arg2;
    int on, error, i;

    on = 0;
    if (sc->hn_rx_ring[0].hn_trust_hcsum & hcsum)
        on = 1;

    error = sysctl_handle_int(oidp, &on, 0, req);
    if (error || req->newptr == NULL)
        return error;

    NV_LOCK(sc);
    for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
        struct hn_rx_ring *rxr = &sc->hn_rx_ring[i];

        if (on)
            rxr->hn_trust_hcsum |= hcsum;
        else
            rxr->hn_trust_hcsum &= ~hcsum;
    }
    NV_UNLOCK(sc);
    return 0;
}
static int
hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct hn_softc *sc = arg1;
    int chimney_size, error;

    chimney_size = sc->hn_tx_ring[0].hn_tx_chimney_size;
    error = sysctl_handle_int(oidp, &chimney_size, 0, req);
    if (error || req->newptr == NULL)
        return error;

    if (chimney_size > sc->hn_tx_chimney_max || chimney_size <= 0)
        return EINVAL;

    hn_set_tx_chimney_size(sc, chimney_size);
    return 0;
}
#if __FreeBSD_version < 1100095
static int
hn_rx_stat_int_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct hn_softc *sc = arg1;
    int ofs = arg2, i, error;
    struct hn_rx_ring *rxr;
    uint64_t stat;

    stat = 0;
    for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
        rxr = &sc->hn_rx_ring[i];
        stat += *((int *)((uint8_t *)rxr + ofs));
    }

    error = sysctl_handle_64(oidp, &stat, 0, req);
    if (error || req->newptr == NULL)
        return error;

    /* Zero out this stat. */
    for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
        rxr = &sc->hn_rx_ring[i];
        *((int *)((uint8_t *)rxr + ofs)) = 0;
    }
    return 0;
}
#else
static int
hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct hn_softc *sc = arg1;
    int ofs = arg2, i, error;
    struct hn_rx_ring *rxr;
    uint64_t stat;

    stat = 0;
    for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
        rxr = &sc->hn_rx_ring[i];
        stat += *((uint64_t *)((uint8_t *)rxr + ofs));
    }

    error = sysctl_handle_64(oidp, &stat, 0, req);
    if (error || req->newptr == NULL)
        return error;

    /* Zero out this stat. */
    for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
        rxr = &sc->hn_rx_ring[i];
        *((uint64_t *)((uint8_t *)rxr + ofs)) = 0;
    }
    return 0;
}
#endif
static int
hn_rx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct hn_softc *sc = arg1;
    int ofs = arg2, i, error;
    struct hn_rx_ring *rxr;
    u_long stat;

    stat = 0;
    for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
        rxr = &sc->hn_rx_ring[i];
        stat += *((u_long *)((uint8_t *)rxr + ofs));
    }

    error = sysctl_handle_long(oidp, &stat, 0, req);
    if (error || req->newptr == NULL)
        return error;

    /* Zero out this stat. */
    for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
        rxr = &sc->hn_rx_ring[i];
        *((u_long *)((uint8_t *)rxr + ofs)) = 0;
    }
    return 0;
}
static int
hn_tx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct hn_softc *sc = arg1;
    int ofs = arg2, i, error;
    struct hn_tx_ring *txr;
    u_long stat;

    stat = 0;
    for (i = 0; i < sc->hn_tx_ring_cnt; ++i) {
        txr = &sc->hn_tx_ring[i];
        stat += *((u_long *)((uint8_t *)txr + ofs));
    }

    error = sysctl_handle_long(oidp, &stat, 0, req);
    if (error || req->newptr == NULL)
        return error;

    /* Zero out this stat. */
    for (i = 0; i < sc->hn_tx_ring_cnt; ++i) {
        txr = &sc->hn_tx_ring[i];
        *((u_long *)((uint8_t *)txr + ofs)) = 0;
    }
    return 0;
}
static int
hn_tx_conf_int_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct hn_softc *sc = arg1;
    int ofs = arg2, i, error, conf;
    struct hn_tx_ring *txr;

    txr = &sc->hn_tx_ring[0];
    conf = *((int *)((uint8_t *)txr + ofs));

    error = sysctl_handle_int(oidp, &conf, 0, req);
    if (error || req->newptr == NULL)
        return error;

    NV_LOCK(sc);
    for (i = 0; i < sc->hn_tx_ring_cnt; ++i) {
        txr = &sc->hn_tx_ring[i];
        *((int *)((uint8_t *)txr + ofs)) = conf;
    }
    NV_UNLOCK(sc);

    return 0;
}
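
/*
 * Sanity-check the IP and TCP/UDP headers of a received frame.  Returns
 * the packet's IP protocol when the headers are complete and plausible,
 * or IPPROTO_DONE when they cannot be validated; netvsc_recv() uses the
 * result to decide whether to trust the host's checksum verdict.
 */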
static int
hn_check_iplen(const struct mbuf *m, int hoff)
{
    const struct ip *ip;
    int len, iphlen, iplen;
    const struct tcphdr *th;
    int thoff;				/* TCP data offset */

    len = hoff + sizeof(struct ip);

    /* The packet must be at least the size of an IP header. */
    if (m->m_pkthdr.len < len)
        return IPPROTO_DONE;

    /* The fixed IP header must reside completely in the first mbuf. */
    if (m->m_len < len)
        return IPPROTO_DONE;

    ip = mtodo(m, hoff);

    /* Bound check the packet's stated IP header length. */
    iphlen = ip->ip_hl << 2;
    if (iphlen < sizeof(struct ip))	/* minimum header length */
        return IPPROTO_DONE;

    /* The full IP header must reside completely in the one mbuf. */
    if (m->m_len < hoff + iphlen)
        return IPPROTO_DONE;

    iplen = ntohs(ip->ip_len);

    /*
     * Check that the amount of data in the buffers is at least
     * as much as the IP header would have us expect.
     */
    if (m->m_pkthdr.len < hoff + iplen)
        return IPPROTO_DONE;

    /*
     * Ignore IP fragments.
     */
    if (ntohs(ip->ip_off) & (IP_OFFMASK | IP_MF))
        return IPPROTO_DONE;

    /*
     * The TCP/IP or UDP/IP header must be entirely contained within
     * the first fragment of a packet.
     */
    switch (ip->ip_p) {
    case IPPROTO_TCP:
        if (iplen < iphlen + sizeof(struct tcphdr))
            return IPPROTO_DONE;
        if (m->m_len < hoff + iphlen + sizeof(struct tcphdr))
            return IPPROTO_DONE;
        th = (const struct tcphdr *)((const uint8_t *)ip + iphlen);
        thoff = th->th_off << 2;
        if (thoff < sizeof(struct tcphdr) || thoff + iphlen > iplen)
            return IPPROTO_DONE;
        if (m->m_len < hoff + iphlen + thoff)
            return IPPROTO_DONE;
        break;
    case IPPROTO_UDP:
        if (iplen < iphlen + sizeof(struct udphdr))
            return IPPROTO_DONE;
        if (m->m_len < hoff + iphlen + sizeof(struct udphdr))
            return IPPROTO_DONE;
        break;
    default:
        if (iplen < iphlen)
            return IPPROTO_DONE;
        break;
    }

    return ip->ip_p;
}
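
/*
 * bus_dmamap_load() callback used for the RNDIS message DMA maps: record
 * the physical address of the single segment.  The KASSERT below guards
 * the single-segment assumption.
 */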
static void
hn_dma_map_paddr(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
    bus_addr_t *paddr = arg;

    if (error)
        return;

    KASSERT(nseg == 1, ("too many segments %d!", nseg));
    *paddr = segs->ds_addr;
}
static void
hn_create_rx_data(struct hn_softc *sc)
{
    struct sysctl_oid_list *child;
    struct sysctl_ctx_list *ctx;
    device_t dev = sc->hn_dev;
#if defined(INET) || defined(INET6)
#if __FreeBSD_version >= 1100095
    int lroent_cnt;
#endif
#endif
    int i;

    sc->hn_rx_ring_cnt = 1; /* TODO: vRSS */
    sc->hn_rx_ring = malloc(sizeof(struct hn_rx_ring) * sc->hn_rx_ring_cnt,
        M_NETVSC, M_WAITOK | M_ZERO);

#if defined(INET) || defined(INET6)
#if __FreeBSD_version >= 1100095
    lroent_cnt = hn_lro_entry_count;
    if (lroent_cnt < TCP_LRO_ENTRIES)
        lroent_cnt = TCP_LRO_ENTRIES;
    device_printf(dev, "LRO: entry count %d\n", lroent_cnt);
#endif
#endif /* INET || INET6 */
    for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
        struct hn_rx_ring *rxr = &sc->hn_rx_ring[i];

        if (hn_trust_hosttcp)
            rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_TCP;
        if (hn_trust_hostudp)
            rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_UDP;
        if (hn_trust_hostip)
            rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_IP;
        rxr->hn_ifp = sc->hn_ifp;
        rxr->hn_rx_idx = i;

        /*
         * Initialize LRO.
         */
#if defined(INET) || defined(INET6)
#if __FreeBSD_version >= 1100095
        tcp_lro_init_args(&rxr->hn_lro, sc->hn_ifp, lroent_cnt, 0);
#else
        tcp_lro_init(&rxr->hn_lro);
        rxr->hn_lro.ifp = sc->hn_ifp;
#endif
#if __FreeBSD_version >= 1100099
        rxr->hn_lro.lro_length_lim = HN_LRO_LENLIM_DEF;
        rxr->hn_lro.lro_ackcnt_lim = HN_LRO_ACKCNT_DEF;
#endif
#endif /* INET || INET6 */
    }

    ctx = device_get_sysctl_ctx(dev);
    child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_queued",
        CTLTYPE_U64 | CTLFLAG_RW, sc,
        __offsetof(struct hn_rx_ring, hn_lro.lro_queued),
#if __FreeBSD_version < 1100095
        hn_rx_stat_int_sysctl,
#else
        hn_rx_stat_u64_sysctl,
#endif
        "LU", "LRO queued");
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_flushed",
        CTLTYPE_U64 | CTLFLAG_RW, sc,
        __offsetof(struct hn_rx_ring, hn_lro.lro_flushed),
#if __FreeBSD_version < 1100095
        hn_rx_stat_int_sysctl,
#else
        hn_rx_stat_u64_sysctl,
#endif
        "LU", "LRO flushed");
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_tried",
        CTLTYPE_ULONG | CTLFLAG_RW, sc,
        __offsetof(struct hn_rx_ring, hn_lro_tried),
        hn_rx_stat_ulong_sysctl, "LU", "# of LRO tries");
#if __FreeBSD_version >= 1100099
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_length_lim",
        CTLTYPE_UINT | CTLFLAG_RW, sc, 0, hn_lro_lenlim_sysctl, "IU",
        "Max # of data bytes to be aggregated by LRO");
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_ackcnt_lim",
        CTLTYPE_INT | CTLFLAG_RW, sc, 0, hn_lro_ackcnt_sysctl, "I",
        "Max # of ACKs to be aggregated by LRO");
#endif
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hosttcp",
        CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_TCP,
        hn_trust_hcsum_sysctl, "I",
        "Trust tcp segment verification on host side, "
        "when csum info is missing");
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostudp",
        CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_UDP,
        hn_trust_hcsum_sysctl, "I",
        "Trust udp datagram verification on host side, "
        "when csum info is missing");
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostip",
        CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_IP,
        hn_trust_hcsum_sysctl, "I",
        "Trust ip packet verification on host side, "
        "when csum info is missing");
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_ip",
        CTLTYPE_ULONG | CTLFLAG_RW, sc,
        __offsetof(struct hn_rx_ring, hn_csum_ip),
        hn_rx_stat_ulong_sysctl, "LU", "RXCSUM IP");
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_tcp",
        CTLTYPE_ULONG | CTLFLAG_RW, sc,
        __offsetof(struct hn_rx_ring, hn_csum_tcp),
        hn_rx_stat_ulong_sysctl, "LU", "RXCSUM TCP");
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_udp",
        CTLTYPE_ULONG | CTLFLAG_RW, sc,
        __offsetof(struct hn_rx_ring, hn_csum_udp),
        hn_rx_stat_ulong_sysctl, "LU", "RXCSUM UDP");
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_trusted",
        CTLTYPE_ULONG | CTLFLAG_RW, sc,
        __offsetof(struct hn_rx_ring, hn_csum_trusted),
        hn_rx_stat_ulong_sysctl, "LU",
        "# of packets that we trust host's csum verification");
    SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "small_pkts",
        CTLTYPE_ULONG | CTLFLAG_RW, sc,
        __offsetof(struct hn_rx_ring, hn_small_pkts),
        hn_rx_stat_ulong_sysctl, "LU", "# of small packets received");
}
static void
hn_destroy_rx_data(struct hn_softc *sc)
{
#if defined(INET) || defined(INET6)
    int i;
#endif

    if (sc->hn_rx_ring_cnt == 0)
        return;

#if defined(INET) || defined(INET6)
    for (i = 0; i < sc->hn_rx_ring_cnt; ++i)
        tcp_lro_free(&sc->hn_rx_ring[i].hn_lro);
#endif

    free(sc->hn_rx_ring, M_NETVSC);
    sc->hn_rx_ring = NULL;

    sc->hn_rx_ring_cnt = 0;
}
static int
hn_create_tx_ring(struct hn_softc *sc, int id)
{
    struct hn_tx_ring *txr = &sc->hn_tx_ring[id];
    bus_dma_tag_t parent_dtag;
    int error, i;

    txr->hn_sc = sc;
    txr->hn_tx_idx = id;

#ifndef HN_USE_TXDESC_BUFRING
    mtx_init(&txr->hn_txlist_spin, "hn txlist", NULL, MTX_SPIN);
#endif
    mtx_init(&txr->hn_tx_lock, "hn tx", NULL, MTX_DEF);

    txr->hn_txdesc_cnt = HN_TX_DESC_CNT;
    txr->hn_txdesc = malloc(sizeof(struct hn_txdesc) * txr->hn_txdesc_cnt,
        M_NETVSC, M_WAITOK | M_ZERO);
#ifndef HN_USE_TXDESC_BUFRING
    SLIST_INIT(&txr->hn_txlist);
#else
    txr->hn_txdesc_br = buf_ring_alloc(txr->hn_txdesc_cnt, M_NETVSC,
        M_WAITOK, &txr->hn_tx_lock);
#endif

    txr->hn_tx_taskq = sc->hn_tx_taskq;

    if (hn_use_if_start) {
        txr->hn_txeof = hn_start_txeof;
        TASK_INIT(&txr->hn_tx_task, 0, hn_start_taskfunc, txr);
        TASK_INIT(&txr->hn_txeof_task, 0, hn_start_txeof_taskfunc, txr);
    } else {
        txr->hn_txeof = hn_xmit_txeof;
        TASK_INIT(&txr->hn_tx_task, 0, hn_xmit_taskfunc, txr);
        TASK_INIT(&txr->hn_txeof_task, 0, hn_xmit_txeof_taskfunc, txr);
        txr->hn_mbuf_br = buf_ring_alloc(txr->hn_txdesc_cnt, M_NETVSC,
            M_WAITOK, &txr->hn_tx_lock);
    }

    txr->hn_direct_tx_size = hn_direct_tx_size;
    if (hv_vmbus_protocal_version >= HV_VMBUS_VERSION_WIN8_1)
        txr->hn_csum_assist = HN_CSUM_ASSIST;
    else
        txr->hn_csum_assist = HN_CSUM_ASSIST_WIN8;

    /*
     * Always schedule transmission instead of trying to do direct
     * transmission.  This one gives the best performance so far.
     */
    txr->hn_sched_tx = 1;
    parent_dtag = bus_get_dma_tag(sc->hn_dev);

    /* DMA tag for RNDIS messages. */
    error = bus_dma_tag_create(parent_dtag, /* parent */
        HN_RNDIS_MSG_ALIGN,	/* alignment */
        HN_RNDIS_MSG_BOUNDARY,	/* boundary */
        BUS_SPACE_MAXADDR,	/* lowaddr */
        BUS_SPACE_MAXADDR,	/* highaddr */
        NULL, NULL,		/* filter, filterarg */
        HN_RNDIS_MSG_LEN,	/* maxsize */
        1,			/* nsegments */
        HN_RNDIS_MSG_LEN,	/* maxsegsize */
        0,			/* flags */
        NULL,			/* lockfunc */
        NULL,			/* lockfuncarg */
        &txr->hn_tx_rndis_dtag);
    if (error) {
        device_printf(sc->hn_dev, "failed to create rndis dmatag\n");
        return error;
    }

    /* DMA tag for data. */
    error = bus_dma_tag_create(parent_dtag, /* parent */
        1,			/* alignment */
        HN_TX_DATA_BOUNDARY,	/* boundary */
        BUS_SPACE_MAXADDR,	/* lowaddr */
        BUS_SPACE_MAXADDR,	/* highaddr */
        NULL, NULL,		/* filter, filterarg */
        HN_TX_DATA_MAXSIZE,	/* maxsize */
        HN_TX_DATA_SEGCNT_MAX,	/* nsegments */
        HN_TX_DATA_SEGSIZE,	/* maxsegsize */
        0,			/* flags */
        NULL,			/* lockfunc */
        NULL,			/* lockfuncarg */
        &txr->hn_tx_data_dtag);
    if (error) {
        device_printf(sc->hn_dev, "failed to create data dmatag\n");
        return error;
    }
    for (i = 0; i < txr->hn_txdesc_cnt; ++i) {
        struct hn_txdesc *txd = &txr->hn_txdesc[i];

        txd->txr = txr;

        /*
         * Allocate and load RNDIS messages.
         */
        error = bus_dmamem_alloc(txr->hn_tx_rndis_dtag,
            (void **)&txd->rndis_msg,
            BUS_DMA_WAITOK | BUS_DMA_COHERENT,
            &txd->rndis_msg_dmap);
        if (error) {
            device_printf(sc->hn_dev,
                "failed to allocate rndis_msg, %d\n", i);
            return error;
        }

        error = bus_dmamap_load(txr->hn_tx_rndis_dtag,
            txd->rndis_msg_dmap,
            txd->rndis_msg, HN_RNDIS_MSG_LEN,
            hn_dma_map_paddr, &txd->rndis_msg_paddr,
            BUS_DMA_NOWAIT);
        if (error) {
            device_printf(sc->hn_dev,
                "failed to load rndis_msg, %d\n", i);
            bus_dmamem_free(txr->hn_tx_rndis_dtag,
                txd->rndis_msg, txd->rndis_msg_dmap);
            return error;
        }

        /* DMA map for TX data. */
        error = bus_dmamap_create(txr->hn_tx_data_dtag, 0,
            &txd->data_dmap);
        if (error) {
            device_printf(sc->hn_dev,
                "failed to allocate tx data dmamap\n");
            bus_dmamap_unload(txr->hn_tx_rndis_dtag,
                txd->rndis_msg_dmap);
            bus_dmamem_free(txr->hn_tx_rndis_dtag,
                txd->rndis_msg, txd->rndis_msg_dmap);
            return error;
        }

        /* All set, put it to list */
        txd->flags |= HN_TXD_FLAG_ONLIST;
#ifndef HN_USE_TXDESC_BUFRING
        SLIST_INSERT_HEAD(&txr->hn_txlist, txd, link);
#else
        buf_ring_enqueue(txr->hn_txdesc_br, txd);
#endif
    }
    txr->hn_txdesc_avail = txr->hn_txdesc_cnt;
    if (sc->hn_tx_sysctl_tree != NULL) {
        struct sysctl_oid_list *child;
        struct sysctl_ctx_list *ctx;
        char name[16];

        /*
         * Create per TX ring sysctl tree:
         * dev.hn.UNIT.tx.RINGID
         */
        ctx = device_get_sysctl_ctx(sc->hn_dev);
        child = SYSCTL_CHILDREN(sc->hn_tx_sysctl_tree);

        snprintf(name, sizeof(name), "%d", id);
        txr->hn_tx_sysctl_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
            name, CTLFLAG_RD, 0, "");

        if (txr->hn_tx_sysctl_tree != NULL) {
            child = SYSCTL_CHILDREN(txr->hn_tx_sysctl_tree);

            SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_avail",
                CTLFLAG_RD, &txr->hn_txdesc_avail, 0,
                "# of available TX descs");
            if (!hn_use_if_start) {
                SYSCTL_ADD_INT(ctx, child, OID_AUTO, "oactive",
                    CTLFLAG_RD, &txr->hn_oactive, 0,
                    "over active");
            }
        }
    }

    return 0;
}
static void
hn_txdesc_dmamap_destroy(struct hn_txdesc *txd)
{
	struct hn_tx_ring *txr = txd->txr;

	KASSERT(txd->m == NULL, ("still has mbuf installed"));
	KASSERT((txd->flags & HN_TXD_FLAG_DMAMAP) == 0, ("still dma mapped"));

	bus_dmamap_unload(txr->hn_tx_rndis_dtag, txd->rndis_msg_dmap);
	bus_dmamem_free(txr->hn_tx_rndis_dtag, txd->rndis_msg,
	    txd->rndis_msg_dmap);
	bus_dmamap_destroy(txr->hn_tx_data_dtag, txd->data_dmap);
}
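
/*
 * Tear down a TX ring: destroy all TX descriptors, DMA tags, buf_rings
 * and locks created by hn_create_tx_ring().
 */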
static void
hn_destroy_tx_ring(struct hn_tx_ring *txr)
{
	struct hn_txdesc *txd;

	if (txr->hn_txdesc == NULL)
		return;

#ifndef HN_USE_TXDESC_BUFRING
	while ((txd = SLIST_FIRST(&txr->hn_txlist)) != NULL) {
		SLIST_REMOVE_HEAD(&txr->hn_txlist, link);
		hn_txdesc_dmamap_destroy(txd);
	}
#else
	mtx_lock(&txr->hn_tx_lock);
	while ((txd = buf_ring_dequeue_sc(txr->hn_txdesc_br)) != NULL)
		hn_txdesc_dmamap_destroy(txd);
	mtx_unlock(&txr->hn_tx_lock);
#endif

	if (txr->hn_tx_data_dtag != NULL)
		bus_dma_tag_destroy(txr->hn_tx_data_dtag);
	if (txr->hn_tx_rndis_dtag != NULL)
		bus_dma_tag_destroy(txr->hn_tx_rndis_dtag);

#ifdef HN_USE_TXDESC_BUFRING
	buf_ring_free(txr->hn_txdesc_br, M_NETVSC);
#endif

	free(txr->hn_txdesc, M_NETVSC);
	txr->hn_txdesc = NULL;

	if (txr->hn_mbuf_br != NULL)
		buf_ring_free(txr->hn_mbuf_br, M_NETVSC);

#ifndef HN_USE_TXDESC_BUFRING
	mtx_destroy(&txr->hn_txlist_spin);
#endif
	mtx_destroy(&txr->hn_tx_lock);
}
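
/*
 * Allocate the TX ring array and create the dev.hn.UNIT.tx sysctl tree.
 * The statistics nodes are serviced by hn_tx_stat_ulong_sysctl, which
 * operates on the corresponding field of every TX ring.
 */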
static int
hn_create_tx_data(struct hn_softc *sc)
{
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx;
	int i;

	if (hn_use_if_start) {
		/* ifnet.if_start only needs one TX ring. */
		sc->hn_tx_ring_cnt = 1;
	} else {
		sc->hn_tx_ring_cnt = 1; /* TODO: vRSS */
	}
	sc->hn_tx_ring = malloc(sizeof(struct hn_tx_ring) * sc->hn_tx_ring_cnt,
	    M_NETVSC, M_WAITOK | M_ZERO);

	ctx = device_get_sysctl_ctx(sc->hn_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->hn_dev));

	/* Create dev.hn.UNIT.tx sysctl tree */
	sc->hn_tx_sysctl_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "tx",
	    CTLFLAG_RD, 0, "");

	for (i = 0; i < sc->hn_tx_ring_cnt; ++i) {
		int error;

		error = hn_create_tx_ring(sc, i);
		if (error)
			return error;
	}

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "no_txdescs",
	    CTLTYPE_ULONG | CTLFLAG_RW, sc,
	    __offsetof(struct hn_tx_ring, hn_no_txdescs),
	    hn_tx_stat_ulong_sysctl, "LU", "# of times short of TX descs");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "send_failed",
	    CTLTYPE_ULONG | CTLFLAG_RW, sc,
	    __offsetof(struct hn_tx_ring, hn_send_failed),
	    hn_tx_stat_ulong_sysctl, "LU", "# of hyper-v sending failures");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txdma_failed",
	    CTLTYPE_ULONG | CTLFLAG_RW, sc,
	    __offsetof(struct hn_tx_ring, hn_txdma_failed),
	    hn_tx_stat_ulong_sysctl, "LU", "# of TX DMA failures");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_collapsed",
	    CTLTYPE_ULONG | CTLFLAG_RW, sc,
	    __offsetof(struct hn_tx_ring, hn_tx_collapsed),
	    hn_tx_stat_ulong_sysctl, "LU", "# of TX mbufs collapsed");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney",
	    CTLTYPE_ULONG | CTLFLAG_RW, sc,
	    __offsetof(struct hn_tx_ring, hn_tx_chimney),
	    hn_tx_stat_ulong_sysctl, "LU", "# of chimney sends");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_cnt",
	    CTLFLAG_RD, &sc->hn_tx_ring[0].hn_txdesc_cnt, 0,
	    "# of total TX descs");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_chimney_max",
	    CTLFLAG_RD, &sc->hn_tx_chimney_max, 0,
	    "Chimney send packet size upper boundary");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney_size",
	    CTLTYPE_INT | CTLFLAG_RW, sc, 0, hn_tx_chimney_size_sysctl,
	    "I", "Chimney send packet size limit");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "direct_tx_size",
	    CTLTYPE_INT | CTLFLAG_RW, sc,
	    __offsetof(struct hn_tx_ring, hn_direct_tx_size),
	    hn_tx_conf_int_sysctl, "I",
	    "Size of the packet for direct transmission");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "sched_tx",
	    CTLTYPE_INT | CTLFLAG_RW, sc,
	    __offsetof(struct hn_tx_ring, hn_sched_tx),
	    hn_tx_conf_int_sysctl, "I",
	    "Always schedule transmission "
	    "instead of doing direct transmission");

	return 0;
}
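
/*
 * Apply a new chimney (copy-based) send size limit to all TX rings.
 */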
static void
hn_set_tx_chimney_size(struct hn_softc *sc, int chimney_size)
{
	int i;

	NV_LOCK(sc);
	for (i = 0; i < sc->hn_tx_ring_cnt; ++i)
		sc->hn_tx_ring[i].hn_tx_chimney_size = chimney_size;
	NV_UNLOCK(sc);
}
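
/*
 * Destroy all TX rings and free the TX ring array.
 */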
static void
hn_destroy_tx_data(struct hn_softc *sc)
{
	int i;

	if (sc->hn_tx_ring_cnt == 0)
		return;

	for (i = 0; i < sc->hn_tx_ring_cnt; ++i)
		hn_destroy_tx_ring(&sc->hn_tx_ring[i]);

	free(sc->hn_tx_ring, M_NETVSC);
	sc->hn_tx_ring = NULL;
	sc->hn_tx_ring_cnt = 0;
}
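
/*
 * Taskqueue handler for deferred transmission on the if_start path;
 * drains the ring under the TX lock.
 */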
static void
hn_start_taskfunc(void *xtxr, int pending __unused)
{
	struct hn_tx_ring *txr = xtxr;

	mtx_lock(&txr->hn_tx_lock);
	hn_start_locked(txr, 0);
	mtx_unlock(&txr->hn_tx_lock);
}
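
/*
 * Taskqueue handler for TX completion on the if_start path: clear
 * IFF_DRV_OACTIVE, then resume transmission.
 */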
static void
hn_start_txeof_taskfunc(void *xtxr, int pending __unused)
{
	struct hn_tx_ring *txr = xtxr;

	mtx_lock(&txr->hn_tx_lock);
	atomic_clear_int(&txr->hn_sc->hn_ifp->if_drv_flags, IFF_DRV_OACTIVE);
	hn_start_locked(txr, 0);
	mtx_unlock(&txr->hn_tx_lock);
}
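
/*
 * Drain the pending TX and TX-completion tasks of every TX ring.
 */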
static void
hn_stop_tx_tasks(struct hn_softc *sc)
{
	int i;

	for (i = 0; i < sc->hn_tx_ring_cnt; ++i) {
		struct hn_tx_ring *txr = &sc->hn_tx_ring[i];

		taskqueue_drain(txr->hn_tx_taskq, &txr->hn_tx_task);
		taskqueue_drain(txr->hn_tx_taskq, &txr->hn_txeof_task);
	}
}
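
/*
 * Send packets queued on the ring's mbuf buf_ring; called with the TX
 * lock held on the if_transmit path.  Packets longer than len are left
 * queued, and a nonzero return tells the caller to schedule the TX task.
 */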
static int
hn_xmit(struct hn_tx_ring *txr, int len)
{
	struct hn_softc *sc = txr->hn_sc;
	struct ifnet *ifp = sc->hn_ifp;
	struct mbuf *m_head;

	mtx_assert(&txr->hn_tx_lock, MA_OWNED);
	KASSERT(hn_use_if_start == 0,
	    ("hn_xmit is called, when if_start is enabled"));

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || txr->hn_oactive)
		return 0;

	while ((m_head = drbr_peek(ifp, txr->hn_mbuf_br)) != NULL) {
		struct hn_txdesc *txd;
		int error;

		if (len > 0 && m_head->m_pkthdr.len > len) {
			/*
			 * Sending this packet could be time consuming;
			 * let callers dispatch it (and any following
			 * packets) to the TX taskqueue.
			 */
			drbr_putback(ifp, txr->hn_mbuf_br, m_head);
			return 1;
		}

		txd = hn_txdesc_get(txr);
		if (txd == NULL) {
			txr->hn_no_txdescs++;
			drbr_putback(ifp, txr->hn_mbuf_br, m_head);
			txr->hn_oactive = 1;
			break;
		}

		error = hn_encap(txr, txd, &m_head);
		if (error) {
			/* Both txd and m_head are freed; discard */
			drbr_advance(ifp, txr->hn_mbuf_br);
			continue;
		}

		error = hn_send_pkt(ifp, txr, txd);
		if (__predict_false(error)) {
			/* txd is freed, but m_head is not */
			drbr_putback(ifp, txr->hn_mbuf_br, m_head);
			txr->hn_oactive = 1;
			break;
		}

		/* Sent */
		drbr_advance(ifp, txr->hn_mbuf_br);
	}
	return 0;
}
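
/*
 * ifnet.if_transmit method: select a TX ring by flowid, enqueue the
 * mbuf, and transmit directly when the TX lock is uncontended, falling
 * back to the ring's TX task otherwise.
 */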
static int
hn_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct hn_softc *sc = ifp->if_softc;
	struct hn_tx_ring *txr;
	int error, idx = 0;

	/*
	 * Select the TX ring based on flowid.
	 */
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		idx = m->m_pkthdr.flowid % sc->hn_tx_ring_cnt;
	txr = &sc->hn_tx_ring[idx];

	error = drbr_enqueue(ifp, txr->hn_mbuf_br, m);
	if (error)
		return error;

	if (txr->hn_oactive)
		return 0;

	if (txr->hn_sched_tx)
		goto do_sched;

	if (mtx_trylock(&txr->hn_tx_lock)) {
		int sched;

		sched = hn_xmit(txr, txr->hn_direct_tx_size);
		mtx_unlock(&txr->hn_tx_lock);
		if (!sched)
			return 0;
	}
do_sched:
	taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_tx_task);
	return 0;
}
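
/*
 * ifnet.if_qflush method: free every mbuf queued on each TX ring.
 */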
static void
hn_xmit_qflush(struct ifnet *ifp)
{
	struct hn_softc *sc = ifp->if_softc;
	int i;

	for (i = 0; i < sc->hn_tx_ring_cnt; ++i) {
		struct hn_tx_ring *txr = &sc->hn_tx_ring[i];
		struct mbuf *m;

		mtx_lock(&txr->hn_tx_lock);
		while ((m = buf_ring_dequeue_sc(txr->hn_mbuf_br)) != NULL)
			m_freem(m);
		mtx_unlock(&txr->hn_tx_lock);
	}
	if_qflush(ifp);
}
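
/*
 * TX completion on the if_transmit path: clear the per-ring oactive
 * flag and restart transmission, deferring to the TX-completion task
 * when the TX lock is contended.
 */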
static void
hn_xmit_txeof(struct hn_tx_ring *txr)
{

	if (txr->hn_sched_tx)
		goto do_sched;

	if (mtx_trylock(&txr->hn_tx_lock)) {
		int sched;

		txr->hn_oactive = 0;
		sched = hn_xmit(txr, txr->hn_direct_tx_size);
		mtx_unlock(&txr->hn_tx_lock);
		if (sched) {
			taskqueue_enqueue(txr->hn_tx_taskq,
			    &txr->hn_tx_task);
		}
	} else {
do_sched:
		/*
		 * Release oactive earlier, in the hope that others
		 * could catch up.  The task will clear oactive again,
		 * under the hn_tx_lock, to avoid possible races.
		 */
		txr->hn_oactive = 0;
		taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_txeof_task);
	}
}
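
/*
 * Taskqueue handler for deferred transmission on the if_transmit path.
 */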
static void
hn_xmit_taskfunc(void *xtxr, int pending __unused)
{
	struct hn_tx_ring *txr = xtxr;

	mtx_lock(&txr->hn_tx_lock);
	hn_xmit(txr, 0);
	mtx_unlock(&txr->hn_tx_lock);
}
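
/*
 * Taskqueue handler for TX completion on the if_transmit path: clear
 * oactive under the TX lock, then resume transmission.
 */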
static void
hn_xmit_txeof_taskfunc(void *xtxr, int pending __unused)
{
	struct hn_tx_ring *txr = xtxr;

	mtx_lock(&txr->hn_tx_lock);
	txr->hn_oactive = 0;
	hn_xmit(txr, 0);
	mtx_unlock(&txr->hn_tx_lock);
}
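
/*
 * Create the shared TX taskqueue at boot, when hn_share_tx_taskq is
 * enabled; optionally bind its thread to the CPU named by the
 * hn_bind_tx_taskq tunable.
 */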
static void
hn_tx_taskq_create(void *arg __unused)
{
	if (!hn_share_tx_taskq)
		return;

	hn_tx_taskq = taskqueue_create("hn_tx", M_WAITOK,
	    taskqueue_thread_enqueue, &hn_tx_taskq);
	taskqueue_start_threads(&hn_tx_taskq, 1, PI_NET, "hn tx");
	if (hn_bind_tx_taskq >= 0) {
		int cpu = hn_bind_tx_taskq;
		struct task cpuset_task;
		cpuset_t cpu_set;

		if (cpu > mp_ncpus - 1)
			cpu = mp_ncpus - 1;
		CPU_SETOF(cpu, &cpu_set);
		TASK_INIT(&cpuset_task, 0, hn_cpuset_setthread_task, &cpu_set);
		taskqueue_enqueue(hn_tx_taskq, &cpuset_task);
		taskqueue_drain(hn_tx_taskq, &cpuset_task);
	}
}
SYSINIT(hn_txtq_create, SI_SUB_DRIVERS, SI_ORDER_FIRST,
    hn_tx_taskq_create, NULL);

static void
hn_tx_taskq_destroy(void *arg __unused)
{
	if (hn_tx_taskq != NULL)
		taskqueue_free(hn_tx_taskq);
}
SYSUNINIT(hn_txtq_destroy, SI_SUB_DRIVERS, SI_ORDER_FIRST,
    hn_tx_taskq_destroy, NULL);

static device_method_t netvsc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		netvsc_probe),
	DEVMETHOD(device_attach,	netvsc_attach),
	DEVMETHOD(device_detach,	netvsc_detach),
	DEVMETHOD(device_shutdown,	netvsc_shutdown),

	DEVMETHOD_END
};

static driver_t netvsc_driver = {
	NETVSC_DEVNAME,
	netvsc_methods,
	sizeof(hn_softc_t)
};

static devclass_t netvsc_devclass;

DRIVER_MODULE(hn, vmbus, netvsc_driver, netvsc_devclass, 0, 0);
MODULE_VERSION(hn, 1);
MODULE_DEPEND(hn, vmbus, 1, 1, 1);