/*-
 * Copyright (c) 2010-2012 Citrix Inc.
 * Copyright (c) 2009-2012,2016 Microsoft Corp.
 * Copyright (c) 2012 NetApp Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 2004-2006 Kip Macy
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/buf_ring.h>

#include <net/if_arp.h>
#include <net/ethernet.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <netinet/ip6.h>

#include <vm/vm_param.h>
#include <vm/vm_kern.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/frame.h>
#include <machine/vmparam.h>

#include <sys/rman.h>
#include <sys/mutex.h>
#include <sys/errno.h>
#include <sys/types.h>
#include <machine/atomic.h>

#include <machine/intr_machdep.h>

#include <machine/in_cksum.h>

#include <dev/hyperv/include/hyperv.h>
#include <dev/hyperv/include/hyperv_busdma.h>

#include "hv_net_vsc.h"
#include "hv_rndis.h"
#include "hv_rndis_filter.h"
#include "vmbus_if.h"

#define hv_chan_rxr     hv_chan_priv1
#define hv_chan_txr     hv_chan_priv2

/* Short for Hyper-V network interface */
#define NETVSC_DEVNAME  "hn"

/*
 * It looks like offset 0 of buf is reserved to hold the softc pointer.
 * The sc pointer is evidently not needed, and is not presently populated.
 * The packet offset is where the netvsc_packet starts in the buffer.
 */
#define HV_NV_SC_PTR_OFFSET_IN_BUF      0
#define HV_NV_PACKET_OFFSET_IN_BUF      16

/* YYY should get it from the underlying channel */
#define HN_TX_DESC_CNT                  512

#define HN_LROENT_CNT_DEF               128

#define HN_RING_CNT_DEF_MAX             8

#define HN_RNDIS_MSG_LEN                \
    (sizeof(rndis_msg) +                \
     RNDIS_HASHVAL_PPI_SIZE +           \
     RNDIS_VLAN_PPI_SIZE +              \
     RNDIS_TSO_PPI_SIZE +               \
     RNDIS_CSUM_PPI_SIZE)
#define HN_RNDIS_MSG_BOUNDARY           PAGE_SIZE
#define HN_RNDIS_MSG_ALIGN              CACHE_LINE_SIZE

#define HN_TX_DATA_BOUNDARY             PAGE_SIZE
#define HN_TX_DATA_MAXSIZE              IP_MAXPACKET
#define HN_TX_DATA_SEGSIZE              PAGE_SIZE
#define HN_TX_DATA_SEGCNT_MAX           \
    (NETVSC_PACKET_MAXPAGE - HV_RF_NUM_TX_RESERVED_PAGE_BUFS)
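/*
 * Note: HV_RF_NUM_TX_RESERVED_PAGE_BUFS page buffers are excluded from the
 * data segment budget above; hn_encap() uses those reserved leading page
 * buffers for the RNDIS message itself, so a fully mapped packet consumes
 * nsegs + HV_RF_NUM_TX_RESERVED_PAGE_BUFS page buffers in total.
 */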
#define HN_DIRECT_TX_SIZE_DEF           128

#define HN_EARLY_TXEOF_THRESH           8
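/*
 * Note: hn_tx_done() counts send completions; once HN_EARLY_TXEOF_THRESH
 * of them have accumulated, it resets the counter and runs TX completion
 * processing immediately instead of waiting for netvsc_channel_rollup().
 */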
struct hn_txdesc {
#ifndef HN_USE_TXDESC_BUFRING
    SLIST_ENTRY(hn_txdesc) link;
#endif
    struct mbuf     *m;
    struct hn_tx_ring *txr;
    int             refs;
    uint32_t        flags;          /* HN_TXD_FLAG_ */
    netvsc_packet   netvsc_pkt;     /* XXX to be removed */

    bus_dmamap_t    data_dmap;

    bus_addr_t      rndis_msg_paddr;
    rndis_msg       *rndis_msg;
    bus_dmamap_t    rndis_msg_dmap;
};

#define HN_TXD_FLAG_ONLIST      0x1
#define HN_TXD_FLAG_DMAMAP      0x2

/*
 * Only enable UDP checksum offloading when it is on 2012R2 or
 * later.  UDP checksum offloading doesn't work on earlier
 * Windows releases.
 */
#define HN_CSUM_ASSIST_WIN8     (CSUM_IP | CSUM_TCP)
#define HN_CSUM_ASSIST          (CSUM_IP | CSUM_UDP | CSUM_TCP)
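/*
 * Note: hn_create_tx_ring() picks between these two masks based on the
 * negotiated VMBus version: VMBUS_VERSION_WIN8_1 or newer gets
 * HN_CSUM_ASSIST, while older hosts fall back to HN_CSUM_ASSIST_WIN8
 * (no UDP checksum offloading).
 */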
#define HN_LRO_LENLIM_MULTIRX_DEF       (12 * ETHERMTU)
#define HN_LRO_LENLIM_DEF               (25 * ETHERMTU)
/* YYY 2*MTU is a bit rough, but should be good enough. */
#define HN_LRO_LENLIM_MIN(ifp)          (2 * (ifp)->if_mtu)

#define HN_LRO_ACKCNT_DEF               1

/*
 * Be aware that this sleepable mutex will exhibit WITNESS errors when
 * certain TCP and ARP code paths are taken.  This appears to be a
 * well-known condition, as all other drivers checked use a sleeping
 * mutex to protect their transmit paths.
 * Also be aware that mutexes do not play well with semaphores, and there
 * is a conflicting semaphore in a certain channel code path.
 */
#define NV_LOCK_INIT(_sc, _name) \
    mtx_init(&(_sc)->hn_lock, _name, MTX_NETWORK_LOCK, MTX_DEF)
#define NV_LOCK(_sc)            mtx_lock(&(_sc)->hn_lock)
#define NV_LOCK_ASSERT(_sc)     mtx_assert(&(_sc)->hn_lock, MA_OWNED)
#define NV_UNLOCK(_sc)          mtx_unlock(&(_sc)->hn_lock)
#define NV_LOCK_DESTROY(_sc)    mtx_destroy(&(_sc)->hn_lock)

int hv_promisc_mode = 0;    /* normal mode by default */

SYSCTL_NODE(_hw, OID_AUTO, hn, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
    "Hyper-V network interface");

/* Trust tcp segment verification on host side. */
static int hn_trust_hosttcp = 1;
SYSCTL_INT(_hw_hn, OID_AUTO, trust_hosttcp, CTLFLAG_RDTUN,
    &hn_trust_hosttcp, 0,
    "Trust tcp segment verification on host side, "
    "when csum info is missing (global setting)");

/* Trust udp datagram verification on host side. */
static int hn_trust_hostudp = 1;
SYSCTL_INT(_hw_hn, OID_AUTO, trust_hostudp, CTLFLAG_RDTUN,
    &hn_trust_hostudp, 0,
    "Trust udp datagram verification on host side, "
    "when csum info is missing (global setting)");

/* Trust ip packet verification on host side. */
static int hn_trust_hostip = 1;
SYSCTL_INT(_hw_hn, OID_AUTO, trust_hostip, CTLFLAG_RDTUN,
    &hn_trust_hostip, 0,
    "Trust ip packet verification on host side, "
    "when csum info is missing (global setting)");

/* Limit TSO burst size */
static int hn_tso_maxlen = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, tso_maxlen, CTLFLAG_RDTUN,
    &hn_tso_maxlen, 0, "TSO burst limit");

/* Limit chimney send size */
static int hn_tx_chimney_size = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, tx_chimney_size, CTLFLAG_RDTUN,
    &hn_tx_chimney_size, 0, "Chimney send packet size limit");

/* Limit the size of packet for direct transmission */
static int hn_direct_tx_size = HN_DIRECT_TX_SIZE_DEF;
SYSCTL_INT(_hw_hn, OID_AUTO, direct_tx_size, CTLFLAG_RDTUN,
    &hn_direct_tx_size, 0, "Size of the packet for direct transmission");

#if defined(INET) || defined(INET6)
#if __FreeBSD_version >= 1100095
static int hn_lro_entry_count = HN_LROENT_CNT_DEF;
SYSCTL_INT(_hw_hn, OID_AUTO, lro_entry_count, CTLFLAG_RDTUN,
    &hn_lro_entry_count, 0, "LRO entry count");
#endif
#endif

static int hn_share_tx_taskq = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, share_tx_taskq, CTLFLAG_RDTUN,
    &hn_share_tx_taskq, 0, "Enable shared TX taskqueue");

static struct taskqueue *hn_tx_taskq;

#ifndef HN_USE_TXDESC_BUFRING
static int hn_use_txdesc_bufring = 0;
#else
static int hn_use_txdesc_bufring = 1;
#endif
SYSCTL_INT(_hw_hn, OID_AUTO, use_txdesc_bufring, CTLFLAG_RD,
    &hn_use_txdesc_bufring, 0, "Use buf_ring for TX descriptors");

static int hn_bind_tx_taskq = -1;
SYSCTL_INT(_hw_hn, OID_AUTO, bind_tx_taskq, CTLFLAG_RDTUN,
    &hn_bind_tx_taskq, 0, "Bind TX taskqueue to the specified cpu");

static int hn_use_if_start = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, use_if_start, CTLFLAG_RDTUN,
    &hn_use_if_start, 0, "Use if_start TX method");

static int hn_chan_cnt = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, chan_cnt, CTLFLAG_RDTUN,
    &hn_chan_cnt, 0,
    "# of channels to use; each channel has one RX ring and one TX ring");

static int hn_tx_ring_cnt = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, tx_ring_cnt, CTLFLAG_RDTUN,
    &hn_tx_ring_cnt, 0, "# of TX rings to use");

static int hn_tx_swq_depth = 0;
SYSCTL_INT(_hw_hn, OID_AUTO, tx_swq_depth, CTLFLAG_RDTUN,
    &hn_tx_swq_depth, 0, "Depth of IFQ or BUFRING");

#if __FreeBSD_version >= 1100095
static u_int hn_lro_mbufq_depth = 0;
SYSCTL_UINT(_hw_hn, OID_AUTO, lro_mbufq_depth, CTLFLAG_RDTUN,
    &hn_lro_mbufq_depth, 0, "Depth of LRO mbuf queue");
#endif

static u_int hn_cpu_index;

/*
 * Forward declarations
 */
static void hn_stop(hn_softc_t *sc);
static void hn_ifinit_locked(hn_softc_t *sc);
static void hn_ifinit(void *xsc);
static int hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data);
static int hn_start_locked(struct hn_tx_ring *txr, int len);
static void hn_start(struct ifnet *ifp);
static void hn_start_txeof(struct hn_tx_ring *);
static int hn_ifmedia_upd(struct ifnet *ifp);
static void hn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
#if __FreeBSD_version >= 1100099
static int hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS);
#endif
static int hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS);
#if __FreeBSD_version < 1100095
static int hn_rx_stat_int_sysctl(SYSCTL_HANDLER_ARGS);
#else
static int hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS);
#endif
static int hn_rx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_tx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_tx_conf_int_sysctl(SYSCTL_HANDLER_ARGS);
static int hn_check_iplen(const struct mbuf *, int);
static int hn_create_tx_ring(struct hn_softc *, int);
static void hn_destroy_tx_ring(struct hn_tx_ring *);
static int hn_create_tx_data(struct hn_softc *, int);
static void hn_destroy_tx_data(struct hn_softc *);
static void hn_start_taskfunc(void *, int);
static void hn_start_txeof_taskfunc(void *, int);
static void hn_stop_tx_tasks(struct hn_softc *);
static int hn_encap(struct hn_tx_ring *, struct hn_txdesc *, struct mbuf **);
static void hn_create_rx_data(struct hn_softc *sc, int);
static void hn_destroy_rx_data(struct hn_softc *sc);
static void hn_set_tx_chimney_size(struct hn_softc *, int);
static void hn_channel_attach(struct hn_softc *, struct hv_vmbus_channel *);
static void hn_subchan_attach(struct hn_softc *, struct hv_vmbus_channel *);

static int hn_transmit(struct ifnet *, struct mbuf *);
static void hn_xmit_qflush(struct ifnet *);
static int hn_xmit(struct hn_tx_ring *, int);
static void hn_xmit_txeof(struct hn_tx_ring *);
static void hn_xmit_taskfunc(void *, int);
static void hn_xmit_txeof_taskfunc(void *, int);

#if __FreeBSD_version >= 1100099
static void
hn_set_lro_lenlim(struct hn_softc *sc, int lenlim)
{
    int i;

    for (i = 0; i < sc->hn_rx_ring_inuse; ++i)
        sc->hn_rx_ring[i].hn_lro.lro_length_lim = lenlim;
}
#endif
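/*
 * Note: the software TX queue depth is the hn_tx_swq_depth tunable,
 * bumped up to at least the ring's TX descriptor count, so the IFQ or
 * buf_ring is never shallower than the descriptor pool behind it.
 */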
static int
hn_get_txswq_depth(const struct hn_tx_ring *txr)
{

    KASSERT(txr->hn_txdesc_cnt > 0, ("tx ring is not setup yet"));
    if (hn_tx_swq_depth < txr->hn_txdesc_cnt)
        return txr->hn_txdesc_cnt;
    return hn_tx_swq_depth;
}

static int
hn_ifmedia_upd(struct ifnet *ifp __unused)
{

    return EOPNOTSUPP;
}

static void
hn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
    struct hn_softc *sc = ifp->if_softc;

    ifmr->ifm_status = IFM_AVALID;
    ifmr->ifm_active = IFM_ETHER;

    if (!sc->hn_carrier) {
        ifmr->ifm_active |= IFM_NONE;
        return;
    }
    ifmr->ifm_status |= IFM_ACTIVE;
    ifmr->ifm_active |= IFM_10G_T | IFM_FDX;
}

/* {F8615163-DF3E-46c5-913F-F2D2F965ED0E} */
static const hv_guid g_net_vsc_device_type = {
    .data = {0x63, 0x51, 0x61, 0xF8, 0x3E, 0xDF, 0xc5, 0x46,
    0x91, 0x3F, 0xF2, 0xD2, 0xF9, 0x65, 0xED, 0x0E}
};

/*
 * Standard probe entry point.
 */
static int
netvsc_probe(device_t dev)
{
    const char *p;

    p = vmbus_get_type(dev);
    if (!memcmp(p, &g_net_vsc_device_type.data, sizeof(hv_guid))) {
        device_set_desc(dev, "Hyper-V Network Interface");
        printf("Netvsc probe... DONE\n");
        return (BUS_PROBE_DEFAULT);
    }
    return (ENXIO);
}

static void
hn_cpuset_setthread_task(void *xmask, int pending __unused)
{
    cpuset_t *mask = xmask;
    int error;

    error = cpuset_setthread(curthread->td_tid, mask);
    if (error) {
        panic("curthread=%ju: can't pin; error=%d",
            (uintmax_t)curthread->td_tid, error);
    }
}

/*
 * Standard attach entry point.
 *
 * Called when the driver is loaded.  It allocates needed resources,
 * and initializes the "hardware" and software.
 */
static int
netvsc_attach(device_t dev)
{
    struct hv_device *device_ctx = vmbus_get_devctx(dev);
    struct hv_vmbus_channel *pri_chan;
    netvsc_device_info device_info;
    hn_softc_t *sc;
    int unit = device_get_unit(dev);
    struct ifnet *ifp = NULL;
    int error, ring_cnt, tx_ring_cnt;

    sc = device_get_softc(dev);

    if (hn_tx_taskq == NULL) {
        sc->hn_tx_taskq = taskqueue_create("hn_tx", M_WAITOK,
            taskqueue_thread_enqueue, &sc->hn_tx_taskq);
        taskqueue_start_threads(&sc->hn_tx_taskq, 1, PI_NET, "%s tx",
            device_get_nameunit(dev));
        if (hn_bind_tx_taskq >= 0) {
            int cpu = hn_bind_tx_taskq;
            struct task cpuset_task;
            cpuset_t cpu_set;

            if (cpu > mp_ncpus - 1)
                cpu = mp_ncpus - 1;
            CPU_SETOF(cpu, &cpu_set);
            TASK_INIT(&cpuset_task, 0, hn_cpuset_setthread_task,
                &cpu_set);
            taskqueue_enqueue(sc->hn_tx_taskq, &cpuset_task);
            taskqueue_drain(sc->hn_tx_taskq, &cpuset_task);
        }
    } else {
        sc->hn_tx_taskq = hn_tx_taskq;
    }
    NV_LOCK_INIT(sc, "NetVSCLock");

    sc->hn_dev_obj = device_ctx;

    ifp = sc->hn_ifp = sc->arpcom.ac_ifp = if_alloc(IFT_ETHER);
    ifp->if_softc = sc;
    if_initname(ifp, device_get_name(dev), device_get_unit(dev));

    /*
     * Figure out the # of RX rings (ring_cnt) and the # of TX rings
     * to use (tx_ring_cnt).
     *
     * The # of RX rings to use is the same as the # of channels to use.
     */
    ring_cnt = hn_chan_cnt;
    if (ring_cnt <= 0) {
        ring_cnt = mp_ncpus;
        if (ring_cnt > HN_RING_CNT_DEF_MAX)
            ring_cnt = HN_RING_CNT_DEF_MAX;
    } else if (ring_cnt > mp_ncpus) {
        ring_cnt = mp_ncpus;
    }

    tx_ring_cnt = hn_tx_ring_cnt;
    if (tx_ring_cnt <= 0 || tx_ring_cnt > ring_cnt)
        tx_ring_cnt = ring_cnt;
    if (hn_use_if_start) {
        /* ifnet.if_start only needs one TX ring. */
        tx_ring_cnt = 1;
    }

    /*
     * Set the leader CPU for channels.
     */
    sc->hn_cpu = atomic_fetchadd_int(&hn_cpu_index, ring_cnt) % mp_ncpus;
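    /*
     * Note: hn_cpu_index is global; each device instance reserves a block
     * of ring_cnt CPU slots via atomic_fetchadd_int(), so successive
     * instances round-robin their channel leader CPUs across the system,
     * modulo mp_ncpus.
     */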
    error = hn_create_tx_data(sc, tx_ring_cnt);
    if (error)
        goto failed;
    hn_create_rx_data(sc, ring_cnt);

    /*
     * Associate the first TX/RX ring w/ the primary channel.
     */
    pri_chan = device_ctx->channel;
    KASSERT(HV_VMBUS_CHAN_ISPRIMARY(pri_chan), ("not primary channel"));
    KASSERT(pri_chan->offer_msg.offer.sub_channel_index == 0,
        ("primary channel subidx %u",
         pri_chan->offer_msg.offer.sub_channel_index));
    hn_channel_attach(sc, pri_chan);

    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_ioctl = hn_ioctl;
    ifp->if_init = hn_ifinit;
    /* needed by hv_rf_on_device_add() code */
    ifp->if_mtu = ETHERMTU;
    if (hn_use_if_start) {
        int qdepth = hn_get_txswq_depth(&sc->hn_tx_ring[0]);

        ifp->if_start = hn_start;
        IFQ_SET_MAXLEN(&ifp->if_snd, qdepth);
        ifp->if_snd.ifq_drv_maxlen = qdepth - 1;
        IFQ_SET_READY(&ifp->if_snd);
    } else {
        ifp->if_transmit = hn_transmit;
        ifp->if_qflush = hn_xmit_qflush;
    }

    ifmedia_init(&sc->hn_media, 0, hn_ifmedia_upd, hn_ifmedia_sts);
    ifmedia_add(&sc->hn_media, IFM_ETHER | IFM_AUTO, 0, NULL);
    ifmedia_set(&sc->hn_media, IFM_ETHER | IFM_AUTO);
    /* XXX ifmedia_set really should do this for us */
    sc->hn_media.ifm_media = sc->hn_media.ifm_cur->ifm_media;

    /*
     * Tell upper layers that we support full VLAN capability.
     */
    ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
    ifp->if_capabilities |=
        IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_TSO |
        IFCAP_LRO;
    ifp->if_capenable |=
        IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_TSO |
        IFCAP_LRO;
    ifp->if_hwassist = sc->hn_tx_ring[0].hn_csum_assist | CSUM_TSO;

    error = hv_rf_on_device_add(device_ctx, &device_info, ring_cnt);
    if (error)
        goto failed;
    KASSERT(sc->net_dev->num_channel > 0 &&
        sc->net_dev->num_channel <= sc->hn_rx_ring_inuse,
        ("invalid channel count %u, should be less than %d",
         sc->net_dev->num_channel, sc->hn_rx_ring_inuse));

    /*
     * Set the # of TX/RX rings that could be used according to
     * the # of channels that host offered.
     */
    if (sc->hn_tx_ring_inuse > sc->net_dev->num_channel)
        sc->hn_tx_ring_inuse = sc->net_dev->num_channel;
    sc->hn_rx_ring_inuse = sc->net_dev->num_channel;
    device_printf(dev, "%d TX ring, %d RX ring\n",
        sc->hn_tx_ring_inuse, sc->hn_rx_ring_inuse);

    if (sc->net_dev->num_channel > 1) {
        struct hv_vmbus_channel **subchan;
        int subchan_cnt = sc->net_dev->num_channel - 1;
        int i;

        /* Wait for sub-channels setup to complete. */
        subchan = vmbus_get_subchan(pri_chan, subchan_cnt);

        /* Attach the sub-channels. */
        for (i = 0; i < subchan_cnt; ++i) {
            /* NOTE: Calling order is critical. */
            hn_subchan_attach(sc, subchan[i]);
            hv_nv_subchan_attach(subchan[i]);
        }

        /* Release the sub-channels */
        vmbus_rel_subchan(subchan, subchan_cnt);
        device_printf(dev, "%d sub-channels setup done\n", subchan_cnt);
    }

#if __FreeBSD_version >= 1100099
    if (sc->hn_rx_ring_inuse > 1) {
        /*
         * Reduce TCP segment aggregation limit for multiple
         * RX rings to increase ACK timeliness.
         */
        hn_set_lro_lenlim(sc, HN_LRO_LENLIM_MULTIRX_DEF);
    }
#endif

    if (device_info.link_state == 0) {
        sc->hn_carrier = 1;
    }

    tso_maxlen = hn_tso_maxlen;
    if (tso_maxlen <= 0 || tso_maxlen > IP_MAXPACKET)
        tso_maxlen = IP_MAXPACKET;

    ifp->if_hw_tsomaxsegcount = HN_TX_DATA_SEGCNT_MAX;
    ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
    ifp->if_hw_tsomax = tso_maxlen -
        (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN);

    ether_ifattach(ifp, device_info.mac_addr);

    if_printf(ifp, "TSO: %u/%u/%u\n", ifp->if_hw_tsomax,
        ifp->if_hw_tsomaxsegcount, ifp->if_hw_tsomaxsegsize);

    sc->hn_tx_chimney_max = sc->net_dev->send_section_size;
    hn_set_tx_chimney_size(sc, sc->hn_tx_chimney_max);
    if (hn_tx_chimney_size > 0 &&
        hn_tx_chimney_size < sc->hn_tx_chimney_max)
        hn_set_tx_chimney_size(sc, hn_tx_chimney_size);

    return (0);
failed:
    hn_destroy_tx_data(sc);
    return (error);
}

/*
 * Standard detach entry point
 */
static int
netvsc_detach(device_t dev)
{
    struct hn_softc *sc = device_get_softc(dev);
    struct hv_device *hv_device = vmbus_get_devctx(dev);

    printf("netvsc_detach\n");

    /*
     * XXXKYS:  Need to clean up all our
     * driver state; this is the driver
     * unloading.
     */

    /*
     * XXXKYS:  Need to stop outgoing traffic and unregister
     * the netbufs.
     */
    hv_rf_on_device_remove(hv_device, HV_RF_NV_DESTROY_CHANNEL);

    hn_stop_tx_tasks(sc);

    ifmedia_removeall(&sc->hn_media);
    hn_destroy_rx_data(sc);
    hn_destroy_tx_data(sc);

    if (sc->hn_tx_taskq != hn_tx_taskq)
        taskqueue_free(sc->hn_tx_taskq);

    return (0);
}

/*
 * Standard shutdown entry point
 */
static int
netvsc_shutdown(device_t dev)
{

    return (0);
}

static int
hn_txdesc_dmamap_load(struct hn_tx_ring *txr, struct hn_txdesc *txd,
    struct mbuf **m_head, bus_dma_segment_t *segs, int *nsegs)
{
    struct mbuf *m = *m_head;
    int error;

    error = bus_dmamap_load_mbuf_sg(txr->hn_tx_data_dtag, txd->data_dmap,
        m, segs, nsegs, BUS_DMA_NOWAIT);
    if (error == EFBIG) {
        struct mbuf *m_new;

        m_new = m_collapse(m, M_NOWAIT, HN_TX_DATA_SEGCNT_MAX);
        if (m_new == NULL)
            return ENOBUFS;
        m = *m_head = m_new;
        txr->hn_tx_collapsed++;

        error = bus_dmamap_load_mbuf_sg(txr->hn_tx_data_dtag,
            txd->data_dmap, m, segs, nsegs, BUS_DMA_NOWAIT);
    }
    if (!error) {
        bus_dmamap_sync(txr->hn_tx_data_dtag, txd->data_dmap,
            BUS_DMASYNC_PREWRITE);
        txd->flags |= HN_TXD_FLAG_DMAMAP;
    }
    return error;
}

static void
hn_txdesc_dmamap_unload(struct hn_tx_ring *txr, struct hn_txdesc *txd)
{

    if (txd->flags & HN_TXD_FLAG_DMAMAP) {
        bus_dmamap_sync(txr->hn_tx_data_dtag,
            txd->data_dmap, BUS_DMASYNC_POSTWRITE);
        bus_dmamap_unload(txr->hn_tx_data_dtag,
            txd->data_dmap);
        txd->flags &= ~HN_TXD_FLAG_DMAMAP;
    }
}

static __inline int
hn_txdesc_put(struct hn_tx_ring *txr, struct hn_txdesc *txd)
{

    KASSERT((txd->flags & HN_TXD_FLAG_ONLIST) == 0,
        ("put an onlist txd %#x", txd->flags));

    KASSERT(txd->refs > 0, ("invalid txd refs %d", txd->refs));
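    /*
     * Note: this drops one reference; the descriptor is unloaded, its
     * mbuf freed, and the descriptor itself recycled only when the last
     * reference goes away.  hn_send_pkt() depends on this by holding an
     * extra reference across hv_nv_on_send(), so the TX-done callback
     * cannot free the descriptor while it is still in use.
     */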
    if (atomic_fetchadd_int(&txd->refs, -1) != 1)
        return 0;

    hn_txdesc_dmamap_unload(txr, txd);
    if (txd->m != NULL) {
        m_freem(txd->m);
        txd->m = NULL;
    }

    txd->flags |= HN_TXD_FLAG_ONLIST;

#ifndef HN_USE_TXDESC_BUFRING
    mtx_lock_spin(&txr->hn_txlist_spin);
    KASSERT(txr->hn_txdesc_avail >= 0 &&
        txr->hn_txdesc_avail < txr->hn_txdesc_cnt,
        ("txdesc_put: invalid txd avail %d", txr->hn_txdesc_avail));
    txr->hn_txdesc_avail++;
    SLIST_INSERT_HEAD(&txr->hn_txlist, txd, link);
    mtx_unlock_spin(&txr->hn_txlist_spin);
#else
    atomic_add_int(&txr->hn_txdesc_avail, 1);
    buf_ring_enqueue(txr->hn_txdesc_br, txd);
#endif

    return 1;
}

static __inline struct hn_txdesc *
hn_txdesc_get(struct hn_tx_ring *txr)
{
    struct hn_txdesc *txd;

#ifndef HN_USE_TXDESC_BUFRING
    mtx_lock_spin(&txr->hn_txlist_spin);
    txd = SLIST_FIRST(&txr->hn_txlist);
    if (txd != NULL) {
        KASSERT(txr->hn_txdesc_avail > 0,
            ("txdesc_get: invalid txd avail %d", txr->hn_txdesc_avail));
        txr->hn_txdesc_avail--;
        SLIST_REMOVE_HEAD(&txr->hn_txlist, link);
    }
    mtx_unlock_spin(&txr->hn_txlist_spin);
#else
    txd = buf_ring_dequeue_sc(txr->hn_txdesc_br);
#endif

    if (txd != NULL) {
#ifdef HN_USE_TXDESC_BUFRING
        atomic_subtract_int(&txr->hn_txdesc_avail, 1);
#endif
        KASSERT(txd->m == NULL && txd->refs == 0 &&
            (txd->flags & HN_TXD_FLAG_ONLIST), ("invalid txd"));
        txd->flags &= ~HN_TXD_FLAG_ONLIST;
        txd->refs = 1;
    }
    return txd;
}

static __inline void
hn_txdesc_hold(struct hn_txdesc *txd)
{

    /* 0->1 transition will never work */
    KASSERT(txd->refs > 0, ("invalid refs %d", txd->refs));
    atomic_add_int(&txd->refs, 1);
}

static void
hn_txeof(struct hn_tx_ring *txr)
{
    txr->hn_has_txeof = 0;
    txr->hn_txeof(txr);
}

static void
hn_tx_done(struct hv_vmbus_channel *chan, void *xpkt)
{
    netvsc_packet *packet = xpkt;
    struct hn_txdesc *txd;
    struct hn_tx_ring *txr;

    txd = (struct hn_txdesc *)(uintptr_t)
        packet->compl.send.send_completion_tid;

    txr = txd->txr;
    KASSERT(txr->hn_chan == chan,
        ("channel mismatch, on channel%u, should be channel%u",
         chan->offer_msg.offer.sub_channel_index,
         txr->hn_chan->offer_msg.offer.sub_channel_index));

    txr->hn_has_txeof = 1;
    hn_txdesc_put(txr, txd);

    ++txr->hn_txdone_cnt;
    if (txr->hn_txdone_cnt >= HN_EARLY_TXEOF_THRESH) {
        txr->hn_txdone_cnt = 0;
        hn_txeof(txr);
    }
}

void
netvsc_channel_rollup(struct hv_vmbus_channel *chan)
{
    struct hn_tx_ring *txr = chan->hv_chan_txr;
#if defined(INET) || defined(INET6)
    struct hn_rx_ring *rxr = chan->hv_chan_rxr;
    struct lro_ctrl *lro = &rxr->hn_lro;
    struct lro_entry *queued;

    while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
        SLIST_REMOVE_HEAD(&lro->lro_active, next);
        tcp_lro_flush(lro, queued);
    }
#endif

    /*
     * 'txr' could be NULL, if multiple channels and the
     * ifnet.if_start method are enabled.
     */
    if (txr == NULL || !txr->hn_has_txeof)
        return;

    txr->hn_txdone_cnt = 0;
    hn_txeof(txr);
}

/*
 * NOTE:
 * If this function fails, then both txd and m_head0 will be freed.
 */
static int
hn_encap(struct hn_tx_ring *txr, struct hn_txdesc *txd, struct mbuf **m_head0)
{
    bus_dma_segment_t segs[HN_TX_DATA_SEGCNT_MAX];
    int error, nsegs, i;
    struct mbuf *m_head = *m_head0;
    netvsc_packet *packet;
    rndis_msg *rndis_mesg;
    rndis_packet *rndis_pkt;
    rndis_per_packet_info *rppi;
    struct rndis_hash_value *hash_value;
    uint32_t rndis_msg_size;

    packet = &txd->netvsc_pkt;
    packet->is_data_pkt = TRUE;
    packet->tot_data_buf_len = m_head->m_pkthdr.len;

    /*
     * extension points to the area reserved for the
     * rndis_filter_packet, which is placed just after
     * the netvsc_packet (and rppi struct, if present;
     * length is updated later).
     */
    rndis_mesg = txd->rndis_msg;
    /* XXX not necessary */
    memset(rndis_mesg, 0, HN_RNDIS_MSG_LEN);
    rndis_mesg->ndis_msg_type = REMOTE_NDIS_PACKET_MSG;

    rndis_pkt = &rndis_mesg->msg.packet;
    rndis_pkt->data_offset = sizeof(rndis_packet);
    rndis_pkt->data_length = packet->tot_data_buf_len;
    rndis_pkt->per_pkt_info_offset = sizeof(rndis_packet);

    rndis_msg_size = RNDIS_MESSAGE_SIZE(rndis_packet);

    /*
     * Set the hash value for this packet, so that the host could
     * dispatch the TX done event for this packet back to this TX
     * ring's channel.
     */
    rndis_msg_size += RNDIS_HASHVAL_PPI_SIZE;
    rppi = hv_set_rppi_data(rndis_mesg, RNDIS_HASHVAL_PPI_SIZE,
        nbl_hash_value);
    hash_value = (struct rndis_hash_value *)((uint8_t *)rppi +
        rppi->per_packet_info_offset);
    hash_value->hash_value = txr->hn_tx_idx;

    if (m_head->m_flags & M_VLANTAG) {
        ndis_8021q_info *rppi_vlan_info;

        rndis_msg_size += RNDIS_VLAN_PPI_SIZE;
        rppi = hv_set_rppi_data(rndis_mesg, RNDIS_VLAN_PPI_SIZE,
            ieee_8021q_info);

        rppi_vlan_info = (ndis_8021q_info *)((uint8_t *)rppi +
            rppi->per_packet_info_offset);
        rppi_vlan_info->u1.s1.vlan_id =
            m_head->m_pkthdr.ether_vtag & 0xfff;
    }

    if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
        rndis_tcp_tso_info *tso_info;
        struct ether_vlan_header *eh;
        int ether_len;

        /*
         * XXX need m_pullup and use mtodo
         */
        eh = mtod(m_head, struct ether_vlan_header*);
        if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
            ether_len = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
        else
            ether_len = ETHER_HDR_LEN;

        rndis_msg_size += RNDIS_TSO_PPI_SIZE;
        rppi = hv_set_rppi_data(rndis_mesg, RNDIS_TSO_PPI_SIZE,
            tcp_large_send_info);

        tso_info = (rndis_tcp_tso_info *)((uint8_t *)rppi +
            rppi->per_packet_info_offset);
        tso_info->lso_v2_xmit.type =
            RNDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;

#ifdef INET
        if (m_head->m_pkthdr.csum_flags & CSUM_IP_TSO) {
            struct ip *ip =
                (struct ip *)(m_head->m_data + ether_len);
            unsigned long iph_len = ip->ip_hl << 2;
            struct tcphdr *th =
                (struct tcphdr *)((caddr_t)ip + iph_len);

            tso_info->lso_v2_xmit.ip_version =
                RNDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
            ip->ip_len = 0;
            ip->ip_sum = 0;

            th->th_sum = in_pseudo(ip->ip_src.s_addr,
                ip->ip_dst.s_addr, htons(IPPROTO_TCP));
        }
#endif
#if defined(INET6) && defined(INET)
        else
#endif
#ifdef INET6
        {
            struct ip6_hdr *ip6 = (struct ip6_hdr *)
                (m_head->m_data + ether_len);
            struct tcphdr *th = (struct tcphdr *)(ip6 + 1);

            tso_info->lso_v2_xmit.ip_version =
                RNDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
            th->th_sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0);
        }
#endif
        tso_info->lso_v2_xmit.tcp_header_offset = 0;
        tso_info->lso_v2_xmit.mss = m_head->m_pkthdr.tso_segsz;
    } else if (m_head->m_pkthdr.csum_flags & txr->hn_csum_assist) {
        rndis_tcp_ip_csum_info *csum_info;

        rndis_msg_size += RNDIS_CSUM_PPI_SIZE;
        rppi = hv_set_rppi_data(rndis_mesg, RNDIS_CSUM_PPI_SIZE,
            tcpip_chksum_info);
        csum_info = (rndis_tcp_ip_csum_info *)((uint8_t *)rppi +
            rppi->per_packet_info_offset);

        csum_info->xmit.is_ipv4 = 1;
        if (m_head->m_pkthdr.csum_flags & CSUM_IP)
            csum_info->xmit.ip_header_csum = 1;

        if (m_head->m_pkthdr.csum_flags & CSUM_TCP) {
            csum_info->xmit.tcp_csum = 1;
            csum_info->xmit.tcp_header_offset = 0;
        } else if (m_head->m_pkthdr.csum_flags & CSUM_UDP) {
            csum_info->xmit.udp_csum = 1;
        }
    }

    rndis_mesg->msg_len = packet->tot_data_buf_len + rndis_msg_size;
    packet->tot_data_buf_len = rndis_mesg->msg_len;

    /*
     * Chimney send, if the packet could fit into one chimney buffer.
     */
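    /*
     * Note: a chimney send copies the RNDIS message and then the whole
     * mbuf chain into a single pre-posted send-buffer section, so the
     * packet needs no gather page buffers (page_buf_count is set to 0).
     */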
    if (packet->tot_data_buf_len < txr->hn_tx_chimney_size) {
        netvsc_dev *net_dev = txr->hn_sc->net_dev;
        uint32_t send_buf_section_idx;

        txr->hn_tx_chimney_tried++;
        send_buf_section_idx =
            hv_nv_get_next_send_section(net_dev);
        if (send_buf_section_idx !=
            NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX) {
            uint8_t *dest = ((uint8_t *)net_dev->send_buf +
                (send_buf_section_idx *
                 net_dev->send_section_size));

            memcpy(dest, rndis_mesg, rndis_msg_size);
            dest += rndis_msg_size;
            m_copydata(m_head, 0, m_head->m_pkthdr.len, dest);

            packet->send_buf_section_idx = send_buf_section_idx;
            packet->send_buf_section_size =
                packet->tot_data_buf_len;
            packet->page_buf_count = 0;
            txr->hn_tx_chimney++;
            goto done;
        }
    }

    error = hn_txdesc_dmamap_load(txr, txd, &m_head, segs, &nsegs);
    if (error) {
        int freed;

        /*
         * This mbuf is not linked w/ the txd yet, so free it now.
         */
        m_freem(m_head);
        *m_head0 = NULL;

        freed = hn_txdesc_put(txr, txd);
        KASSERT(freed != 0,
            ("fail to free txd upon txdma error"));

        txr->hn_txdma_failed++;
        if_inc_counter(txr->hn_sc->hn_ifp, IFCOUNTER_OERRORS, 1);
        return error;
    }
    *m_head0 = m_head;

    packet->page_buf_count = nsegs + HV_RF_NUM_TX_RESERVED_PAGE_BUFS;

    /* send packet with page buffer */
    packet->page_buffers[0].pfn = atop(txd->rndis_msg_paddr);
    packet->page_buffers[0].offset = txd->rndis_msg_paddr & PAGE_MASK;
    packet->page_buffers[0].length = rndis_msg_size;

    /*
     * Fill the page buffers with mbuf info starting at index
     * HV_RF_NUM_TX_RESERVED_PAGE_BUFS.
     */
    for (i = 0; i < nsegs; ++i) {
        hv_vmbus_page_buffer *pb = &packet->page_buffers[
            i + HV_RF_NUM_TX_RESERVED_PAGE_BUFS];

        pb->pfn = atop(segs[i].ds_addr);
        pb->offset = segs[i].ds_addr & PAGE_MASK;
        pb->length = segs[i].ds_len;
    }

    packet->send_buf_section_idx =
        NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX;
    packet->send_buf_section_size = 0;

done:
    /* Set the completion routine */
    packet->compl.send.on_send_completion = hn_tx_done;
    packet->compl.send.send_completion_context = packet;
    packet->compl.send.send_completion_tid = (uint64_t)(uintptr_t)txd;

    return 0;
}

/*
 * NOTE:
 * If this function fails, then txd will be freed, but the mbuf
 * associated w/ the txd will _not_ be freed.
 */
static int
hn_send_pkt(struct ifnet *ifp, struct hn_tx_ring *txr, struct hn_txdesc *txd)
{
    int error, send_failed = 0;

again:
    /*
     * Make sure that txd is not freed before ETHER_BPF_MTAP.
     */
    hn_txdesc_hold(txd);
    error = hv_nv_on_send(txr->hn_chan, &txd->netvsc_pkt);
    if (!error) {
        ETHER_BPF_MTAP(ifp, txd->m);
        if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
        if (!hn_use_if_start) {
            if_inc_counter(ifp, IFCOUNTER_OBYTES,
                txd->m->m_pkthdr.len);
            if (txd->m->m_flags & M_MCAST)
                if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1);
        }
    }
    hn_txdesc_put(txr, txd);

    if (__predict_false(error)) {
        int freed;

        /*
         * This should "really rarely" happen.
         *
         * XXX Too many RX to be acked or too many sideband
         * commands to run?  Ask netvsc_channel_rollup()
         * to kick start later.
         */
        txr->hn_has_txeof = 1;
        if (!send_failed) {
            txr->hn_send_failed++;
            send_failed = 1;
            /*
             * Try sending again after setting hn_has_txeof,
             * in case that we missed the last
             * netvsc_channel_rollup().
             */
            goto again;
        }
        if_printf(ifp, "send failed\n");

        /*
         * Caller will perform further processing on the
         * associated mbuf, so don't free it in hn_txdesc_put();
         * only unload it from the DMA map in hn_txdesc_put(),
         * if it was loaded.
         */
        txd->m = NULL;
        freed = hn_txdesc_put(txr, txd);
        KASSERT(freed != 0,
            ("fail to free txd upon send error"));

        txr->hn_send_failed++;
    }
    return error;
}

/*
 * Start a transmit of one or more packets
 */
static int
hn_start_locked(struct hn_tx_ring *txr, int len)
{
    struct hn_softc *sc = txr->hn_sc;
    struct ifnet *ifp = sc->hn_ifp;

    KASSERT(hn_use_if_start,
        ("hn_start_locked is called, when if_start is disabled"));
    KASSERT(txr == &sc->hn_tx_ring[0], ("not the first TX ring"));
    mtx_assert(&txr->hn_tx_lock, MA_OWNED);

    if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
        IFF_DRV_RUNNING)
        return 0;

    while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
        struct hn_txdesc *txd;
        struct mbuf *m_head;
        int error;

        IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
        if (m_head == NULL)
            break;

        if (len > 0 && m_head->m_pkthdr.len > len) {
            /*
             * This sending could be time consuming; let callers
             * dispatch this packet sending (and sending of any
             * follow-up packets) to the tx taskqueue.
             */
            IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
            return 1;
        }

        txd = hn_txdesc_get(txr);
        if (txd == NULL) {
            txr->hn_no_txdescs++;
            IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
            atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
            break;
        }

        error = hn_encap(txr, txd, &m_head);
        if (error) {
            /* Both txd and m_head are freed */
            continue;
        }

        error = hn_send_pkt(ifp, txr, txd);
        if (__predict_false(error)) {
            /* txd is freed, but m_head is not */
            IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
            atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
            break;
        }
    }
    return 0;
}

/*
 * Link up/down notification
 */
void
netvsc_linkstatus_callback(struct hv_device *device_obj, uint32_t status)
{
    hn_softc_t *sc = device_get_softc(device_obj->device);

    if (status == 1) {
        sc->hn_carrier = 1;
    } else {
        sc->hn_carrier = 0;
    }
}

/*
 * Append the specified data to the indicated mbuf chain.
 * Extend the mbuf chain if the new data does not fit in
 * existing space.
 *
 * This is a minor rewrite of m_append() from sys/kern/uipc_mbuf.c.
 * There should be an equivalent in the kernel mbuf code,
 * but there does not appear to be one yet.
 *
 * Differs from m_append() in that additional mbufs are
 * allocated with cluster size MJUMPAGESIZE, and filled
 * accordingly.
 *
 * Return 1 if able to complete the job; otherwise 0.
 */
static int
hv_m_append(struct mbuf *m0, int len, c_caddr_t cp)
{
    struct mbuf *m, *n;
    int remainder, space;

    for (m = m0; m->m_next != NULL; m = m->m_next)
        ;
    remainder = len;
    space = M_TRAILINGSPACE(m);
    if (space > 0) {
        /*
         * Copy into available space.
         */
        if (space > remainder)
            space = remainder;
        bcopy(cp, mtod(m, caddr_t) + m->m_len, space);
        m->m_len += space;
        cp += space;
        remainder -= space;
    }
    while (remainder > 0) {
        /*
         * Allocate a new mbuf; could check space
         * and allocate a cluster instead.
         */
        n = m_getjcl(M_DONTWAIT, m->m_type, 0, MJUMPAGESIZE);
        if (n == NULL)
            break;
        n->m_len = min(MJUMPAGESIZE, remainder);
        bcopy(cp, mtod(n, caddr_t), n->m_len);
        cp += n->m_len;
        remainder -= n->m_len;
        m->m_next = n;
        m = n;
    }
    if (m0->m_flags & M_PKTHDR)
        m0->m_pkthdr.len += len - remainder;

    return (remainder == 0);
}

#if defined(INET) || defined(INET6)
static __inline int
hn_lro_rx(struct lro_ctrl *lc, struct mbuf *m)
{
#if __FreeBSD_version >= 1100095
    if (hn_lro_mbufq_depth) {
        tcp_lro_queue_mbuf(lc, m);
        return 0;
    }
#endif
    return tcp_lro_rx(lc, m, 0);
}
#endif

/*
 * Called when we receive a data packet from the "wire" on the
 * specified device
 *
 * Note:  This is no longer used as a callback
 */
int
netvsc_recv(struct hv_vmbus_channel *chan, netvsc_packet *packet,
    const rndis_tcp_ip_csum_info *csum_info,
    const struct rndis_hash_info *hash_info,
    const struct rndis_hash_value *hash_value)
{
    struct hn_rx_ring *rxr = chan->hv_chan_rxr;
    struct ifnet *ifp = rxr->hn_ifp;
    struct mbuf *m_new;
    int size, do_lro = 0, do_csum = 1;

    if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
        return (0);

    /*
     * Bail out if packet contains more data than configured MTU.
     */
    if (packet->tot_data_buf_len > (ifp->if_mtu + ETHER_HDR_LEN)) {
        return (0);
    } else if (packet->tot_data_buf_len <= MHLEN) {
        m_new = m_gethdr(M_NOWAIT, MT_DATA);
        if (m_new == NULL) {
            if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
            return (0);
        }
        memcpy(mtod(m_new, void *), packet->data,
            packet->tot_data_buf_len);
        m_new->m_pkthdr.len = m_new->m_len = packet->tot_data_buf_len;
        rxr->hn_small_pkts++;
    } else {
        /*
         * Get an mbuf with a cluster.  For packets 2K or less,
         * get a standard 2K cluster.  For anything larger, get a
         * 4K cluster.  Any buffers larger than 4K can cause problems
         * if looped around to the Hyper-V TX channel, so avoid them.
         */
        size = MCLBYTES;
        if (packet->tot_data_buf_len > MCLBYTES) {
            /* 4096 */
            size = MJUMPAGESIZE;
        }

        m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, size);
        if (m_new == NULL) {
            if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1);
            return (0);
        }

        hv_m_append(m_new, packet->tot_data_buf_len, packet->data);
    }
    m_new->m_pkthdr.rcvif = ifp;

    if (__predict_false((ifp->if_capenable & IFCAP_RXCSUM) == 0))
        do_csum = 0;

    /* receive side checksum offload */
    if (csum_info != NULL) {
        /* IP csum offload */
        if (csum_info->receive.ip_csum_succeeded && do_csum) {
            m_new->m_pkthdr.csum_flags |=
                (CSUM_IP_CHECKED | CSUM_IP_VALID);
            rxr->hn_csum_ip++;
        }

        /* TCP/UDP csum offload */
        if ((csum_info->receive.tcp_csum_succeeded ||
             csum_info->receive.udp_csum_succeeded) && do_csum) {
            m_new->m_pkthdr.csum_flags |=
                (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
            m_new->m_pkthdr.csum_data = 0xffff;
            if (csum_info->receive.tcp_csum_succeeded)
                rxr->hn_csum_tcp++;
            else
                rxr->hn_csum_udp++;
        }

        if (csum_info->receive.ip_csum_succeeded &&
            csum_info->receive.tcp_csum_succeeded)
            do_lro = 1;
    } else {
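        /*
         * Note: this is the path taken when the host supplies no checksum
         * info.  The frame is parsed just far enough (Ethernet/VLAN header,
         * then hn_check_iplen()) to decide whether the per-protocol
         * hn_trust_hcsum flags allow claiming that the host already
         * verified the checksums.
         */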
        const struct ether_header *eh;
        uint16_t etype;
        int hoff;

        hoff = sizeof(*eh);
        if (m_new->m_len < hoff)
            goto skip;
        eh = mtod(m_new, struct ether_header *);
        etype = ntohs(eh->ether_type);
        if (etype == ETHERTYPE_VLAN) {
            const struct ether_vlan_header *evl;

            hoff = sizeof(*evl);
            if (m_new->m_len < hoff)
                goto skip;
            evl = mtod(m_new, struct ether_vlan_header *);
            etype = ntohs(evl->evl_proto);
        }

        if (etype == ETHERTYPE_IP) {
            int pr;

            pr = hn_check_iplen(m_new, hoff);
            if (pr == IPPROTO_TCP) {
                if (do_csum &&
                    (rxr->hn_trust_hcsum &
                     HN_TRUST_HCSUM_TCP)) {
                    rxr->hn_csum_trusted++;
                    m_new->m_pkthdr.csum_flags |=
                        (CSUM_IP_CHECKED | CSUM_IP_VALID |
                         CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
                    m_new->m_pkthdr.csum_data = 0xffff;
                }
                do_lro = 1;
            } else if (pr == IPPROTO_UDP) {
                if (do_csum &&
                    (rxr->hn_trust_hcsum &
                     HN_TRUST_HCSUM_UDP)) {
                    rxr->hn_csum_trusted++;
                    m_new->m_pkthdr.csum_flags |=
                        (CSUM_IP_CHECKED | CSUM_IP_VALID |
                         CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
                    m_new->m_pkthdr.csum_data = 0xffff;
                }
            } else if (pr != IPPROTO_DONE && do_csum &&
                (rxr->hn_trust_hcsum & HN_TRUST_HCSUM_IP)) {
                rxr->hn_csum_trusted++;
                m_new->m_pkthdr.csum_flags |=
                    (CSUM_IP_CHECKED | CSUM_IP_VALID);
            }
        }
    }
skip:
    if ((packet->vlan_tci != 0) &&
        (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0) {
        m_new->m_pkthdr.ether_vtag = packet->vlan_tci;
        m_new->m_flags |= M_VLANTAG;
    }

    if (hash_info != NULL && hash_value != NULL) {
        int hash_type = M_HASHTYPE_OPAQUE;
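        /*
         * Note: the host's NDIS hash info is mapped to the matching mbuf
         * M_HASHTYPE_RSS_* value in the switch below; anything that is not
         * a recognized Toeplitz hash type stays M_HASHTYPE_OPAQUE.
         */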
        rxr->hn_rss_pkts++;
        m_new->m_pkthdr.flowid = hash_value->hash_value;
        if ((hash_info->hash_info & NDIS_HASH_FUNCTION_MASK) ==
            NDIS_HASH_FUNCTION_TOEPLITZ) {
            uint32_t type =
                (hash_info->hash_info & NDIS_HASH_TYPE_MASK);

            switch (type) {
            case NDIS_HASH_IPV4:
                hash_type = M_HASHTYPE_RSS_IPV4;
                break;
            case NDIS_HASH_TCP_IPV4:
                hash_type = M_HASHTYPE_RSS_TCP_IPV4;
                break;
            case NDIS_HASH_IPV6:
                hash_type = M_HASHTYPE_RSS_IPV6;
                break;
            case NDIS_HASH_IPV6_EX:
                hash_type = M_HASHTYPE_RSS_IPV6_EX;
                break;
            case NDIS_HASH_TCP_IPV6:
                hash_type = M_HASHTYPE_RSS_TCP_IPV6;
                break;
            case NDIS_HASH_TCP_IPV6_EX:
                hash_type = M_HASHTYPE_RSS_TCP_IPV6_EX;
                break;
            }
        }
        M_HASHTYPE_SET(m_new, hash_type);
    } else {
        if (hash_value != NULL)
            m_new->m_pkthdr.flowid = hash_value->hash_value;
        else
            m_new->m_pkthdr.flowid = rxr->hn_rx_idx;
        M_HASHTYPE_SET(m_new, M_HASHTYPE_OPAQUE);
    }

    /*
     * Note:  Moved RX completion back to hv_nv_on_receive() so all
     * messages (not just data messages) will trigger a response.
     */

    if ((ifp->if_capenable & IFCAP_LRO) && do_lro) {
#if defined(INET) || defined(INET6)
        struct lro_ctrl *lro = &rxr->hn_lro;

        if (lro->lro_cnt) {
            rxr->hn_lro_tried++;
            if (hn_lro_rx(lro, m_new) == 0) {
                /* DONE! */
                return 0;
            }
        }
#endif
    }

    /* We're not holding the lock here, so don't release it */
    (*ifp->if_input)(ifp, m_new);

    return (0);
}

/*
 * Rules for using sc->temp_unusable:
 * 1. sc->temp_unusable can only be read or written while holding NV_LOCK()
 * 2. code reading sc->temp_unusable under NV_LOCK(), and finding
 *    sc->temp_unusable set, must release NV_LOCK() and exit
 * 3. to retain exclusive control of the interface,
 *    sc->temp_unusable must be set by code before releasing NV_LOCK()
 * 4. only code setting sc->temp_unusable can clear sc->temp_unusable
 * 5. code setting sc->temp_unusable must eventually clear sc->temp_unusable
 */

/*
 * Standard ioctl entry point.  Called when the user wants to configure
 * the interface.
 */
static int
hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
    hn_softc_t *sc = ifp->if_softc;
    struct ifreq *ifr = (struct ifreq *)data;
#ifdef INET
    struct ifaddr *ifa = (struct ifaddr *)data;
#endif
    netvsc_device_info device_info;
    struct hv_device *hn_dev;
    int mask, error = 0;
    int retry_cnt = 500;

    switch (cmd) {
    case SIOCSIFADDR:
#ifdef INET
        if (ifa->ifa_addr->sa_family == AF_INET) {
            ifp->if_flags |= IFF_UP;
            if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
                hn_ifinit(sc);
            arp_ifinit(ifp, ifa);
        } else
#endif
            error = ether_ioctl(ifp, cmd, data);
        break;
    case SIOCSIFMTU:
        hn_dev = vmbus_get_devctx(sc->hn_dev);

        /* Check MTU value change */
        if (ifp->if_mtu == ifr->ifr_mtu)
            break;

        if (ifr->ifr_mtu > NETVSC_MAX_CONFIGURABLE_MTU) {
            error = EINVAL;
            break;
        }

        /* Obtain and record requested MTU */
        ifp->if_mtu = ifr->ifr_mtu;

#if __FreeBSD_version >= 1100099
        /*
         * Make sure that LRO aggregation length limit is still
         * valid, after the MTU change.
         */
        if (sc->hn_rx_ring[0].hn_lro.lro_length_lim <
            HN_LRO_LENLIM_MIN(ifp))
            hn_set_lro_lenlim(sc, HN_LRO_LENLIM_MIN(ifp));
#endif

        do {
            NV_LOCK(sc);
            if (!sc->temp_unusable) {
                sc->temp_unusable = TRUE;
                NV_UNLOCK(sc);
                break;
            }
            NV_UNLOCK(sc);
            if (retry_cnt > 0) {
                retry_cnt--;
                DELAY(5 * 1000);
            }
        } while (retry_cnt > 0);

        if (retry_cnt == 0) {
            error = EBUSY;
            break;
        }

        /* We must remove and add back the device to cause the new
         * MTU to take effect.  This includes tearing down, but not
         * deleting the channel, then bringing it back up.
         */
        error = hv_rf_on_device_remove(hn_dev, HV_RF_NV_RETAIN_CHANNEL);
        if (error) {
            NV_LOCK(sc);
            sc->temp_unusable = FALSE;
            NV_UNLOCK(sc);
            break;
        }
        error = hv_rf_on_device_add(hn_dev, &device_info,
            sc->hn_rx_ring_inuse);
        if (error) {
            NV_LOCK(sc);
            sc->temp_unusable = FALSE;
            NV_UNLOCK(sc);
            break;
        }

        sc->hn_tx_chimney_max = sc->net_dev->send_section_size;
        if (sc->hn_tx_ring[0].hn_tx_chimney_size >
            sc->hn_tx_chimney_max)
            hn_set_tx_chimney_size(sc, sc->hn_tx_chimney_max);

        hn_ifinit_locked(sc);

        NV_LOCK(sc);
        sc->temp_unusable = FALSE;
        NV_UNLOCK(sc);
        break;
    case SIOCSIFFLAGS:
        do {
            NV_LOCK(sc);
            if (!sc->temp_unusable) {
                sc->temp_unusable = TRUE;
                NV_UNLOCK(sc);
                break;
            }
            NV_UNLOCK(sc);
            if (retry_cnt > 0) {
                retry_cnt--;
                DELAY(5 * 1000);
            }
        } while (retry_cnt > 0);

        if (retry_cnt == 0) {
            error = EBUSY;
            break;
        }

        if (ifp->if_flags & IFF_UP) {
            /*
             * If only the state of the PROMISC flag changed,
             * then just use the 'set promisc mode' command
             * instead of reinitializing the entire NIC. Doing
             * a full re-init means reloading the firmware and
             * waiting for it to start up, which may take a
             * second or two.
             */
            /* Fixme:  Promiscuous mode? */
            if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
                ifp->if_flags & IFF_PROMISC &&
                !(sc->hn_if_flags & IFF_PROMISC)) {
                /* do something here for Hyper-V */
            } else if (ifp->if_drv_flags & IFF_DRV_RUNNING &&
                !(ifp->if_flags & IFF_PROMISC) &&
                sc->hn_if_flags & IFF_PROMISC) {
                /* do something here for Hyper-V */
            }
            hn_ifinit_locked(sc);
        } else {
            if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                hn_stop(sc);
            }
        }
        NV_LOCK(sc);
        sc->temp_unusable = FALSE;
        NV_UNLOCK(sc);
        sc->hn_if_flags = ifp->if_flags;
        break;
    case SIOCSIFCAP:
        mask = ifr->ifr_reqcap ^ ifp->if_capenable;
        if (mask & IFCAP_TXCSUM) {
            ifp->if_capenable ^= IFCAP_TXCSUM;
            if (ifp->if_capenable & IFCAP_TXCSUM) {
                ifp->if_hwassist |=
                    sc->hn_tx_ring[0].hn_csum_assist;
            } else {
                ifp->if_hwassist &=
                    ~sc->hn_tx_ring[0].hn_csum_assist;
            }
        }

        if (mask & IFCAP_RXCSUM)
            ifp->if_capenable ^= IFCAP_RXCSUM;

        if (mask & IFCAP_LRO)
            ifp->if_capenable ^= IFCAP_LRO;

        if (mask & IFCAP_TSO4) {
            ifp->if_capenable ^= IFCAP_TSO4;
            if (ifp->if_capenable & IFCAP_TSO4)
                ifp->if_hwassist |= CSUM_IP_TSO;
            else
                ifp->if_hwassist &= ~CSUM_IP_TSO;
        }

        if (mask & IFCAP_TSO6) {
            ifp->if_capenable ^= IFCAP_TSO6;
            if (ifp->if_capenable & IFCAP_TSO6)
                ifp->if_hwassist |= CSUM_IP6_TSO;
            else
                ifp->if_hwassist &= ~CSUM_IP6_TSO;
        }
        break;
    case SIOCADDMULTI:
    case SIOCDELMULTI:
        /* Fixme:  Multicast mode? */
        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
            NV_LOCK(sc);
            netvsc_setmulti(sc);
            NV_UNLOCK(sc);
        }
        break;
    case SIOCSIFMEDIA:
    case SIOCGIFMEDIA:
        error = ifmedia_ioctl(ifp, ifr, &sc->hn_media, cmd);
        break;
    default:
        error = ether_ioctl(ifp, cmd, data);
        break;
    }

    return (error);
}

static void
hn_stop(hn_softc_t *sc)
{
    struct ifnet *ifp;
    int ret;
    struct hv_device *device_ctx = vmbus_get_devctx(sc->hn_dev);
    int i;

    ifp = sc->hn_ifp;

    printf(" Closing Device ...\n");

    atomic_clear_int(&ifp->if_drv_flags,
        (IFF_DRV_RUNNING | IFF_DRV_OACTIVE));
    for (i = 0; i < sc->hn_tx_ring_inuse; ++i)
        sc->hn_tx_ring[i].hn_oactive = 0;

    if_link_state_change(ifp, LINK_STATE_DOWN);
    sc->hn_initdone = 0;

    ret = hv_rf_on_close(device_ctx);
}

/*
 * FreeBSD transmit entry point
 */
static void
hn_start(struct ifnet *ifp)
{
    struct hn_softc *sc = ifp->if_softc;
    struct hn_tx_ring *txr = &sc->hn_tx_ring[0];

    if (txr->hn_sched_tx)
        goto do_sched;

    if (mtx_trylock(&txr->hn_tx_lock)) {
        int sched;

        sched = hn_start_locked(txr, txr->hn_direct_tx_size);
        mtx_unlock(&txr->hn_tx_lock);
        if (!sched)
            return;
    }
do_sched:
    taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_tx_task);
}

static void
hn_start_txeof(struct hn_tx_ring *txr)
{
    struct hn_softc *sc = txr->hn_sc;
    struct ifnet *ifp = sc->hn_ifp;

    KASSERT(txr == &sc->hn_tx_ring[0], ("not the first TX ring"));

    if (txr->hn_sched_tx)
        goto do_sched;

    if (mtx_trylock(&txr->hn_tx_lock)) {
        int sched;

        atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
        sched = hn_start_locked(txr, txr->hn_direct_tx_size);
        mtx_unlock(&txr->hn_tx_lock);
        if (sched) {
            taskqueue_enqueue(txr->hn_tx_taskq,
                &txr->hn_tx_task);
        }
    } else {
do_sched:
        /*
         * Release the OACTIVE earlier, with the hope, that
         * others could catch up.  The task will clear the
         * flag again with the hn_tx_lock to avoid possible
         * races.
         */
        atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
        taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_txeof_task);
    }
}

static void
hn_ifinit_locked(hn_softc_t *sc)
{
    struct ifnet *ifp;
    struct hv_device *device_ctx = vmbus_get_devctx(sc->hn_dev);
    int ret, i;

    ifp = sc->hn_ifp;

    if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
        return;
    }

    hv_promisc_mode = 1;

    ret = hv_rf_on_open(device_ctx);
    if (ret != 0) {
        return;
    } else {
        sc->hn_initdone = 1;
    }

    atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE);
    for (i = 0; i < sc->hn_tx_ring_inuse; ++i)
        sc->hn_tx_ring[i].hn_oactive = 0;

    atomic_set_int(&ifp->if_drv_flags, IFF_DRV_RUNNING);
    if_link_state_change(ifp, LINK_STATE_UP);
}

static void
hn_ifinit(void *xsc)
{
    hn_softc_t *sc = xsc;

    NV_LOCK(sc);
    if (sc->temp_unusable) {
        NV_UNLOCK(sc);
        return;
    }
    sc->temp_unusable = TRUE;
    NV_UNLOCK(sc);

    hn_ifinit_locked(sc);

    NV_LOCK(sc);
    sc->temp_unusable = FALSE;
    NV_UNLOCK(sc);
}

#ifdef LATER
static void
hn_watchdog(struct ifnet *ifp)
{
    hn_softc_t *sc;
    sc = ifp->if_softc;

    printf("hn%d: watchdog timeout -- resetting\n", sc->hn_unit);
    hn_ifinit(sc);    /*???*/
    ifp->if_oerrors++;
}
#endif

#if __FreeBSD_version >= 1100099
static int
hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct hn_softc *sc = arg1;
    unsigned int lenlim;
    int error;

    lenlim = sc->hn_rx_ring[0].hn_lro.lro_length_lim;
    error = sysctl_handle_int(oidp, &lenlim, 0, req);
    if (error || req->newptr == NULL)
        return error;

    if (lenlim < HN_LRO_LENLIM_MIN(sc->hn_ifp) ||
        lenlim > TCP_LRO_LENGTH_MAX)
        return EINVAL;

    hn_set_lro_lenlim(sc, lenlim);
    return 0;
}

static int
hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct hn_softc *sc = arg1;
    int ackcnt, error, i;

    /*
     * lro_ackcnt_lim is the append count limit;
     * +1 turns it into the aggregation limit.
     */
    ackcnt = sc->hn_rx_ring[0].hn_lro.lro_ackcnt_lim + 1;
    error = sysctl_handle_int(oidp, &ackcnt, 0, req);
    if (error || req->newptr == NULL)
        return error;

    if (ackcnt < 2 || ackcnt > (TCP_LRO_ACKCNT_MAX + 1))
        return EINVAL;

    /*
     * Convert the aggregation limit back to the append
     * count limit.
     */
    --ackcnt;
    NV_LOCK(sc);
    for (i = 0; i < sc->hn_rx_ring_inuse; ++i)
        sc->hn_rx_ring[i].hn_lro.lro_ackcnt_lim = ackcnt;
    NV_UNLOCK(sc);
    return 0;
}
#endif

static int
hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct hn_softc *sc = arg1;
    int hcsum = arg2;
    int on, error, i;

    on = 0;
    if (sc->hn_rx_ring[0].hn_trust_hcsum & hcsum)
        on = 1;

    error = sysctl_handle_int(oidp, &on, 0, req);
    if (error || req->newptr == NULL)
        return error;

    NV_LOCK(sc);
    for (i = 0; i < sc->hn_rx_ring_inuse; ++i) {
        struct hn_rx_ring *rxr = &sc->hn_rx_ring[i];

        if (on)
            rxr->hn_trust_hcsum |= hcsum;
        else
            rxr->hn_trust_hcsum &= ~hcsum;
    }
    NV_UNLOCK(sc);
    return 0;
}

static int
hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct hn_softc *sc = arg1;
    int chimney_size, error;

    chimney_size = sc->hn_tx_ring[0].hn_tx_chimney_size;
    error = sysctl_handle_int(oidp, &chimney_size, 0, req);
    if (error || req->newptr == NULL)
        return error;

    if (chimney_size > sc->hn_tx_chimney_max || chimney_size <= 0)
        return EINVAL;

    hn_set_tx_chimney_size(sc, chimney_size);
    return 0;
}

#if __FreeBSD_version < 1100095
static int
hn_rx_stat_int_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct hn_softc *sc = arg1;
    int ofs = arg2, i, error;
    struct hn_rx_ring *rxr;
    uint64_t stat;

    stat = 0;
    for (i = 0; i < sc->hn_rx_ring_inuse; ++i) {
        rxr = &sc->hn_rx_ring[i];
        stat += *((int *)((uint8_t *)rxr + ofs));
    }

    error = sysctl_handle_64(oidp, &stat, 0, req);
    if (error || req->newptr == NULL)
        return error;

    /* Zero out this stat. */
    for (i = 0; i < sc->hn_rx_ring_inuse; ++i) {
        rxr = &sc->hn_rx_ring[i];
        *((int *)((uint8_t *)rxr + ofs)) = 0;
    }
    return 0;
}
#else
static int
hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct hn_softc *sc = arg1;
    int ofs = arg2, i, error;
    struct hn_rx_ring *rxr;
    uint64_t stat;

    stat = 0;
    for (i = 0; i < sc->hn_rx_ring_inuse; ++i) {
        rxr = &sc->hn_rx_ring[i];
        stat += *((uint64_t *)((uint8_t *)rxr + ofs));
    }

    error = sysctl_handle_64(oidp, &stat, 0, req);
    if (error || req->newptr == NULL)
        return error;

    /* Zero out this stat. */
    for (i = 0; i < sc->hn_rx_ring_inuse; ++i) {
        rxr = &sc->hn_rx_ring[i];
        *((uint64_t *)((uint8_t *)rxr + ofs)) = 0;
    }
    return 0;
}
#endif

static int
hn_rx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct hn_softc *sc = arg1;
    int ofs = arg2, i, error;
    struct hn_rx_ring *rxr;
    u_long stat;

    stat = 0;
    for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
        rxr = &sc->hn_rx_ring[i];
        stat += *((u_long *)((uint8_t *)rxr + ofs));
    }

    error = sysctl_handle_long(oidp, &stat, 0, req);
    if (error || req->newptr == NULL)
        return error;

    /* Zero out this stat. */
    for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
        rxr = &sc->hn_rx_ring[i];
        *((u_long *)((uint8_t *)rxr + ofs)) = 0;
    }
    return 0;
}

static int
hn_tx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct hn_softc *sc = arg1;
    int ofs = arg2, i, error;
    struct hn_tx_ring *txr;
    u_long stat;

    stat = 0;
    for (i = 0; i < sc->hn_tx_ring_inuse; ++i) {
        txr = &sc->hn_tx_ring[i];
        stat += *((u_long *)((uint8_t *)txr + ofs));
    }

    error = sysctl_handle_long(oidp, &stat, 0, req);
    if (error || req->newptr == NULL)
        return error;

    /* Zero out this stat. */
    for (i = 0; i < sc->hn_tx_ring_inuse; ++i) {
        txr = &sc->hn_tx_ring[i];
        *((u_long *)((uint8_t *)txr + ofs)) = 0;
    }
    return 0;
}

static int
hn_tx_conf_int_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct hn_softc *sc = arg1;
    int ofs = arg2, i, error, conf;
    struct hn_tx_ring *txr;

    txr = &sc->hn_tx_ring[0];
    conf = *((int *)((uint8_t *)txr + ofs));

    error = sysctl_handle_int(oidp, &conf, 0, req);
    if (error || req->newptr == NULL)
        return error;

    NV_LOCK(sc);
    for (i = 0; i < sc->hn_tx_ring_inuse; ++i) {
        txr = &sc->hn_tx_ring[i];
        *((int *)((uint8_t *)txr + ofs)) = conf;
    }
    NV_UNLOCK(sc);

    return 0;
}
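/*
 * Note: sanity-checks the IPv4 packet starting at byte offset 'hoff' of
 * the mbuf.  Returns the IP protocol number (e.g. IPPROTO_TCP or
 * IPPROTO_UDP) when the whole header chain resides in the first mbuf and
 * the packet is not a fragment; returns IPPROTO_DONE when the packet
 * should not be trusted.
 */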
2167 hn_check_iplen(const struct mbuf *m, int hoff)
2169 const struct ip *ip;
2170 int len, iphlen, iplen;
2171 const struct tcphdr *th;
2172 int thoff; /* TCP data offset */
2174 len = hoff + sizeof(struct ip);
2176 /* The packet must be at least the size of an IP header. */
2177 if (m->m_pkthdr.len < len)
2178 return IPPROTO_DONE;
2180 /* The fixed IP header must reside completely in the first mbuf. */
2182 return IPPROTO_DONE;
2184 ip = mtodo(m, hoff);
2186 /* Bound check the packet's stated IP header length. */
2187 iphlen = ip->ip_hl << 2;
2188 if (iphlen < sizeof(struct ip)) /* minimum header length */
2189 return IPPROTO_DONE;
2191 /* The full IP header must reside completely in the one mbuf. */
2192 if (m->m_len < hoff + iphlen)
2193 return IPPROTO_DONE;
2195 iplen = ntohs(ip->ip_len);
2198 * Check that the amount of data in the buffers is as
2199 * at least much as the IP header would have us expect.
2201 if (m->m_pkthdr.len < hoff + iplen)
2202 return IPPROTO_DONE;
2205 * Ignore IP fragments.
2207 if (ntohs(ip->ip_off) & (IP_OFFMASK | IP_MF))
2208 return IPPROTO_DONE;
2211 * The TCP/IP or UDP/IP header must be entirely contained within
2212 * the first fragment of a packet.
2216 if (iplen < iphlen + sizeof(struct tcphdr))
2217 return IPPROTO_DONE;
2218 if (m->m_len < hoff + iphlen + sizeof(struct tcphdr))
2219 return IPPROTO_DONE;
2220 th = (const struct tcphdr *)((const uint8_t *)ip + iphlen);
2221 thoff = th->th_off << 2;
2222 if (thoff < sizeof(struct tcphdr) || thoff + iphlen > iplen)
2223 return IPPROTO_DONE;
2224 if (m->m_len < hoff + iphlen + thoff)
2225 return IPPROTO_DONE;
2228 if (iplen < iphlen + sizeof(struct udphdr))
2229 return IPPROTO_DONE;
2230 if (m->m_len < hoff + iphlen + sizeof(struct udphdr))
2231 return IPPROTO_DONE;
2235 return IPPROTO_DONE;
2242 hn_create_rx_data(struct hn_softc *sc, int ring_cnt)
2244 struct sysctl_oid_list *child;
2245 struct sysctl_ctx_list *ctx;
2246 device_t dev = sc->hn_dev;
2247 #if defined(INET) || defined(INET6)
2248 #if __FreeBSD_version >= 1100095
2254 sc->hn_rx_ring_cnt = ring_cnt;
2255 sc->hn_rx_ring_inuse = sc->hn_rx_ring_cnt;
2257 sc->hn_rx_ring = malloc(sizeof(struct hn_rx_ring) * sc->hn_rx_ring_cnt,
2258 M_NETVSC, M_WAITOK | M_ZERO);
2260 #if defined(INET) || defined(INET6)
2261 #if __FreeBSD_version >= 1100095
2262 lroent_cnt = hn_lro_entry_count;
2263 if (lroent_cnt < TCP_LRO_ENTRIES)
2264 lroent_cnt = TCP_LRO_ENTRIES;
2265 device_printf(dev, "LRO: entry count %d\n", lroent_cnt);
2267 #endif /* INET || INET6 */
2269 ctx = device_get_sysctl_ctx(dev);
2270 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2272 /* Create dev.hn.UNIT.rx sysctl tree */
2273 sc->hn_rx_sysctl_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "rx",
2274 CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
2276 for (i = 0; i < sc->hn_rx_ring_cnt; ++i) {
2277 struct hn_rx_ring *rxr = &sc->hn_rx_ring[i];
2279 if (hn_trust_hosttcp)
2280 rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_TCP;
2281 if (hn_trust_hostudp)
2282 rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_UDP;
2283 if (hn_trust_hostip)
2284 rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_IP;
2285 rxr->hn_ifp = sc->hn_ifp;
2291 #if defined(INET) || defined(INET6)
2292 #if __FreeBSD_version >= 1100095
2293 tcp_lro_init_args(&rxr->hn_lro, sc->hn_ifp, lroent_cnt,
2294 hn_lro_mbufq_depth);
2296 tcp_lro_init(&rxr->hn_lro);
2297 rxr->hn_lro.ifp = sc->hn_ifp;
2299 #if __FreeBSD_version >= 1100099
2300 rxr->hn_lro.lro_length_lim = HN_LRO_LENLIM_DEF;
2301 rxr->hn_lro.lro_ackcnt_lim = HN_LRO_ACKCNT_DEF;
2303 #endif /* INET || INET6 */
2305 if (sc->hn_rx_sysctl_tree != NULL) {
2309 * Create per RX ring sysctl tree:
2310 * dev.hn.UNIT.rx.RINGID
2312 snprintf(name, sizeof(name), "%d", i);
2313 rxr->hn_rx_sysctl_tree = SYSCTL_ADD_NODE(ctx,
2314 SYSCTL_CHILDREN(sc->hn_rx_sysctl_tree),
2315 OID_AUTO, name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
2317 if (rxr->hn_rx_sysctl_tree != NULL) {
2318 SYSCTL_ADD_ULONG(ctx,
2319 SYSCTL_CHILDREN(rxr->hn_rx_sysctl_tree),
2320 OID_AUTO, "packets", CTLFLAG_RW,
2321 &rxr->hn_pkts, "# of packets received");
2322 SYSCTL_ADD_ULONG(ctx,
2323 SYSCTL_CHILDREN(rxr->hn_rx_sysctl_tree),
2324 OID_AUTO, "rss_pkts", CTLFLAG_RW,
2326 "# of packets w/ RSS info received");
2331 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_queued",
2332 CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2333 __offsetof(struct hn_rx_ring, hn_lro.lro_queued),
2334 #if __FreeBSD_version < 1100095
2335 hn_rx_stat_int_sysctl,
2337 hn_rx_stat_u64_sysctl,
2339 "LU", "LRO queued");
2340 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_flushed",
2341 CTLTYPE_U64 | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2342 __offsetof(struct hn_rx_ring, hn_lro.lro_flushed),
2343 #if __FreeBSD_version < 1100095
2344 hn_rx_stat_int_sysctl,
2346 hn_rx_stat_u64_sysctl,
2348 "LU", "LRO flushed");
2349 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_tried",
2350 CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
2351 __offsetof(struct hn_rx_ring, hn_lro_tried),
2352 hn_rx_stat_ulong_sysctl, "LU", "# of LRO tries");
2353 #if __FreeBSD_version >= 1100099
2354 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_length_lim",
2355 CTLTYPE_UINT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
2356 hn_lro_lenlim_sysctl, "IU",
2357 "Max # of data bytes to be aggregated by LRO");
2358 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_ackcnt_lim",
2359 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
2360 hn_lro_ackcnt_sysctl, "I",
2361 "Max # of ACKs to be aggregated by LRO");
2363 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hosttcp",
2364 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, HN_TRUST_HCSUM_TCP,
2365 hn_trust_hcsum_sysctl, "I",
2366 "Trust tcp segement verification on host side, "
2367 "when csum info is missing");
2368 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostudp",
2369 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, HN_TRUST_HCSUM_UDP,
2370 hn_trust_hcsum_sysctl, "I",
2371 "Trust udp datagram verification on host side, "
2372 "when csum info is missing");
2373 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostip",
2374 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, HN_TRUST_HCSUM_IP,
2375 hn_trust_hcsum_sysctl, "I",
2376 "Trust ip packet verification on host side, "
2377 "when csum info is missing");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_ip",
	    CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_rx_ring, hn_csum_ip),
	    hn_rx_stat_ulong_sysctl, "LU", "RXCSUM IP");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_tcp",
	    CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_rx_ring, hn_csum_tcp),
	    hn_rx_stat_ulong_sysctl, "LU", "RXCSUM TCP");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_udp",
	    CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_rx_ring, hn_csum_udp),
	    hn_rx_stat_ulong_sysctl, "LU", "RXCSUM UDP");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_trusted",
	    CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_rx_ring, hn_csum_trusted),
	    hn_rx_stat_ulong_sysctl, "LU",
	    "# of packets whose host csum verification we trusted");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "small_pkts",
	    CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_rx_ring, hn_small_pkts),
	    hn_rx_stat_ulong_sysctl, "LU", "# of small packets received");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_ring_cnt",
	    CTLFLAG_RD, &sc->hn_rx_ring_cnt, 0, "# of created RX rings");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_ring_inuse",
	    CTLFLAG_RD, &sc->hn_rx_ring_inuse, 0, "# of RX rings in use");
}

static void
hn_destroy_rx_data(struct hn_softc *sc)
{
#if defined(INET) || defined(INET6)
	int i;
#endif

	if (sc->hn_rx_ring_cnt == 0)
		return;

#if defined(INET) || defined(INET6)
	for (i = 0; i < sc->hn_rx_ring_cnt; ++i)
		tcp_lro_free(&sc->hn_rx_ring[i].hn_lro);
#endif

	free(sc->hn_rx_ring, M_NETVSC);
	sc->hn_rx_ring = NULL;

	sc->hn_rx_ring_cnt = 0;
	sc->hn_rx_ring_inuse = 0;
}

static int
hn_create_tx_ring(struct hn_softc *sc, int id)
{
	struct hn_tx_ring *txr = &sc->hn_tx_ring[id];
	device_t dev = sc->hn_dev;
	bus_dma_tag_t parent_dtag;
	uint32_t version;
	int error, i;

	txr->hn_sc = sc;
	txr->hn_tx_idx = id;

#ifndef HN_USE_TXDESC_BUFRING
	mtx_init(&txr->hn_txlist_spin, "hn txlist", NULL, MTX_SPIN);
#endif
	mtx_init(&txr->hn_tx_lock, "hn tx", NULL, MTX_DEF);

	txr->hn_txdesc_cnt = HN_TX_DESC_CNT;
	txr->hn_txdesc = malloc(sizeof(struct hn_txdesc) * txr->hn_txdesc_cnt,
	    M_NETVSC, M_WAITOK | M_ZERO);
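
	/*
	 * TX descriptors are parked either on a spinlock-protected
	 * SLIST or, when HN_USE_TXDESC_BUFRING is defined, on a
	 * lock-free buf_ring sized to hold every descriptor.
	 */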
#ifndef HN_USE_TXDESC_BUFRING
	SLIST_INIT(&txr->hn_txlist);
#else
	txr->hn_txdesc_br = buf_ring_alloc(txr->hn_txdesc_cnt, M_NETVSC,
	    M_WAITOK, &txr->hn_tx_lock);
#endif

	txr->hn_tx_taskq = sc->hn_tx_taskq;
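
	/*
	 * Legacy if_start transmission completes through
	 * hn_start_txeof; the if_transmit path uses hn_xmit_txeof and
	 * a per-ring mbuf buf_ring as its software queue.
	 */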
	if (hn_use_if_start) {
		txr->hn_txeof = hn_start_txeof;
		TASK_INIT(&txr->hn_tx_task, 0, hn_start_taskfunc, txr);
		TASK_INIT(&txr->hn_txeof_task, 0, hn_start_txeof_taskfunc, txr);
	} else {
		int br_depth;

		txr->hn_txeof = hn_xmit_txeof;
		TASK_INIT(&txr->hn_tx_task, 0, hn_xmit_taskfunc, txr);
		TASK_INIT(&txr->hn_txeof_task, 0, hn_xmit_txeof_taskfunc, txr);

		br_depth = hn_get_txswq_depth(txr);
		txr->hn_mbuf_br = buf_ring_alloc(br_depth, M_NETVSC,
		    M_WAITOK, &txr->hn_tx_lock);
	}

	txr->hn_direct_tx_size = hn_direct_tx_size;
	version = VMBUS_GET_VERSION(device_get_parent(dev), dev);
	if (version >= VMBUS_VERSION_WIN8_1) {
		txr->hn_csum_assist = HN_CSUM_ASSIST;
	} else {
		txr->hn_csum_assist = HN_CSUM_ASSIST_WIN8;
		if (bootverbose) {
			device_printf(dev, "bus version %u.%u, "
			    "no UDP checksum offloading\n",
			    VMBUS_VERSION_MAJOR(version),
			    VMBUS_VERSION_MINOR(version));
		}
	}

	/*
	 * Always schedule transmission instead of trying direct
	 * transmission; this gives the best performance so far.
	 */
	txr->hn_sched_tx = 1;

	parent_dtag = bus_get_dma_tag(dev);

	/* DMA tag for RNDIS messages. */
	error = bus_dma_tag_create(parent_dtag, /* parent */
	    HN_RNDIS_MSG_ALIGN,		/* alignment */
	    HN_RNDIS_MSG_BOUNDARY,	/* boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    HN_RNDIS_MSG_LEN,		/* maxsize */
	    1,				/* nsegments */
	    HN_RNDIS_MSG_LEN,		/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockfuncarg */
	    &txr->hn_tx_rndis_dtag);
	if (error) {
		device_printf(dev, "failed to create rndis dmatag\n");
		return (error);
	}

	/* DMA tag for data. */
	error = bus_dma_tag_create(parent_dtag, /* parent */
	    1,				/* alignment */
	    HN_TX_DATA_BOUNDARY,	/* boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    HN_TX_DATA_MAXSIZE,		/* maxsize */
	    HN_TX_DATA_SEGCNT_MAX,	/* nsegments */
	    HN_TX_DATA_SEGSIZE,		/* maxsegsize */
	    0,				/* flags */
	    NULL,			/* lockfunc */
	    NULL,			/* lockfuncarg */
	    &txr->hn_tx_data_dtag);
	if (error) {
		device_printf(dev, "failed to create data dmatag\n");
		return (error);
	}
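
	/*
	 * Set up per-descriptor resources: a DMA'able RNDIS message
	 * buffer, its loaded physical address, and a DMA map for the
	 * mbuf data.
	 */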
	for (i = 0; i < txr->hn_txdesc_cnt; ++i) {
		struct hn_txdesc *txd = &txr->hn_txdesc[i];

		txd->txr = txr;

		/*
		 * Allocate and load RNDIS messages.
		 */
		error = bus_dmamem_alloc(txr->hn_tx_rndis_dtag,
		    (void **)&txd->rndis_msg,
		    BUS_DMA_WAITOK | BUS_DMA_COHERENT,
		    &txd->rndis_msg_dmap);
		if (error) {
			device_printf(dev,
			    "failed to allocate rndis_msg, %d\n", i);
			return (error);
		}

		error = bus_dmamap_load(txr->hn_tx_rndis_dtag,
		    txd->rndis_msg_dmap,
		    txd->rndis_msg, HN_RNDIS_MSG_LEN,
		    hyperv_dma_map_paddr, &txd->rndis_msg_paddr,
		    BUS_DMA_NOWAIT);
		if (error) {
			device_printf(dev,
			    "failed to load rndis_msg, %d\n", i);
			bus_dmamem_free(txr->hn_tx_rndis_dtag,
			    txd->rndis_msg, txd->rndis_msg_dmap);
			return (error);
		}

		/* DMA map for TX data. */
		error = bus_dmamap_create(txr->hn_tx_data_dtag, 0,
		    &txd->data_dmap);
		if (error) {
			device_printf(dev,
			    "failed to allocate tx data dmamap\n");
			bus_dmamap_unload(txr->hn_tx_rndis_dtag,
			    txd->rndis_msg_dmap);
			bus_dmamem_free(txr->hn_tx_rndis_dtag,
			    txd->rndis_msg, txd->rndis_msg_dmap);
			return (error);
		}

		/* All set, put it to list */
		txd->flags |= HN_TXD_FLAG_ONLIST;
#ifndef HN_USE_TXDESC_BUFRING
		SLIST_INSERT_HEAD(&txr->hn_txlist, txd, link);
#else
		buf_ring_enqueue(txr->hn_txdesc_br, txd);
#endif
	}
	txr->hn_txdesc_avail = txr->hn_txdesc_cnt;

	if (sc->hn_tx_sysctl_tree != NULL) {
		struct sysctl_oid_list *child;
		struct sysctl_ctx_list *ctx;
		char name[16];

		/*
		 * Create per TX ring sysctl tree:
		 * dev.hn.UNIT.tx.RINGID
		 */
		ctx = device_get_sysctl_ctx(dev);
		child = SYSCTL_CHILDREN(sc->hn_tx_sysctl_tree);

		snprintf(name, sizeof(name), "%d", id);
		txr->hn_tx_sysctl_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
		    name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");

		if (txr->hn_tx_sysctl_tree != NULL) {
			child = SYSCTL_CHILDREN(txr->hn_tx_sysctl_tree);

			SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_avail",
			    CTLFLAG_RD, &txr->hn_txdesc_avail, 0,
			    "# of available TX descs");
			if (!hn_use_if_start) {
				SYSCTL_ADD_INT(ctx, child, OID_AUTO, "oactive",
				    CTLFLAG_RD, &txr->hn_oactive, 0,
				    "over active");
			}
			SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "packets",
			    CTLFLAG_RW, &txr->hn_pkts,
			    "# of packets transmitted");
		}
	}

	return (0);
}

static void
hn_txdesc_dmamap_destroy(struct hn_txdesc *txd)
{
	struct hn_tx_ring *txr = txd->txr;

	KASSERT(txd->m == NULL, ("still has mbuf installed"));
	KASSERT((txd->flags & HN_TXD_FLAG_DMAMAP) == 0, ("still dma mapped"));

	bus_dmamap_unload(txr->hn_tx_rndis_dtag, txd->rndis_msg_dmap);
	bus_dmamem_free(txr->hn_tx_rndis_dtag, txd->rndis_msg,
	    txd->rndis_msg_dmap);
	bus_dmamap_destroy(txr->hn_tx_data_dtag, txd->data_dmap);
}

static void
hn_destroy_tx_ring(struct hn_tx_ring *txr)
{
	struct hn_txdesc *txd;

	if (txr->hn_txdesc == NULL)
		return;

#ifndef HN_USE_TXDESC_BUFRING
	while ((txd = SLIST_FIRST(&txr->hn_txlist)) != NULL) {
		SLIST_REMOVE_HEAD(&txr->hn_txlist, link);
		hn_txdesc_dmamap_destroy(txd);
	}
#else
	mtx_lock(&txr->hn_tx_lock);
	while ((txd = buf_ring_dequeue_sc(txr->hn_txdesc_br)) != NULL)
		hn_txdesc_dmamap_destroy(txd);
	mtx_unlock(&txr->hn_tx_lock);
#endif

	if (txr->hn_tx_data_dtag != NULL)
		bus_dma_tag_destroy(txr->hn_tx_data_dtag);
	if (txr->hn_tx_rndis_dtag != NULL)
		bus_dma_tag_destroy(txr->hn_tx_rndis_dtag);

#ifdef HN_USE_TXDESC_BUFRING
	buf_ring_free(txr->hn_txdesc_br, M_NETVSC);
#endif

	free(txr->hn_txdesc, M_NETVSC);
	txr->hn_txdesc = NULL;

	if (txr->hn_mbuf_br != NULL)
		buf_ring_free(txr->hn_mbuf_br, M_NETVSC);

#ifndef HN_USE_TXDESC_BUFRING
	mtx_destroy(&txr->hn_txlist_spin);
#endif
	mtx_destroy(&txr->hn_tx_lock);
}
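
/*
 * Create all TX rings and publish the aggregate TX statistics and
 * tuning knobs under the dev.hn.UNIT sysctl tree.
 */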
static int
hn_create_tx_data(struct hn_softc *sc, int ring_cnt)
{
	struct sysctl_oid_list *child;
	struct sysctl_ctx_list *ctx;
	int i;

	sc->hn_tx_ring_cnt = ring_cnt;
	sc->hn_tx_ring_inuse = sc->hn_tx_ring_cnt;

	sc->hn_tx_ring = malloc(sizeof(struct hn_tx_ring) * sc->hn_tx_ring_cnt,
	    M_NETVSC, M_WAITOK | M_ZERO);

	ctx = device_get_sysctl_ctx(sc->hn_dev);
	child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->hn_dev));

	/* Create dev.hn.UNIT.tx sysctl tree */
	sc->hn_tx_sysctl_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "tx",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");

	for (i = 0; i < sc->hn_tx_ring_cnt; ++i) {
		int error;

		error = hn_create_tx_ring(sc, i);
		if (error)
			return (error);
	}

	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "no_txdescs",
	    CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_tx_ring, hn_no_txdescs),
	    hn_tx_stat_ulong_sysctl, "LU", "# of times short of TX descs");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "send_failed",
	    CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_tx_ring, hn_send_failed),
	    hn_tx_stat_ulong_sysctl, "LU", "# of Hyper-V send failures");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txdma_failed",
	    CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_tx_ring, hn_txdma_failed),
	    hn_tx_stat_ulong_sysctl, "LU", "# of TX DMA failures");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_collapsed",
	    CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_tx_ring, hn_tx_collapsed),
	    hn_tx_stat_ulong_sysctl, "LU", "# of TX mbufs collapsed");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney",
	    CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_tx_ring, hn_tx_chimney),
	    hn_tx_stat_ulong_sysctl, "LU", "# of chimney sends");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney_tried",
	    CTLTYPE_ULONG | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_tx_ring, hn_tx_chimney_tried),
	    hn_tx_stat_ulong_sysctl, "LU", "# of chimney send tries");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_cnt",
	    CTLFLAG_RD, &sc->hn_tx_ring[0].hn_txdesc_cnt, 0,
	    "# of total TX descs");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_chimney_max",
	    CTLFLAG_RD, &sc->hn_tx_chimney_max, 0,
	    "Chimney send packet size upper boundary");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney_size",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0,
	    hn_tx_chimney_size_sysctl,
	    "I", "Chimney send packet size limit");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "direct_tx_size",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_tx_ring, hn_direct_tx_size),
	    hn_tx_conf_int_sysctl, "I",
	    "Size of the packet for direct transmission");
	SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "sched_tx",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, sc,
	    __offsetof(struct hn_tx_ring, hn_sched_tx),
	    hn_tx_conf_int_sysctl, "I",
	    "Always schedule transmission "
	    "instead of doing direct transmission");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_cnt",
	    CTLFLAG_RD, &sc->hn_tx_ring_cnt, 0, "# of created TX rings");
	SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_inuse",
	    CTLFLAG_RD, &sc->hn_tx_ring_inuse, 0, "# of TX rings in use");

	return (0);
}

static void
hn_set_tx_chimney_size(struct hn_softc *sc, int chimney_size)
{
	int i;

	NV_LOCK(sc);
	for (i = 0; i < sc->hn_tx_ring_inuse; ++i)
		sc->hn_tx_ring[i].hn_tx_chimney_size = chimney_size;
	NV_UNLOCK(sc);
}

static void
hn_destroy_tx_data(struct hn_softc *sc)
{
	int i;

	if (sc->hn_tx_ring_cnt == 0)
		return;

	for (i = 0; i < sc->hn_tx_ring_cnt; ++i)
		hn_destroy_tx_ring(&sc->hn_tx_ring[i]);

	free(sc->hn_tx_ring, M_NETVSC);
	sc->hn_tx_ring = NULL;

	sc->hn_tx_ring_cnt = 0;
	sc->hn_tx_ring_inuse = 0;
}

static void
hn_start_taskfunc(void *xtxr, int pending __unused)
{
	struct hn_tx_ring *txr = xtxr;

	mtx_lock(&txr->hn_tx_lock);
	hn_start_locked(txr, 0);
	mtx_unlock(&txr->hn_tx_lock);
}

static void
hn_start_txeof_taskfunc(void *xtxr, int pending __unused)
{
	struct hn_tx_ring *txr = xtxr;

	mtx_lock(&txr->hn_tx_lock);
	atomic_clear_int(&txr->hn_sc->hn_ifp->if_drv_flags, IFF_DRV_OACTIVE);
	hn_start_locked(txr, 0);
	mtx_unlock(&txr->hn_tx_lock);
}

static void
hn_stop_tx_tasks(struct hn_softc *sc)
{
	int i;

	for (i = 0; i < sc->hn_tx_ring_inuse; ++i) {
		struct hn_tx_ring *txr = &sc->hn_tx_ring[i];

		taskqueue_drain(txr->hn_tx_taskq, &txr->hn_tx_task);
		taskqueue_drain(txr->hn_tx_taskq, &txr->hn_txeof_task);
	}
}
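
/*
 * Drain the per-ring software queue.  Returns nonzero if the remaining
 * work should be rescheduled to the TX taskqueue, i.e. when a packet
 * larger than `len' is encountered.  On descriptor shortage or send
 * failure the ring is marked oactive and draining stops.
 */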
static int
hn_xmit(struct hn_tx_ring *txr, int len)
{
	struct hn_softc *sc = txr->hn_sc;
	struct ifnet *ifp = sc->hn_ifp;
	struct mbuf *m_head;

	mtx_assert(&txr->hn_tx_lock, MA_OWNED);
	KASSERT(hn_use_if_start == 0,
	    ("hn_xmit called when if_start is enabled"));

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || txr->hn_oactive)
		return (0);

	while ((m_head = drbr_peek(ifp, txr->hn_mbuf_br)) != NULL) {
		struct hn_txdesc *txd;
		int error;

		if (len > 0 && m_head->m_pkthdr.len > len) {
			/*
			 * This send could be time consuming; let the
			 * caller dispatch it (and any follow-up
			 * packets) to the TX taskqueue.
			 */
			drbr_putback(ifp, txr->hn_mbuf_br, m_head);
			return (1);
		}

		txd = hn_txdesc_get(txr);
		if (txd == NULL) {
			txr->hn_no_txdescs++;
			drbr_putback(ifp, txr->hn_mbuf_br, m_head);
			txr->hn_oactive = 1;
			break;
		}

		error = hn_encap(txr, txd, &m_head);
		if (error) {
			/* Both txd and m_head are freed; discard */
			drbr_advance(ifp, txr->hn_mbuf_br);
			continue;
		}

		error = hn_send_pkt(ifp, txr, txd);
		if (__predict_false(error)) {
			/* txd is freed, but m_head is not */
			drbr_putback(ifp, txr->hn_mbuf_br, m_head);
			txr->hn_oactive = 1;
			break;
		}

		/* Sent */
		drbr_advance(ifp, txr->hn_mbuf_br);
	}
	return (0);
}
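
/*
 * if_transmit method: select a TX ring by flowid, enqueue the mbuf on
 * that ring's buf_ring, then either transmit directly when the ring
 * lock is uncontended or defer to the ring's TX task.
 */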
static int
hn_transmit(struct ifnet *ifp, struct mbuf *m)
{
	struct hn_softc *sc = ifp->if_softc;
	struct hn_tx_ring *txr;
	int error, idx = 0;

	/*
	 * Select the TX ring based on flowid
	 */
	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
		idx = m->m_pkthdr.flowid % sc->hn_tx_ring_inuse;
	txr = &sc->hn_tx_ring[idx];

	error = drbr_enqueue(ifp, txr->hn_mbuf_br, m);
	if (error) {
		if_inc_counter(ifp, IFCOUNTER_OQDROPS, 1);
		return (error);
	}

	if (txr->hn_oactive)
		return (0);

	if (txr->hn_sched_tx)
		goto do_sched;

	if (mtx_trylock(&txr->hn_tx_lock)) {
		int sched;

		sched = hn_xmit(txr, txr->hn_direct_tx_size);
		mtx_unlock(&txr->hn_tx_lock);
		if (!sched)
			return (0);
	}
do_sched:
	taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_tx_task);
	return (0);
}

static void
hn_xmit_qflush(struct ifnet *ifp)
{
	struct hn_softc *sc = ifp->if_softc;
	int i;

	for (i = 0; i < sc->hn_tx_ring_inuse; ++i) {
		struct hn_tx_ring *txr = &sc->hn_tx_ring[i];
		struct mbuf *m;

		mtx_lock(&txr->hn_tx_lock);
		while ((m = buf_ring_dequeue_sc(txr->hn_mbuf_br)) != NULL)
			m_freem(m);
		mtx_unlock(&txr->hn_tx_lock);
	}
}
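
/*
 * TX completion for the if_transmit path: clear oactive and resume
 * draining the software queue, deferring to the txeof task when the
 * ring lock is contended or scheduled transmission is configured.
 */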
static void
hn_xmit_txeof(struct hn_tx_ring *txr)
{

	if (txr->hn_sched_tx)
		goto do_sched;

	if (mtx_trylock(&txr->hn_tx_lock)) {
		int sched;

		txr->hn_oactive = 0;
		sched = hn_xmit(txr, txr->hn_direct_tx_size);
		mtx_unlock(&txr->hn_tx_lock);
		if (sched) {
			taskqueue_enqueue(txr->hn_tx_taskq,
			    &txr->hn_tx_task);
		}
	} else {
do_sched:
		/*
		 * Release oactive earlier, in the hope that others
		 * can catch up.  The task will clear oactive again,
		 * with hn_tx_lock held, to avoid possible races.
		 */
		txr->hn_oactive = 0;
		taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_txeof_task);
	}
}

static void
hn_xmit_taskfunc(void *xtxr, int pending __unused)
{
	struct hn_tx_ring *txr = xtxr;

	mtx_lock(&txr->hn_tx_lock);
	hn_xmit(txr, 0);
	mtx_unlock(&txr->hn_tx_lock);
}

static void
hn_xmit_txeof_taskfunc(void *xtxr, int pending __unused)
{
	struct hn_tx_ring *txr = xtxr;

	mtx_lock(&txr->hn_tx_lock);
	txr->hn_oactive = 0;
	hn_xmit(txr, 0);
	mtx_unlock(&txr->hn_tx_lock);
}
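
/*
 * Associate a VMBus (sub-)channel with its RX/TX ring pair, keyed by
 * the channel's sub_channel_index, and bind the channel to a CPU so
 * the rings are spread across processors.
 */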
static void
hn_channel_attach(struct hn_softc *sc, struct hv_vmbus_channel *chan)
{
	struct hn_rx_ring *rxr;
	int idx;

	idx = chan->offer_msg.offer.sub_channel_index;

	KASSERT(idx >= 0 && idx < sc->hn_rx_ring_inuse,
	    ("invalid channel index %d, should be >= 0 && < %d",
	     idx, sc->hn_rx_ring_inuse));
	rxr = &sc->hn_rx_ring[idx];
	KASSERT((rxr->hn_rx_flags & HN_RX_FLAG_ATTACHED) == 0,
	    ("RX ring %d already attached", idx));
	rxr->hn_rx_flags |= HN_RX_FLAG_ATTACHED;

	chan->hv_chan_rxr = rxr;
	if (bootverbose) {
		if_printf(sc->hn_ifp, "link RX ring %d to channel%u\n",
		    idx, chan->offer_msg.child_rel_id);
	}

	if (idx < sc->hn_tx_ring_inuse) {
		struct hn_tx_ring *txr = &sc->hn_tx_ring[idx];

		KASSERT((txr->hn_tx_flags & HN_TX_FLAG_ATTACHED) == 0,
		    ("TX ring %d already attached", idx));
		txr->hn_tx_flags |= HN_TX_FLAG_ATTACHED;

		chan->hv_chan_txr = txr;
		txr->hn_chan = chan;
		if (bootverbose) {
			if_printf(sc->hn_ifp, "link TX ring %d to channel%u\n",
			    idx, chan->offer_msg.child_rel_id);
		}
	}

	/* Bind channel to a proper CPU */
	vmbus_channel_cpu_set(chan, (sc->hn_cpu + idx) % mp_ncpus);
}

static void
hn_subchan_attach(struct hn_softc *sc, struct hv_vmbus_channel *chan)
{

	KASSERT(!HV_VMBUS_CHAN_ISPRIMARY(chan),
	    ("subchannel callback on primary channel"));
	KASSERT(chan->offer_msg.offer.sub_channel_index > 0,
	    ("invalid channel subidx %u",
	     chan->offer_msg.offer.sub_channel_index));
	hn_channel_attach(sc, chan);
}
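
/*
 * Optionally create a TX taskqueue shared by all hn(4) instances
 * (hn_share_tx_taskq) and pin its thread to a CPU when
 * hn_bind_tx_taskq is set.
 */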
static void
hn_tx_taskq_create(void *arg __unused)
{
	if (!hn_share_tx_taskq)
		return;

	hn_tx_taskq = taskqueue_create("hn_tx", M_WAITOK,
	    taskqueue_thread_enqueue, &hn_tx_taskq);
	taskqueue_start_threads(&hn_tx_taskq, 1, PI_NET, "hn tx");
	if (hn_bind_tx_taskq >= 0) {
		int cpu = hn_bind_tx_taskq;
		struct task cpuset_task;
		cpuset_t cpu_set;

		if (cpu > mp_ncpus - 1)
			cpu = mp_ncpus - 1;
		CPU_SETOF(cpu, &cpu_set);
		TASK_INIT(&cpuset_task, 0, hn_cpuset_setthread_task, &cpu_set);
		taskqueue_enqueue(hn_tx_taskq, &cpuset_task);
		taskqueue_drain(hn_tx_taskq, &cpuset_task);
	}
}
SYSINIT(hn_txtq_create, SI_SUB_DRIVERS, SI_ORDER_FIRST,
    hn_tx_taskq_create, NULL);

static void
hn_tx_taskq_destroy(void *arg __unused)
{
	if (hn_tx_taskq != NULL)
		taskqueue_free(hn_tx_taskq);
}
SYSUNINIT(hn_txtq_destroy, SI_SUB_DRIVERS, SI_ORDER_FIRST,
    hn_tx_taskq_destroy, NULL);

static device_method_t netvsc_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		netvsc_probe),
	DEVMETHOD(device_attach,	netvsc_attach),
	DEVMETHOD(device_detach,	netvsc_detach),
	DEVMETHOD(device_shutdown,	netvsc_shutdown),

	DEVMETHOD_END
};

static driver_t netvsc_driver = {
	NETVSC_DEVNAME,
	netvsc_methods,
	sizeof(struct hn_softc)
};

static devclass_t netvsc_devclass;

DRIVER_MODULE(hn, vmbus, netvsc_driver, netvsc_devclass, 0, 0);
MODULE_VERSION(hn, 1);
MODULE_DEPEND(hn, vmbus, 1, 1, 1);