/*
 * Copyright (c) 2016, Vincenzo Maffione
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/* Driver for the ptnet paravirtualized network device. */
#include <sys/cdefs.h>

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>

#include <machine/smp.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>

#include <sys/bus.h>
#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/selinfo.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <net/netmap_virt.h>
#include <dev/netmap/netmap_mem2.h>
#include <dev/virtio/network/virtio_net.h>
#ifdef WITH_PTNETMAP

#ifndef INET
#error "INET not defined, cannot support offloads"
#endif
#if __FreeBSD_version >= 1100000
static uint64_t ptnet_get_counter(if_t, ift_counter);
#else /* __FreeBSD_version < 1100000 */
typedef struct ifnet *if_t;
#define if_getsoftc(_ifp)   (_ifp)->if_softc
#endif /* __FreeBSD_version < 1100000 */
//#define PTNETMAP_STATS

extern int ptnet_vnet_hdr; /* Tunable parameter */
struct ptnet_queue_stats {
        uint64_t packets; /* if_[io]packets */
        uint64_t bytes;   /* if_[io]bytes */
        uint64_t errors;  /* if_[io]errors */
        uint64_t iqdrops; /* if_iqdrops */
        uint64_t mcasts;  /* if_[io]mcasts */
#ifdef PTNETMAP_STATS
        uint64_t kicks;
        uint64_t intrs;
#endif /* PTNETMAP_STATS */
};
struct ptnet_queue {
        struct ptnet_softc      *sc;
        struct resource         *irq;
        void                    *cookie;
        int                     kring_id;
        struct nm_csb_atok      *atok;
        struct nm_csb_ktoa      *ktoa;
        unsigned int            kick;
        struct mtx              lock;
        struct buf_ring         *bufring; /* for TX queues */
        struct ptnet_queue_stats stats;
#ifdef PTNETMAP_STATS
        struct ptnet_queue_stats last_stats;
#endif /* PTNETMAP_STATS */
        struct taskqueue        *taskq;
        struct task             task;
        char                    lock_name[16];
};
#define PTNET_Q_LOCK(_pq)       mtx_lock(&(_pq)->lock)
#define PTNET_Q_TRYLOCK(_pq)    mtx_trylock(&(_pq)->lock)
#define PTNET_Q_UNLOCK(_pq)     mtx_unlock(&(_pq)->lock)
struct ptnet_softc {
        device_t                dev;
        if_t                    ifp;
        struct ifmedia          media;
        struct mtx              lock;
        char                    lock_name[16];
        char                    hwaddr[ETHER_ADDR_LEN];

        /* Mirror of PTFEAT register. */
        uint32_t                ptfeatures;
        unsigned int            vnet_hdr_len;

        /* PCI BARs support. */
        struct resource         *iomem;
        struct resource         *msix_mem;

        unsigned int            num_rings;
        unsigned int            num_tx_rings;
        struct ptnet_queue      *queues;
        struct ptnet_queue      *rxqueues;
        struct nm_csb_atok      *csb_gh;
        struct nm_csb_ktoa      *csb_hg;

        unsigned int            min_tx_space;

        struct netmap_pt_guest_adapter *ptna;

        struct callout          tick;
#ifdef PTNETMAP_STATS
        struct timeval          last_ts;
#endif /* PTNETMAP_STATS */
};
#define PTNET_CORE_LOCK(_sc)    mtx_lock(&(_sc)->lock)
#define PTNET_CORE_UNLOCK(_sc)  mtx_unlock(&(_sc)->lock)
static int      ptnet_probe(device_t);
static int      ptnet_attach(device_t);
static int      ptnet_detach(device_t);
static int      ptnet_suspend(device_t);
static int      ptnet_resume(device_t);
static int      ptnet_shutdown(device_t);

static void     ptnet_init(void *opaque);
static int      ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int      ptnet_init_locked(struct ptnet_softc *sc);
static int      ptnet_stop(struct ptnet_softc *sc);
static int      ptnet_transmit(if_t ifp, struct mbuf *m);
static int      ptnet_drain_transmit_queue(struct ptnet_queue *pq,
                        unsigned int budget, bool may_resched);
static void     ptnet_qflush(if_t ifp);
static void     ptnet_tx_task(void *context, int pending);

static int      ptnet_media_change(if_t ifp);
static void     ptnet_media_status(if_t ifp, struct ifmediareq *ifmr);
#ifdef PTNETMAP_STATS
static void     ptnet_tick(void *opaque);
#endif /* PTNETMAP_STATS */

static int      ptnet_irqs_init(struct ptnet_softc *sc);
static void     ptnet_irqs_fini(struct ptnet_softc *sc);

static uint32_t ptnet_nm_ptctl(struct ptnet_softc *sc, uint32_t cmd);
static int      ptnet_nm_config(struct netmap_adapter *na,
                        struct nm_config_info *info);
static void     ptnet_update_vnet_hdr(struct ptnet_softc *sc);
static int      ptnet_nm_register(struct netmap_adapter *na, int onoff);
static int      ptnet_nm_txsync(struct netmap_kring *kring, int flags);
static int      ptnet_nm_rxsync(struct netmap_kring *kring, int flags);
static void     ptnet_nm_intr(struct netmap_adapter *na, int onoff);

static void     ptnet_tx_intr(void *opaque);
static void     ptnet_rx_intr(void *opaque);

static unsigned ptnet_rx_discard(struct netmap_kring *kring,
                        unsigned int head);
static int      ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget,
                        bool may_resched);
static void     ptnet_rx_task(void *context, int pending);

#ifdef DEVICE_POLLING
static poll_handler_t ptnet_poll;
#endif /* DEVICE_POLLING */
static device_method_t ptnet_methods[] = {
        DEVMETHOD(device_probe,    ptnet_probe),
        DEVMETHOD(device_attach,   ptnet_attach),
        DEVMETHOD(device_detach,   ptnet_detach),
        DEVMETHOD(device_suspend,  ptnet_suspend),
        DEVMETHOD(device_resume,   ptnet_resume),
        DEVMETHOD(device_shutdown, ptnet_shutdown),
        DEVMETHOD_END
};

static driver_t ptnet_driver = {
        "ptnet",
        ptnet_methods,
        sizeof(struct ptnet_softc)
};
/* We use (SI_ORDER_MIDDLE+2) here, see DEV_MODULE_ORDERED() invocation. */
static devclass_t ptnet_devclass;
DRIVER_MODULE_ORDERED(ptnet, pci, ptnet_driver, ptnet_devclass,
                      NULL, NULL, SI_ORDER_MIDDLE + 2);
static int
ptnet_probe(device_t dev)
{
        if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID ||
            pci_get_device(dev) != PTNETMAP_PCI_NETIF_ID)
                return (ENXIO);

        device_set_desc(dev, "ptnet network adapter");
        return (BUS_PROBE_DEFAULT);
}
static inline void
ptnet_kick(struct ptnet_queue *pq)
{
#ifdef PTNETMAP_STATS
        pq->stats.kicks ++;
#endif /* PTNETMAP_STATS */
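        /*
         * Notify the host: a write to this queue's kick register in the
         * I/O BAR signals that new work is pending on the ring.
         */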
        bus_write_4(pq->sc->iomem, pq->kick, 0);
}
#define PTNET_BUF_RING_SIZE     4096
#define PTNET_RX_BUDGET         512
#define PTNET_RX_BATCH          1
#define PTNET_TX_BUDGET         512
#define PTNET_TX_BATCH          64
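/*
 * When the virtio-net header has been negotiated, every packet on the
 * rings is prepended by one; ptnet uses the mergeable-rxbuf variant of
 * the structure, which also carries the num_buffers field.
 */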
#define PTNET_HDR_SIZE          sizeof(struct virtio_net_hdr_mrg_rxbuf)
#define PTNET_MAX_PKT_SIZE      65536
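/*
 * Checksum/TSO capabilities accepted from the network stack; they are
 * mirrored into if_hwassist by ptnet_init_locked() according to the
 * current if_capenable mask.
 */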
#define PTNET_CSUM_OFFLOAD      (CSUM_TCP | CSUM_UDP)
#define PTNET_CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6 | CSUM_UDP_IPV6)
#define PTNET_ALL_OFFLOAD       (CSUM_TSO | PTNET_CSUM_OFFLOAD |\
                                 PTNET_CSUM_OFFLOAD_IPV6)
static int
ptnet_attach(device_t dev)
{
        uint32_t ptfeatures = 0;
        unsigned int num_rx_rings, num_tx_rings;
        struct netmap_adapter na_arg;
        unsigned int nifp_offset;
        struct ptnet_softc *sc;
        uint32_t macreg;
        int err, rid;
        if_t ifp;
        int i;

        sc = device_get_softc(dev);
        sc->dev = dev;

        /* Setup PCI resources. */
        pci_enable_busmaster(dev);

        rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR);
        sc->iomem = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
                                           RF_ACTIVE);
        if (sc->iomem == NULL) {
                device_printf(dev, "Failed to map I/O BAR\n");
                return (ENXIO);
        }
        /* Negotiate features with the hypervisor. */
        if (ptnet_vnet_hdr) {
                ptfeatures |= PTNETMAP_F_VNET_HDR;
        }
        bus_write_4(sc->iomem, PTNET_IO_PTFEAT, ptfeatures); /* wanted */
        ptfeatures = bus_read_4(sc->iomem, PTNET_IO_PTFEAT); /* acked */
        sc->ptfeatures = ptfeatures;

        num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS);
        num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS);
        sc->num_rings = num_tx_rings + num_rx_rings;
        sc->num_tx_rings = num_tx_rings;

        if (sc->num_rings * sizeof(struct nm_csb_atok) > PAGE_SIZE) {
                device_printf(dev, "CSB cannot handle that many rings (%u)\n",
                              sc->num_rings);
                err = ENOMEM;
                goto err_path;
        }
        /* Allocate CSB and carry out CSB allocation protocol. */
        sc->csb_gh = contigmalloc(2*PAGE_SIZE, M_DEVBUF, M_NOWAIT | M_ZERO,
                                  (size_t)0, -1UL, PAGE_SIZE, 0);
        if (sc->csb_gh == NULL) {
                device_printf(dev, "Failed to allocate CSB\n");
                err = ENOMEM;
                goto err_path;
        }
        sc->csb_hg = (struct nm_csb_ktoa *)(((char *)sc->csb_gh) + PAGE_SIZE);

        {
                /*
                 * We use uint64_t rather than vm_paddr_t since we
                 * need 64 bit addresses even on 32 bit platforms.
                 */
                uint64_t paddr = vtophys(sc->csb_gh);

                /* CSB allocation protocol: write to BAH first, then
                 * to BAL (for both GH and HG sections). */
                bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH,
                            (paddr >> 32) & 0xffffffff);
                bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL,
                            paddr & 0xffffffff);
                paddr = vtophys(sc->csb_hg);
                bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH,
                            (paddr >> 32) & 0xffffffff);
                bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL,
                            paddr & 0xffffffff);
        }
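        /*
         * Note the ordering above: presumably the host latches the
         * complete 64-bit CSB address when the low half (BAL) is written,
         * so the high half (BAH) must already be in place by then.
         */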
        /* Allocate and initialize per-queue data structures. */
        sc->queues = malloc(sizeof(struct ptnet_queue) * sc->num_rings,
                            M_DEVBUF, M_NOWAIT | M_ZERO);
        if (sc->queues == NULL) {
                err = ENOMEM;
                goto err_path;
        }
        sc->rxqueues = sc->queues + num_tx_rings;

        for (i = 0; i < sc->num_rings; i++) {
                struct ptnet_queue *pq = sc->queues + i;

                pq->sc = sc;
                pq->kring_id = i;
                pq->kick = PTNET_IO_KICK_BASE + 4 * i;
                pq->atok = sc->csb_gh + i;
                pq->ktoa = sc->csb_hg + i;
                snprintf(pq->lock_name, sizeof(pq->lock_name), "%s-%d",
                         device_get_nameunit(dev), i);
                mtx_init(&pq->lock, pq->lock_name, NULL, MTX_DEF);
                if (i >= num_tx_rings) {
                        /* RX queue: fix kring_id. */
                        pq->kring_id -= num_tx_rings;
                } else {
                        /* TX queue: allocate buf_ring. */
                        pq->bufring = buf_ring_alloc(PTNET_BUF_RING_SIZE,
                                        M_DEVBUF, M_NOWAIT, &pq->lock);
                        if (pq->bufring == NULL) {
                                err = ENOMEM;
                                goto err_path;
                        }
                }
        }

        sc->min_tx_space = 64; /* Safe initial value. */

        err = ptnet_irqs_init(sc);
        if (err)
                goto err_path;

        /* Setup Ethernet interface. */
        sc->ifp = ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "Failed to allocate ifnet\n");
                err = ENOMEM;
                goto err_path;
        }
        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_baudrate = IF_Gbps(10);
        ifp->if_softc = sc;
        ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
        ifp->if_init = ptnet_init;
        ifp->if_ioctl = ptnet_ioctl;
#if __FreeBSD_version >= 1100000
        ifp->if_get_counter = ptnet_get_counter;
#endif
        ifp->if_transmit = ptnet_transmit;
        ifp->if_qflush = ptnet_qflush;

        ifmedia_init(&sc->media, IFM_IMASK, ptnet_media_change,
                     ptnet_media_status);
        ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL);
        ifmedia_set(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX);
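        /*
         * Read the MAC address out of the device registers: the two most
         * significant bytes live in the low 16 bits of PTNET_IO_MAC_HI,
         * the remaining four in PTNET_IO_MAC_LO.
         */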
        macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_HI);
        sc->hwaddr[0] = (macreg >> 8) & 0xff;
        sc->hwaddr[1] = macreg & 0xff;
        macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_LO);
        sc->hwaddr[2] = (macreg >> 24) & 0xff;
        sc->hwaddr[3] = (macreg >> 16) & 0xff;
        sc->hwaddr[4] = (macreg >> 8) & 0xff;
        sc->hwaddr[5] = macreg & 0xff;

        ether_ifattach(ifp, sc->hwaddr);
        ifp->if_hdrlen = sizeof(struct ether_vlan_header);
        ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;

        if (sc->ptfeatures & PTNETMAP_F_VNET_HDR) {
                /* Similarly to what the vtnet driver does, we can emulate
                 * VLAN offloadings by inserting and removing the 802.1Q
                 * header during transmit and receive. We are then able
                 * to do checksum offloading of VLAN frames. */
                ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6
                                        | IFCAP_VLAN_HWCSUM
                                        | IFCAP_TSO | IFCAP_LRO
                                        | IFCAP_VLAN_HWTSO
                                        | IFCAP_VLAN_HWTAGGING;
        }

        ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
        /* Don't enable polling by default. */
        ifp->if_capabilities |= IFCAP_POLLING;
#endif /* DEVICE_POLLING */
        snprintf(sc->lock_name, sizeof(sc->lock_name),
                 "%s", device_get_nameunit(dev));
        mtx_init(&sc->lock, sc->lock_name, "ptnet core lock", MTX_DEF);
        callout_init_mtx(&sc->tick, &sc->lock, 0);
        /* Prepare a netmap_adapter struct instance to do netmap_attach(). */
        nifp_offset = bus_read_4(sc->iomem, PTNET_IO_NIFP_OFS);
        memset(&na_arg, 0, sizeof(na_arg));
        na_arg.ifp = ifp;
        na_arg.num_tx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS);
        na_arg.num_rx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS);
        na_arg.num_tx_rings = num_tx_rings;
        na_arg.num_rx_rings = num_rx_rings;
        na_arg.nm_config = ptnet_nm_config;
        na_arg.nm_krings_create = ptnet_nm_krings_create;
        na_arg.nm_krings_delete = ptnet_nm_krings_delete;
        na_arg.nm_dtor = ptnet_nm_dtor;
        na_arg.nm_intr = ptnet_nm_intr;
        na_arg.nm_register = ptnet_nm_register;
        na_arg.nm_txsync = ptnet_nm_txsync;
        na_arg.nm_rxsync = ptnet_nm_rxsync;

        netmap_pt_guest_attach(&na_arg, nifp_offset,
                               bus_read_4(sc->iomem, PTNET_IO_HOSTMEMID));
        /* Now a netmap adapter for this ifp has been allocated, and it
         * can be accessed through NA(ifp). We also have to initialize the CSB
         * pointers. */
        sc->ptna = (struct netmap_pt_guest_adapter *)NA(ifp);

        /* If the virtio-net header was negotiated, set the virt_hdr_len
         * field in the netmap adapter, to inform users that this netmap
         * adapter requires the application to deal with the headers. */
        ptnet_update_vnet_hdr(sc);

        device_printf(dev, "%s() completed\n", __func__);

        return (0);

err_path:
        ptnet_detach(dev);
        return (err);
}
/* Stop the host sync-kloop, if it was running. */
static void
ptnet_device_shutdown(struct ptnet_softc *sc)
{
        ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_DELETE);
        bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH, 0);
        bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL, 0);
        bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH, 0);
        bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL, 0);
}
static int
ptnet_detach(device_t dev)
{
        struct ptnet_softc *sc = device_get_softc(dev);
        int i;

        ptnet_device_shutdown(sc);

#ifdef DEVICE_POLLING
        if (sc->ifp->if_capenable & IFCAP_POLLING) {
                ether_poll_deregister(sc->ifp);
        }
#endif /* DEVICE_POLLING */
        callout_drain(&sc->tick);

        /* Drain taskqueues before calling if_detach. */
        for (i = 0; i < sc->num_rings; i++) {
                struct ptnet_queue *pq = sc->queues + i;

                if (pq->taskq)
                        taskqueue_drain(pq->taskq, &pq->task);
        }

        ether_ifdetach(sc->ifp);

        /* Uninitialize netmap adapters for this device. */
        netmap_detach(sc->ifp);

        ifmedia_removeall(&sc->media);

        contigfree(sc->csb_gh, 2*PAGE_SIZE, M_DEVBUF);

        for (i = 0; i < sc->num_rings; i++) {
                struct ptnet_queue *pq = sc->queues + i;

                if (mtx_initialized(&pq->lock)) {
                        mtx_destroy(&pq->lock);
                }
                if (pq->bufring != NULL) {
                        buf_ring_free(pq->bufring, M_DEVBUF);
                }
        }
        free(sc->queues, M_DEVBUF);

        bus_release_resource(dev, SYS_RES_IOPORT,
                             PCIR_BAR(PTNETMAP_IO_PCI_BAR), sc->iomem);

        mtx_destroy(&sc->lock);

        device_printf(dev, "%s() completed\n", __func__);

        return (0);
}
static int
ptnet_suspend(device_t dev)
{
        struct ptnet_softc *sc = device_get_softc(dev);

        (void)sc;
        return (0);
}

static int
ptnet_resume(device_t dev)
{
        struct ptnet_softc *sc = device_get_softc(dev);

        (void)sc;
        return (0);
}

static int
ptnet_shutdown(device_t dev)
{
        struct ptnet_softc *sc = device_get_softc(dev);

        ptnet_device_shutdown(sc);
        return (0);
}
static int
ptnet_irqs_init(struct ptnet_softc *sc)
{
        int rid = PCIR_BAR(PTNETMAP_MSIX_PCI_BAR);
        int nvecs = sc->num_rings;
        device_t dev = sc->dev;
        int cpu_cur;
        int err;
        int i;

        if (pci_find_cap(dev, PCIY_MSIX, NULL) != 0) {
                device_printf(dev, "Could not find MSI-X capability\n");
                return (ENXIO);
        }

        sc->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
                                              &rid, RF_ACTIVE);
        if (sc->msix_mem == NULL) {
                device_printf(dev, "Failed to allocate MSIX PCI BAR\n");
                return (ENXIO);
        }

        if (pci_msix_count(dev) < nvecs) {
                device_printf(dev, "Not enough MSI-X vectors\n");
                err = ENOSPC;
                goto err_path;
        }

        err = pci_alloc_msix(dev, &nvecs);
        if (err) {
                device_printf(dev, "Failed to allocate MSI-X vectors\n");
                goto err_path;
        }

        for (i = 0; i < nvecs; i++) {
                struct ptnet_queue *pq = sc->queues + i;

                rid = i + 1;
                pq->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
                                                 RF_ACTIVE);
                if (pq->irq == NULL) {
                        device_printf(dev, "Failed to allocate interrupt "
                                           "for queue #%d\n", i);
                        err = ENOSPC;
                        goto err_path;
                }
        }
        cpu_cur = CPU_FIRST();
        for (i = 0; i < nvecs; i++) {
                struct ptnet_queue *pq = sc->queues + i;
                void (*handler)(void *) = ptnet_tx_intr;

                if (i >= sc->num_tx_rings) {
                        handler = ptnet_rx_intr;
                }
                err = bus_setup_intr(dev, pq->irq, INTR_TYPE_NET | INTR_MPSAFE,
                                     NULL /* intr_filter */, handler,
                                     pq, &pq->cookie);
                if (err) {
                        device_printf(dev, "Failed to register intr handler "
                                           "for queue #%d\n", i);
                        goto err_path;
                }

                bus_describe_intr(dev, pq->irq, pq->cookie, "q%d", i);

                bus_bind_intr(sc->dev, pq->irq, cpu_cur);
                cpu_cur = CPU_NEXT(cpu_cur);
        }

        device_printf(dev, "Allocated %d MSI-X vectors\n", nvecs);

        cpu_cur = CPU_FIRST();
        for (i = 0; i < nvecs; i++) {
                struct ptnet_queue *pq = sc->queues + i;

                if (i < sc->num_tx_rings)
                        TASK_INIT(&pq->task, 0, ptnet_tx_task, pq);
                else
                        NET_TASK_INIT(&pq->task, 0, ptnet_rx_task, pq);

                pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT,
                                taskqueue_thread_enqueue, &pq->taskq);
                taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d",
                                device_get_nameunit(sc->dev), cpu_cur);
                cpu_cur = CPU_NEXT(cpu_cur);
        }

        return (0);

err_path:
        ptnet_irqs_fini(sc);
        return (err);
}
static void
ptnet_irqs_fini(struct ptnet_softc *sc)
{
        device_t dev = sc->dev;
        int i;

        for (i = 0; i < sc->num_rings; i++) {
                struct ptnet_queue *pq = sc->queues + i;

                if (pq->taskq)
                        taskqueue_free(pq->taskq);
                if (pq->cookie)
                        bus_teardown_intr(dev, pq->irq, pq->cookie);
                if (pq->irq)
                        bus_release_resource(dev, SYS_RES_IRQ, i + 1, pq->irq);
        }

        if (sc->msix_mem) {
                pci_release_msi(dev);
                bus_release_resource(dev, SYS_RES_MEMORY,
                                     PCIR_BAR(PTNETMAP_MSIX_PCI_BAR),
                                     sc->msix_mem);
        }
}
static void
ptnet_init(void *opaque)
{
        struct ptnet_softc *sc = opaque;

        PTNET_CORE_LOCK(sc);
        ptnet_init_locked(sc);
        PTNET_CORE_UNLOCK(sc);
}
static int
ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
        struct ptnet_softc *sc = if_getsoftc(ifp);
        device_t dev = sc->dev;
        struct ifreq *ifr = (struct ifreq *)data;
        int mask __unused, err = 0;

        switch (cmd) {
        case SIOCSIFFLAGS:
                device_printf(dev, "SIOCSIFFLAGS %x\n", ifp->if_flags);
                PTNET_CORE_LOCK(sc);
                if (ifp->if_flags & IFF_UP) {
                        /* The network stack wants the interface up. */
                        err = ptnet_init_locked(sc);
                } else {
                        /* The network stack wants the interface down. */
                        err = ptnet_stop(sc);
                }
                /* We don't need to do anything to support IFF_PROMISC,
                 * since that is managed by the backend port. */
                PTNET_CORE_UNLOCK(sc);
                break;

        case SIOCSIFCAP:
                device_printf(dev, "SIOCSIFCAP %x %x\n",
                              ifr->ifr_reqcap, ifp->if_capenable);
                mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
                if (mask & IFCAP_POLLING) {
                        struct ptnet_queue *pq;
                        int i;

                        if (ifr->ifr_reqcap & IFCAP_POLLING) {
                                err = ether_poll_register(ptnet_poll, ifp);
                                if (err) {
                                        break;
                                }
                                /* Stop queues and sync with taskqueues. */
                                ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
                                for (i = 0; i < sc->num_rings; i++) {
                                        pq = sc->queues + i;
                                        /* Make sure the worker sees
                                         * IFF_DRV_RUNNING cleared. */
                                        PTNET_Q_LOCK(pq);
                                        pq->atok->appl_need_kick = 0;
                                        PTNET_Q_UNLOCK(pq);
                                        /* Wait for rescheduling to finish. */
                                        if (pq->taskq) {
                                                taskqueue_drain(pq->taskq,
                                                                &pq->task);
                                        }
                                }
                                ifp->if_drv_flags |= IFF_DRV_RUNNING;
                        } else {
                                err = ether_poll_deregister(ifp);
                                for (i = 0; i < sc->num_rings; i++) {
                                        pq = sc->queues + i;
                                        PTNET_Q_LOCK(pq);
                                        pq->atok->appl_need_kick = 1;
                                        PTNET_Q_UNLOCK(pq);
                                }
                        }
                }
#endif /* DEVICE_POLLING */
                ifp->if_capenable = ifr->ifr_reqcap;
                break;

        case SIOCSIFMTU:
                /* We support any reasonable MTU. */
                if (ifr->ifr_mtu < ETHERMIN ||
                    ifr->ifr_mtu > PTNET_MAX_PKT_SIZE) {
                        err = EINVAL;
                } else {
                        PTNET_CORE_LOCK(sc);
                        ifp->if_mtu = ifr->ifr_mtu;
                        PTNET_CORE_UNLOCK(sc);
                }
                break;

        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                err = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
                break;

        default:
                err = ether_ioctl(ifp, cmd, data);
                break;
        }

        return err;
}
static int
ptnet_init_locked(struct ptnet_softc *sc)
{
        if_t ifp = sc->ifp;
        struct netmap_adapter *na_dr = &sc->ptna->dr.up;
        struct netmap_adapter *na_nm = &sc->ptna->hwup.up;
        unsigned int nm_buf_size;
        int ret;

        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                return 0; /* nothing to do */
        }

        device_printf(sc->dev, "%s\n", __func__);

        /* Translate offload capabilities according to if_capenable. */
        ifp->if_hwassist = 0;
        if (ifp->if_capenable & IFCAP_TXCSUM)
                ifp->if_hwassist |= PTNET_CSUM_OFFLOAD;
        if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
                ifp->if_hwassist |= PTNET_CSUM_OFFLOAD_IPV6;
        if (ifp->if_capenable & IFCAP_TSO4)
                ifp->if_hwassist |= CSUM_IP_TSO;
        if (ifp->if_capenable & IFCAP_TSO6)
                ifp->if_hwassist |= CSUM_IP6_TSO;
        /*
         * Prepare the interface for netmap mode access.
         */
        netmap_update_config(na_dr);

        ret = netmap_mem_finalize(na_dr->nm_mem, na_dr);
        if (ret) {
                device_printf(sc->dev, "netmap_mem_finalize() failed\n");
                return ret;
        }

        if (sc->ptna->backend_users == 0) {
                ret = ptnet_nm_krings_create(na_nm);
                if (ret) {
                        device_printf(sc->dev, "ptnet_nm_krings_create() "
                                               "failed\n");
                        goto err_mem_finalize;
                }

                ret = netmap_mem_rings_create(na_dr);
                if (ret) {
                        device_printf(sc->dev, "netmap_mem_rings_create() "
                                               "failed\n");
                        goto err_rings_create;
                }

                ret = netmap_mem_get_lut(na_dr->nm_mem, &na_dr->na_lut);
                if (ret) {
                        device_printf(sc->dev, "netmap_mem_get_lut() "
                                               "failed\n");
                        goto err_get_lut;
                }
        }

        ret = ptnet_nm_register(na_dr, 1 /* on */);
        if (ret) {
                goto err_register;
        }
        nm_buf_size = NETMAP_BUF_SIZE(na_dr);

        KASSERT(nm_buf_size > 0, ("Invalid netmap buffer size"));
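        /*
         * Keep enough free TX slots for a worst-case packet: roughly
         * PTNET_MAX_PKT_SIZE/nm_buf_size netmap buffers, with the +2
         * presumably covering the integer-division rounding and the
         * space taken by the virtio-net header.
         */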
        sc->min_tx_space = PTNET_MAX_PKT_SIZE / nm_buf_size + 2;
        device_printf(sc->dev, "%s: min_tx_space = %u\n", __func__,
                      sc->min_tx_space);

#ifdef PTNETMAP_STATS
        callout_reset(&sc->tick, hz, ptnet_tick, sc);
#endif /* PTNETMAP_STATS */

        ifp->if_drv_flags |= IFF_DRV_RUNNING;

        return 0;
err_register:
        memset(&na_dr->na_lut, 0, sizeof(na_dr->na_lut));
err_get_lut:
        netmap_mem_rings_delete(na_dr);
err_rings_create:
        ptnet_nm_krings_delete(na_nm);
err_mem_finalize:
        netmap_mem_deref(na_dr->nm_mem, na_dr);

        return ret;
}
/* To be called under core lock. */
static int
ptnet_stop(struct ptnet_softc *sc)
{
        if_t ifp = sc->ifp;
        struct netmap_adapter *na_dr = &sc->ptna->dr.up;
        struct netmap_adapter *na_nm = &sc->ptna->hwup.up;
        int i;

        device_printf(sc->dev, "%s\n", __func__);

        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                return 0; /* nothing to do */
        }

        /* Clear the driver-ready flag, and synchronize with all the queues,
         * so that after this loop we are sure nobody is still working on
         * the device. This scheme is taken from the vtnet driver. */
        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        callout_stop(&sc->tick);
        for (i = 0; i < sc->num_rings; i++) {
                PTNET_Q_LOCK(sc->queues + i);
                PTNET_Q_UNLOCK(sc->queues + i);
        }

        ptnet_nm_register(na_dr, 0 /* off */);

        if (sc->ptna->backend_users == 0) {
                netmap_mem_rings_delete(na_dr);
                ptnet_nm_krings_delete(na_nm);
        }
        netmap_mem_deref(na_dr->nm_mem, na_dr);

        return 0;
}
static void
ptnet_qflush(if_t ifp)
{
        struct ptnet_softc *sc = if_getsoftc(ifp);
        int i;

        /* Flush all the bufrings and do the interface flush. */
        for (i = 0; i < sc->num_rings; i++) {
                struct ptnet_queue *pq = sc->queues + i;
                struct mbuf *m;

                PTNET_Q_LOCK(pq);
                if (pq->bufring) {
                        while ((m = buf_ring_dequeue_sc(pq->bufring))) {
                                m_freem(m);
                        }
                }
                PTNET_Q_UNLOCK(pq);
        }

        if_qflush(ifp);
}
static int
ptnet_media_change(if_t ifp)
{
        struct ptnet_softc *sc = if_getsoftc(ifp);
        struct ifmedia *ifm = &sc->media;

        if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
                return EINVAL;
        }

        return 0;
}

#if __FreeBSD_version >= 1100000
static uint64_t
ptnet_get_counter(if_t ifp, ift_counter cnt)
{
        struct ptnet_softc *sc = if_getsoftc(ifp);
        struct ptnet_queue_stats stats[2];
        int i;

        /* Accumulate statistics over the queues. */
        memset(stats, 0, sizeof(stats));
        for (i = 0; i < sc->num_rings; i++) {
                struct ptnet_queue *pq = sc->queues + i;
                int idx = (i < sc->num_tx_rings) ? 0 : 1;

                stats[idx].packets += pq->stats.packets;
                stats[idx].bytes += pq->stats.bytes;
                stats[idx].errors += pq->stats.errors;
                stats[idx].iqdrops += pq->stats.iqdrops;
                stats[idx].mcasts += pq->stats.mcasts;
        }

        switch (cnt) {
        case IFCOUNTER_IPACKETS:
                return (stats[1].packets);
        case IFCOUNTER_IQDROPS:
                return (stats[1].iqdrops);
        case IFCOUNTER_IERRORS:
                return (stats[1].errors);
        case IFCOUNTER_OPACKETS:
                return (stats[0].packets);
        case IFCOUNTER_OBYTES:
                return (stats[0].bytes);
        case IFCOUNTER_OMCASTS:
                return (stats[0].mcasts);
        default:
                return (if_get_counter_default(ifp, cnt));
        }
}
#endif
#ifdef PTNETMAP_STATS
/* Called under core lock. */
static void
ptnet_tick(void *opaque)
{
        struct ptnet_softc *sc = opaque;
        int i;

        for (i = 0; i < sc->num_rings; i++) {
                struct ptnet_queue *pq = sc->queues + i;
                struct ptnet_queue_stats cur = pq->stats;
                struct timeval now;
                unsigned int delta;

                microtime(&now);
                delta = now.tv_usec - sc->last_ts.tv_usec +
                        (now.tv_sec - sc->last_ts.tv_sec) * 1000000;
                delta /= 1000; /* in milliseconds */

                if (delta == 0)
                        continue;

                device_printf(sc->dev, "#%d[%u ms]:pkts %lu, kicks %lu, "
                              "intr %lu\n", i, delta,
                              (cur.packets - pq->last_stats.packets),
                              (cur.kicks - pq->last_stats.kicks),
                              (cur.intrs - pq->last_stats.intrs));
                pq->last_stats = cur;
        }
        microtime(&sc->last_ts);
        callout_schedule(&sc->tick, hz);
}
#endif /* PTNETMAP_STATS */
static void
ptnet_media_status(if_t ifp, struct ifmediareq *ifmr)
{
        /* We are always active, as the backend netmap port is
         * always open in netmap mode. */
        ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
        ifmr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX;
}
static uint32_t
ptnet_nm_ptctl(struct ptnet_softc *sc, uint32_t cmd)
{
        /*
         * Write a command and read back the error status,
         * with zero meaning success.
         */
        bus_write_4(sc->iomem, PTNET_IO_PTCTL, cmd);
        return bus_read_4(sc->iomem, PTNET_IO_PTCTL);
}
static int
ptnet_nm_config(struct netmap_adapter *na, struct nm_config_info *info)
{
        struct ptnet_softc *sc = if_getsoftc(na->ifp);

        info->num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS);
        info->num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS);
        info->num_tx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS);
        info->num_rx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS);
        info->rx_buf_maxsize = NETMAP_BUF_SIZE(na);

        device_printf(sc->dev, "txr %u, rxr %u, txd %u, rxd %u, rxbufsz %u\n",
                      info->num_tx_rings, info->num_rx_rings,
                      info->num_tx_descs, info->num_rx_descs,
                      info->rx_buf_maxsize);

        return 0;
}
static void
ptnet_sync_from_csb(struct ptnet_softc *sc, struct netmap_adapter *na)
{
        int i;

        /* Sync krings from the host, reading from
         * the CSB. */
        for (i = 0; i < sc->num_rings; i++) {
                struct nm_csb_atok *atok = sc->queues[i].atok;
                struct nm_csb_ktoa *ktoa = sc->queues[i].ktoa;
                struct netmap_kring *kring;

                if (i < na->num_tx_rings) {
                        kring = na->tx_rings[i];
                } else {
                        kring = na->rx_rings[i - na->num_tx_rings];
                }
                kring->rhead = kring->ring->head = atok->head;
                kring->rcur = kring->ring->cur = atok->cur;
                kring->nr_hwcur = ktoa->hwcur;
                kring->nr_hwtail = kring->rtail =
                        kring->ring->tail = ktoa->hwtail;

                nm_prdis("%d,%d: csb {hc %u h %u c %u ht %u}", t, i,
                         ktoa->hwcur, atok->head, atok->cur,
                         ktoa->hwtail);
                nm_prdis("%d,%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}",
                         t, i, kring->nr_hwcur, kring->rhead, kring->rcur,
                         kring->ring->head, kring->ring->cur, kring->nr_hwtail,
                         kring->rtail, kring->ring->tail);
        }
}
static void
ptnet_update_vnet_hdr(struct ptnet_softc *sc)
{
        unsigned int wanted_hdr_len = ptnet_vnet_hdr ? PTNET_HDR_SIZE : 0;

        bus_write_4(sc->iomem, PTNET_IO_VNET_HDR_LEN, wanted_hdr_len);
        sc->vnet_hdr_len = bus_read_4(sc->iomem, PTNET_IO_VNET_HDR_LEN);
        sc->ptna->hwup.up.virt_hdr_len = sc->vnet_hdr_len;
}
static int
ptnet_nm_register(struct netmap_adapter *na, int onoff)
{
        /* device-specific */
        if_t ifp = na->ifp;
        struct ptnet_softc *sc = if_getsoftc(ifp);
        int native = (na == &sc->ptna->hwup.up);
        struct ptnet_queue *pq;
        int ret = 0;
        int i;

        if (!onoff) {
                sc->ptna->backend_users--;
        }

        /* If this is the last netmap client, guest interrupt enable flags may
         * be in arbitrary state. Since these flags are going to be used also
         * by the netdevice driver, we have to make sure to start with
         * notifications enabled. Also, schedule the RX taskqueues to flush
         * pending packets in the RX rings, since we will not receive further
         * interrupts until these will be processed. */
        if (native && !onoff && na->active_fds == 0) {
                nm_prinf("Exit netmap mode, re-enable interrupts");
                for (i = 0; i < sc->num_rings; i++) {
                        pq = sc->queues + i;
                        pq->atok->appl_need_kick = 1;
                }
        }

        if (onoff) {
                if (sc->ptna->backend_users == 0) {
                        /* Initialize notification enable fields in the CSB. */
                        for (i = 0; i < sc->num_rings; i++) {
                                pq = sc->queues + i;
                                pq->ktoa->kern_need_kick = 1;
                                pq->atok->appl_need_kick =
                                        (!(ifp->if_capenable & IFCAP_POLLING)
                                                && i >= sc->num_tx_rings);
                        }

                        /* Set the virtio-net header length. */
                        ptnet_update_vnet_hdr(sc);

                        /* Make sure the host adapter passed through is ready
                         * for txsync/rxsync. */
                        ret = ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_CREATE);
                        if (ret) {
                                return ret;
                        }

                        /* Align the guest krings and rings to the state stored
                         * in the CSB. */
                        ptnet_sync_from_csb(sc, na);
                }

                /* If not native, don't call nm_set_native_flags, since we
                 * don't want to replace the if_transmit method, nor to set
                 * NAF_NETMAP_ON. */
                if (native) {
                        netmap_krings_mode_commit(na, onoff);
                        nm_set_native_flags(na);
                }
        } else {
                if (native) {
                        nm_clear_native_flags(na);
                        netmap_krings_mode_commit(na, onoff);
                }

                if (sc->ptna->backend_users == 0) {
                        ret = ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_DELETE);
                }
        }

        if (onoff) {
                sc->ptna->backend_users++;
        }

        return ret;
}
static int
ptnet_nm_txsync(struct netmap_kring *kring, int flags)
{
        struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
        struct ptnet_queue *pq = sc->queues + kring->ring_id;
        bool notify;

        notify = netmap_pt_guest_txsync(pq->atok, pq->ktoa, kring, flags);
        if (notify) {
                ptnet_kick(pq);
        }

        return 0;
}

static int
ptnet_nm_rxsync(struct netmap_kring *kring, int flags)
{
        struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
        struct ptnet_queue *pq = sc->rxqueues + kring->ring_id;
        bool notify;

        notify = netmap_pt_guest_rxsync(pq->atok, pq->ktoa, kring, flags);
        if (notify) {
                ptnet_kick(pq);
        }

        return 0;
}
static void
ptnet_nm_intr(struct netmap_adapter *na, int onoff)
{
        struct ptnet_softc *sc = if_getsoftc(na->ifp);
        int i;

        for (i = 0; i < sc->num_rings; i++) {
                struct ptnet_queue *pq = sc->queues + i;

                pq->atok->appl_need_kick = onoff;
        }
}
static void
ptnet_tx_intr(void *opaque)
{
        struct ptnet_queue *pq = opaque;
        struct ptnet_softc *sc = pq->sc;

        DBG(device_printf(sc->dev, "Tx interrupt #%d\n", pq->kring_id));
#ifdef PTNETMAP_STATS
        pq->stats.intrs ++;
#endif /* PTNETMAP_STATS */

        if (netmap_tx_irq(sc->ifp, pq->kring_id) != NM_IRQ_PASS) {
                return;
        }

        /* Schedule the taskqueue to process the pending transmission
         * requests. Note that vtnet, if_em and if_igb just call their
         * transmit routine directly here, at least when using MSI-X
         * interrupts; the if_em driver instead schedules its taskqueue
         * when using legacy interrupts. */
        taskqueue_enqueue(pq->taskq, &pq->task);
}
static void
ptnet_rx_intr(void *opaque)
{
        struct ptnet_queue *pq = opaque;
        struct ptnet_softc *sc = pq->sc;
        unsigned int unused;

        DBG(device_printf(sc->dev, "Rx interrupt #%d\n", pq->kring_id));
#ifdef PTNETMAP_STATS
        pq->stats.intrs ++;
#endif /* PTNETMAP_STATS */

        if (netmap_rx_irq(sc->ifp, pq->kring_id, &unused) != NM_IRQ_PASS) {
                return;
        }

        /* Like the vtnet, if_igb and if_em drivers when using MSI-X
         * interrupts, receive-side processing is executed directly in
         * the interrupt service routine. Alternatively, we may schedule
         * the taskqueue. */
        ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
}
static void
ptnet_vlan_tag_remove(struct mbuf *m)
{
        struct ether_vlan_header *evh;

        evh = mtod(m, struct ether_vlan_header *);
        m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag);
        m->m_flags |= M_VLANTAG;

        /* Strip the 802.1Q header. */
        bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN,
              ETHER_HDR_LEN - ETHER_TYPE_LEN);
        m_adj(m, ETHER_VLAN_ENCAP_LEN);
}
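/*
 * Publish the new head/cur pair of a ring to the host through the CSB,
 * kicking the host if it asked to be notified.
 */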
static void
ptnet_ring_update(struct ptnet_queue *pq, struct netmap_kring *kring,
                  unsigned int head, unsigned int sync_flags)
{
        struct netmap_ring *ring = kring->ring;
        struct nm_csb_atok *atok = pq->atok;
        struct nm_csb_ktoa *ktoa = pq->ktoa;

        /* Some packets have been pushed to the netmap ring. We have
         * to tell the host to process the new packets, updating cur
         * and head in the CSB. */
        ring->head = ring->cur = head;

        /* Mimic nm_txsync_prologue/nm_rxsync_prologue. */
        kring->rcur = kring->rhead = head;

        nm_sync_kloop_appl_write(atok, kring->rcur, kring->rhead);

        /* Kick the host if needed. */
        if (NM_ACCESS_ONCE(ktoa->kern_need_kick)) {
                atok->sync_flags = sync_flags;
                ptnet_kick(pq);
        }
}
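/*
 * True when fewer than _min free slots separate _h (the next slot the
 * driver would fill) from the kring tail; the nkr_num_slots term accounts
 * for circular-ring wrap-around when _h sits past rtail.
 */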
#define PTNET_TX_NOSPACE(_h, _k, _min) \
        ((((_h) < (_k)->rtail) ? 0 : (_k)->nkr_num_slots) + \
                (_k)->rtail - (_h)) < (_min)
/* This function may be called by the network stack, or by
 * the taskqueue thread. */
static int
ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget,
                           bool may_resched)
{
        struct ptnet_softc *sc = pq->sc;
        bool have_vnet_hdr = sc->vnet_hdr_len;
        struct netmap_adapter *na = &sc->ptna->dr.up;
        if_t ifp = sc->ifp;
        unsigned int batch_count = 0;
        struct nm_csb_atok *atok;
        struct nm_csb_ktoa *ktoa;
        struct netmap_kring *kring;
        struct netmap_ring *ring;
        struct netmap_slot *slot;
        unsigned int count = 0;
        unsigned int minspace;
        unsigned int head;
        unsigned int lim;
        struct mbuf *mhead;
        struct mbuf *mf;
        int nmbuf_bytes;
        uint8_t *nmbuf;

        if (!PTNET_Q_TRYLOCK(pq)) {
                /* We failed to acquire the lock, schedule the taskqueue. */
                nm_prlim(1, "Deferring TX work");
                if (may_resched) {
                        taskqueue_enqueue(pq->taskq, &pq->task);
                }

                return 0;
        }

        if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
                PTNET_Q_UNLOCK(pq);
                nm_prlim(1, "Interface is down");
                return ENETDOWN;
        }

        atok = pq->atok;
        ktoa = pq->ktoa;
        kring = na->tx_rings[pq->kring_id];
        ring = kring->ring;
        lim = kring->nkr_num_slots - 1;
        head = ring->head;
        minspace = sc->min_tx_space;
        while (count < budget) {
                if (PTNET_TX_NOSPACE(head, kring, minspace)) {
                        /* We ran out of slots; let's see if the host has
                         * freed up some, by reading hwcur and hwtail from
                         * the CSB. */
                        ptnet_sync_tail(ktoa, kring);

                        if (PTNET_TX_NOSPACE(head, kring, minspace)) {
                                /* Still no slots available. Reactivate the
                                 * interrupts so that we can be notified
                                 * when some free slots are made available by
                                 * the host. */
                                atok->appl_need_kick = 1;

                                /* Double check. We need a full barrier to
                                 * prevent the store to atok->appl_need_kick
                                 * from being reordered with the load from
                                 * ktoa->hwcur and ktoa->hwtail (store-load
                                 * barrier). */
                                nm_stld_barrier();
                                ptnet_sync_tail(ktoa, kring);
                                if (likely(PTNET_TX_NOSPACE(head, kring,
                                                            minspace))) {
                                        break;
                                }

                                nm_prlim(1, "Found more slots by doublecheck");
                                /* More slots were freed before reactivating
                                 * the interrupts. */
                                atok->appl_need_kick = 0;
                        }
                }
                mhead = drbr_peek(ifp, pq->bufring);
                if (mhead == NULL) {
                        break;
                }

                /* Initialize transmission state variables. */
                slot = ring->slot + head;
                nmbuf = NMB(na, slot);
                nmbuf_bytes = 0;

                /* If needed, prepare the virtio-net header at the beginning
                 * of the first slot. */
                if (have_vnet_hdr) {
                        struct virtio_net_hdr *vh =
                                        (struct virtio_net_hdr *)nmbuf;

                        /* For performance, we could replace this memset()
                         * with two 8-byte-wide writes. */
                        memset(nmbuf, 0, PTNET_HDR_SIZE);
                        if (mhead->m_pkthdr.csum_flags & PTNET_ALL_OFFLOAD) {
                                mhead = virtio_net_tx_offload(ifp, mhead, false,
                                                              vh);
                                if (unlikely(!mhead)) {
                                        /* Packet dropped because errors
                                         * occurred while preparing the vnet
                                         * header. Let's go ahead with the
                                         * next packet. */
                                        pq->stats.errors ++;
                                        drbr_advance(ifp, pq->bufring);
                                        continue;
                                }
                        }
                        nm_prdis(1, "%s: [csum_flags %lX] vnet hdr: flags %x "
                                 "csum_start %u csum_ofs %u hdr_len = %u "
                                 "gso_size %u gso_type %x", __func__,
                                 mhead->m_pkthdr.csum_flags, vh->flags,
                                 vh->csum_start, vh->csum_offset, vh->hdr_len,
                                 vh->gso_size, vh->gso_type);

                        nmbuf += PTNET_HDR_SIZE;
                        nmbuf_bytes += PTNET_HDR_SIZE;
                }
                for (mf = mhead; mf; mf = mf->m_next) {
                        uint8_t *mdata = mf->m_data;
                        int mlen = mf->m_len;

                        for (;;) {
                                int copy = NETMAP_BUF_SIZE(na) - nmbuf_bytes;

                                if (copy > mlen) {
                                        copy = mlen;
                                }
                                memcpy(nmbuf, mdata, copy);

                                mdata += copy;
                                mlen -= copy;
                                nmbuf += copy;
                                nmbuf_bytes += copy;

                                if (!mlen) {
                                        break;
                                }

                                slot->len = nmbuf_bytes;
                                slot->flags = NS_MOREFRAG;

                                head = nm_next(head, lim);
                                KASSERT(head != ring->tail,
                                        ("Unexpectedly ran out of TX space"));
                                slot = ring->slot + head;
                                nmbuf = NMB(na, slot);
                                nmbuf_bytes = 0;
                        }
                }
                /* Complete last slot and update head. */
                slot->len = nmbuf_bytes;
                slot->flags = 0;
                head = nm_next(head, lim);

                /* Consume the packet just processed. */
                drbr_advance(ifp, pq->bufring);

                /* Copy the packet to listeners. */
                ETHER_BPF_MTAP(ifp, mhead);

                pq->stats.packets ++;
                pq->stats.bytes += mhead->m_pkthdr.len;
                if (mhead->m_flags & M_MCAST) {
                        pq->stats.mcasts ++;
                }

                m_freem(mhead);

                count ++;
                if (++batch_count == PTNET_TX_BATCH) {
                        ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM);
                        batch_count = 0;
                }
        }

        if (batch_count) {
                ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM);
        }

        if (count >= budget && may_resched) {
                DBG(nm_prlim(1, "out of budget: resched, %d mbufs pending\n",
                             drbr_inuse(ifp, pq->bufring)));
                taskqueue_enqueue(pq->taskq, &pq->task);
        }

        PTNET_Q_UNLOCK(pq);

        return count;
}
static int
ptnet_transmit(if_t ifp, struct mbuf *m)
{
        struct ptnet_softc *sc = if_getsoftc(ifp);
        struct ptnet_queue *pq;
        unsigned int queue_idx;
        int err;

        DBG(device_printf(sc->dev, "transmit %p\n", m));

        /* Insert 802.1Q header if needed. */
        if (m->m_flags & M_VLANTAG) {
                m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
                if (m == NULL) {
                        return ENOBUFS;
                }
                m->m_flags &= ~M_VLANTAG;
        }

        /* Get the flow-id if available. */
        queue_idx = (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) ?
                    m->m_pkthdr.flowid : curcpu;

        if (unlikely(queue_idx >= sc->num_tx_rings)) {
                queue_idx %= sc->num_tx_rings;
        }

        pq = sc->queues + queue_idx;

        err = drbr_enqueue(ifp, pq->bufring, m);
        if (err) {
                /* ENOBUFS when the bufring is full. */
                nm_prlim(1, "%s: drbr_enqueue() failed %d\n",
                         __func__, err);
                pq->stats.errors ++;
                return err;
        }

        if (ifp->if_capenable & IFCAP_POLLING) {
                /* If polling is on, the transmit queues will be
                 * drained by the poller. */
                return 0;
        }

        err = ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);

        return (err < 0) ? err : 0;
}
static unsigned
ptnet_rx_discard(struct netmap_kring *kring, unsigned int head)
{
        struct netmap_ring *ring = kring->ring;
        struct netmap_slot *slot = ring->slot + head;

        for (;;) {
                head = nm_next(head, kring->nkr_num_slots - 1);
                if (!(slot->flags & NS_MOREFRAG) || head == ring->tail) {
                        break;
                }
                slot = ring->slot + head;
        }

        return head;
}
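/*
 * Copy one netmap receive buffer into the mbuf chain currently ending at
 * mtail, allocating and linking new clusters as the current one fills up.
 * Returns the new tail of the chain, or NULL if a cluster allocation fails.
 */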
static inline struct mbuf *
ptnet_rx_slot(struct mbuf *mtail, uint8_t *nmbuf, unsigned int nmbuf_len)
{
        uint8_t *mdata = mtod(mtail, uint8_t *) + mtail->m_len;

        do {
                unsigned int copy;

                if (mtail->m_len == MCLBYTES) {
                        struct mbuf *mf;

                        mf = m_getcl(M_NOWAIT, MT_DATA, 0);
                        if (unlikely(!mf)) {
                                return NULL;
                        }

                        mtail->m_next = mf;
                        mtail = mf;
                        mdata = mtod(mtail, uint8_t *);
                        mtail->m_len = 0;
                }

                copy = MCLBYTES - mtail->m_len;
                if (nmbuf_len < copy) {
                        copy = nmbuf_len;
                }
                memcpy(mdata, nmbuf, copy);

                nmbuf += copy;
                nmbuf_len -= copy;
                mdata += copy;
                mtail->m_len += copy;
        } while (nmbuf_len);

        return mtail;
}
static int
ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
{
        struct ptnet_softc *sc = pq->sc;
        bool have_vnet_hdr = sc->vnet_hdr_len;
        struct nm_csb_atok *atok = pq->atok;
        struct nm_csb_ktoa *ktoa = pq->ktoa;
        struct netmap_adapter *na = &sc->ptna->dr.up;
        struct netmap_kring *kring = na->rx_rings[pq->kring_id];
        struct netmap_ring *ring = kring->ring;
        unsigned int const lim = kring->nkr_num_slots - 1;
        unsigned int batch_count = 0;
        if_t ifp = sc->ifp;
        unsigned int count = 0;
        uint32_t head;

        PTNET_Q_LOCK(pq);

        if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
                goto unlock;
        }

        kring->nr_kflags &= ~NKR_PENDINTR;

        head = ring->head;
        while (count < budget) {
                uint32_t prev_head = head;
                struct mbuf *mhead, *mtail;
                struct virtio_net_hdr *vh;
                struct netmap_slot *slot;
                unsigned int nmbuf_len;
                uint8_t *nmbuf;
                int deliver = 1; /* Deliver the mbuf to the network stack. */

                if (head == ring->tail) {
                        /* We ran out of slots; let's see if the host has
                         * added some, by reading hwcur and hwtail from
                         * the CSB. */
                        ptnet_sync_tail(ktoa, kring);

                        if (head == ring->tail) {
                                /* Still no slots available. Reactivate
                                 * interrupts as they were disabled by the
                                 * host thread right before issuing the
                                 * last interrupt. */
                                atok->appl_need_kick = 1;

                                /* Double check for more completed RX slots.
                                 * We need a full barrier to prevent the store
                                 * to atok->appl_need_kick from being reordered
                                 * with the load from ktoa->hwcur and
                                 * ktoa->hwtail (store-load barrier). */
                                nm_stld_barrier();
                                ptnet_sync_tail(ktoa, kring);
                                if (likely(head == ring->tail)) {
                                        break;
                                }
                                atok->appl_need_kick = 0;
                        }
                }
                /* Initialize ring state variables, possibly grabbing the
                 * virtio-net header. */
                slot = ring->slot + head;
                nmbuf = NMB(na, slot);
                nmbuf_len = slot->len;

                vh = (struct virtio_net_hdr *)nmbuf;
                if (have_vnet_hdr) {
                        if (unlikely(nmbuf_len < PTNET_HDR_SIZE)) {
                                /* There is no good reason why the host should
                                 * put the header in multiple netmap slots.
                                 * If this is the case, discard. */
                                nm_prlim(1, "Fragmented vnet-hdr: dropping");
                                head = ptnet_rx_discard(kring, head);
                                pq->stats.iqdrops ++;
                                deliver = 0;
                                goto skip;
                        }
                        nm_prdis(1, "%s: vnet hdr: flags %x csum_start %u "
                                 "csum_ofs %u hdr_len = %u gso_size %u "
                                 "gso_type %x", __func__, vh->flags,
                                 vh->csum_start, vh->csum_offset, vh->hdr_len,
                                 vh->gso_size, vh->gso_type);

                        nmbuf += PTNET_HDR_SIZE;
                        nmbuf_len -= PTNET_HDR_SIZE;
                }
                /* Allocate the head of a new mbuf chain.
                 * We use m_getcl() to allocate an mbuf with standard cluster
                 * size (MCLBYTES). In the future we could use m_getjcl()
                 * to choose different sizes. */
                mhead = mtail = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
                if (unlikely(mhead == NULL)) {
                        device_printf(sc->dev, "%s: failed to allocate mbuf "
                                      "head\n", __func__);
                        pq->stats.errors ++;
                        break;
                }

                /* Initialize the mbuf state variables. */
                mhead->m_pkthdr.len = nmbuf_len;
                mtail->m_len = 0;
                /* Scan all the netmap slots containing the current packet. */
                for (;;) {
                        DBG(device_printf(sc->dev, "%s: h %u t %u rcv frag "
                                          "len %u, flags %u\n", __func__,
                                          head, ring->tail, slot->len,
                                          slot->flags));

                        mtail = ptnet_rx_slot(mtail, nmbuf, nmbuf_len);
                        if (unlikely(!mtail)) {
                                /* Ouch. We ran out of memory while processing
                                 * a packet. We have to restore the previous
                                 * head position, free the mbuf chain, and
                                 * schedule the taskqueue to give the packet
                                 * another chance. */
                                device_printf(sc->dev, "%s: failed to allocate"
                                        " mbuf frag, reset head %u --> %u\n",
                                        __func__, head, prev_head);
                                head = prev_head;
                                m_freem(mhead);
                                pq->stats.errors ++;
                                if (may_resched) {
                                        taskqueue_enqueue(pq->taskq,
                                                          &pq->task);
                                }
                                goto escape;
                        }

                        /* We have to increment head irrespective of whether
                         * NS_MOREFRAG is set or not. */
                        head = nm_next(head, lim);

                        if (!(slot->flags & NS_MOREFRAG)) {
                                break;
                        }

                        if (unlikely(head == ring->tail)) {
                                /* The very last slot prepared by the host has
                                 * NS_MOREFRAG set. Drop it and continue the
                                 * outer cycle (to do the double-check). */
                                nm_prlim(1, "Incomplete packet: dropping");
                                m_freem(mhead);
                                pq->stats.iqdrops ++;
                                deliver = 0;
                                goto skip;
                        }

                        slot = ring->slot + head;
                        nmbuf = NMB(na, slot);
                        nmbuf_len = slot->len;
                        mhead->m_pkthdr.len += nmbuf_len;
                }
                mhead->m_pkthdr.rcvif = ifp;
                mhead->m_pkthdr.csum_flags = 0;

                /* Store the queue idx in the packet header. */
                mhead->m_pkthdr.flowid = pq->kring_id;
                M_HASHTYPE_SET(mhead, M_HASHTYPE_OPAQUE);

                if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
                        struct ether_header *eh;

                        eh = mtod(mhead, struct ether_header *);
                        if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
                                ptnet_vlan_tag_remove(mhead);
                                /*
                                 * With the 802.1Q header removed, update the
                                 * checksum starting location accordingly.
                                 */
                                if (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
                                        vh->csum_start -= ETHER_VLAN_ENCAP_LEN;
                        }
                }

                if (unlikely(have_vnet_hdr && virtio_net_rx_csum(mhead, vh))) {
                        m_freem(mhead);
                        nm_prlim(1, "Csum offload error: dropping");
                        pq->stats.iqdrops ++;
                        deliver = 0;
                }

skip:
                if (++batch_count >= PTNET_RX_BATCH) {
                        /* Some packets have been (or will be) pushed to the
                         * network stack. We need to update the CSB to tell
                         * the host about the new ring->cur and ring->head
                         * (RX buffer refill). */
                        ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
                        batch_count = 0;
                }

                if (likely(deliver)) {
                        pq->stats.packets ++;
                        pq->stats.bytes += mhead->m_pkthdr.len;

                        PTNET_Q_UNLOCK(pq);
                        (*ifp->if_input)(ifp, mhead);
                        PTNET_Q_LOCK(pq);

                        /* The ring->head index (and related indices) is
                         * updated under pq lock by ptnet_ring_update().
                         * Since we dropped the lock to call if_input(), we
                         * must reload ring->head and restart processing the
                         * ring from there. */
                        head = ring->head;

                        if (unlikely(!(ifp->if_drv_flags &
                                       IFF_DRV_RUNNING))) {
                                /* The interface has gone down while we didn't
                                 * have the lock. Stop any processing and
                                 * exit. */
                                break;
                        }
                }

                count ++;
        }
escape:
        if (batch_count) {
                ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
        }

        if (count >= budget && may_resched) {
                /* If we ran out of budget or the double-check found new
                 * slots to process, schedule the taskqueue. */
                DBG(nm_prlim(1, "out of budget: resched h %u t %u\n",
                             head, ring->tail));
                taskqueue_enqueue(pq->taskq, &pq->task);
        }
unlock:
        PTNET_Q_UNLOCK(pq);

        return count;
}
static void
ptnet_rx_task(void *context, int pending)
{
        struct ptnet_queue *pq = context;

        DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id));
        ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
}

static void
ptnet_tx_task(void *context, int pending)
{
        struct ptnet_queue *pq = context;

        DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id));
        ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);
}
#ifdef DEVICE_POLLING
/* We don't need to handle POLL_AND_CHECK_STATUS differently from
 * POLL_ONLY, since we don't have an Interrupt Status Register. */
static int
ptnet_poll(if_t ifp, enum poll_cmd cmd, int budget)
{
        struct ptnet_softc *sc = if_getsoftc(ifp);
        unsigned int queue_budget;
        unsigned int count = 0;
        bool borrow = false;
        int i;

        KASSERT(sc->num_rings > 0, ("Found no queues while polling ptnet"));
        queue_budget = MAX(budget / sc->num_rings, 1);
        nm_prlim(1, "Per-queue budget is %d", queue_budget);
        while (budget) {
                unsigned int rcnt = 0;

                for (i = 0; i < sc->num_rings; i++) {
                        struct ptnet_queue *pq = sc->queues + i;

                        /* Make sure the per-queue budget does not exceed
                         * the total budget left. */
                        queue_budget = MIN(queue_budget, budget);
                        if (queue_budget == 0) {
                                break;
                        }

                        if (i < sc->num_tx_rings) {
                                rcnt += ptnet_drain_transmit_queue(pq,
                                                queue_budget, false);
                        } else {
                                rcnt += ptnet_rx_eof(pq, queue_budget,
                                                     false);
                        }
                }

                if (!rcnt) {
                        /* A scan of the queues gave no result, we can
                         * stop here. */
                        break;
                }

                if (rcnt > budget) {
                        /* This may happen when the initial budget <
                         * sc->num_rings, since one packet budget is given
                         * to each queue anyway. Just pretend we didn't eat
                         * "so much". */
                        rcnt = budget;
                }

                count += rcnt;
                budget -= rcnt;
                borrow = true;
        }

        return count;
}
#endif /* DEVICE_POLLING */
#endif /* WITH_PTNETMAP */