/*-
 * Copyright (c) 2016, Vincenzo Maffione
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/* Driver for ptnet paravirtualized network device. */
#include <sys/cdefs.h>

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>

#include <machine/smp.h>

#include <net/ethernet.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include "opt_inet6.h"

#include <sys/selinfo.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <net/netmap_virt.h>
#include <dev/netmap/netmap_mem2.h>
#include <dev/virtio/network/virtio_net.h>
#ifndef INET
#error "INET not defined, cannot support offloadings"
#endif
#if __FreeBSD_version >= 1100000
static uint64_t ptnet_get_counter(if_t, ift_counter);
#else
typedef struct ifnet *if_t;
#define if_getsoftc(_ifp) (_ifp)->if_softc
#endif
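/* When defined, PTNETMAP_STATS compiles in the per-queue kick and
 * interrupt counters, and arms ptnet_tick(), which prints per-queue
 * packet/kick/interrupt rates once per second. */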
//#define PTNETMAP_STATS

#ifdef DEBUG
#define DBG(x) x
#else
#define DBG(x)
#endif
extern int ptnet_vnet_hdr; /* Tunable parameter */
struct ptnet_queue_stats {
        uint64_t        packets; /* if_[io]packets */
        uint64_t        bytes;   /* if_[io]bytes */
        uint64_t        errors;  /* if_[io]errors */
        uint64_t        iqdrops; /* if_iqdrops */
        uint64_t        mcasts;  /* if_[io]mcasts */
#ifdef PTNETMAP_STATS
        uint64_t        intrs;
        uint64_t        kicks;
#endif /* PTNETMAP_STATS */
};

struct ptnet_queue {
        struct ptnet_softc              *sc;
        struct resource                 *irq;
        void                            *cookie;
        int                             kring_id;
        struct nm_csb_atok              *atok;
        struct nm_csb_ktoa              *ktoa;
        unsigned int                    kick;
        struct mtx                      lock;
        struct buf_ring                 *bufring; /* for TX queues */
        struct ptnet_queue_stats        stats;
#ifdef PTNETMAP_STATS
        struct ptnet_queue_stats        last_stats;
#endif /* PTNETMAP_STATS */
        struct taskqueue                *taskq;
        struct task                     task;
        char                            lock_name[16];
};
#define PTNET_Q_LOCK(_pq)       mtx_lock(&(_pq)->lock)
#define PTNET_Q_TRYLOCK(_pq)    mtx_trylock(&(_pq)->lock)
#define PTNET_Q_UNLOCK(_pq)     mtx_unlock(&(_pq)->lock)
struct ptnet_softc {
        device_t                dev;
        if_t                    ifp;
        struct ifmedia          media;
        struct mtx              lock;
        char                    lock_name[16];
        char                    hwaddr[ETHER_ADDR_LEN];

        /* Mirror of PTFEAT register. */
        uint32_t                ptfeatures;
        unsigned int            vnet_hdr_len;

        /* PCI BARs support. */
        struct resource         *iomem;
        struct resource         *msix_mem;

        unsigned int            num_rings;
        unsigned int            num_tx_rings;
        struct ptnet_queue      *queues;
        struct ptnet_queue      *rxqueues;
        struct nm_csb_atok      *csb_gh;
        struct nm_csb_ktoa      *csb_hg;

        unsigned int            min_tx_space;

        struct netmap_pt_guest_adapter *ptna;

        struct callout          tick;
#ifdef PTNETMAP_STATS
        struct timeval          last_ts;
#endif /* PTNETMAP_STATS */
};
#define PTNET_CORE_LOCK(_sc)    mtx_lock(&(_sc)->lock)
#define PTNET_CORE_UNLOCK(_sc)  mtx_unlock(&(_sc)->lock)
static int      ptnet_probe(device_t);
static int      ptnet_attach(device_t);
static int      ptnet_detach(device_t);
static int      ptnet_suspend(device_t);
static int      ptnet_resume(device_t);
static int      ptnet_shutdown(device_t);

static void     ptnet_init(void *opaque);
static int      ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int      ptnet_init_locked(struct ptnet_softc *sc);
static int      ptnet_stop(struct ptnet_softc *sc);
static int      ptnet_transmit(if_t ifp, struct mbuf *m);
static int      ptnet_drain_transmit_queue(struct ptnet_queue *pq,
                                           unsigned int budget,
                                           bool may_resched);
static void     ptnet_qflush(if_t ifp);
static void     ptnet_tx_task(void *context, int pending);

static int      ptnet_media_change(if_t ifp);
static void     ptnet_media_status(if_t ifp, struct ifmediareq *ifmr);
#ifdef PTNETMAP_STATS
static void     ptnet_tick(void *opaque);
#endif

static int      ptnet_irqs_init(struct ptnet_softc *sc);
static void     ptnet_irqs_fini(struct ptnet_softc *sc);

static uint32_t ptnet_nm_ptctl(struct ptnet_softc *sc, uint32_t cmd);
static int      ptnet_nm_config(struct netmap_adapter *na,
                                struct nm_config_info *info);
static void     ptnet_update_vnet_hdr(struct ptnet_softc *sc);
static int      ptnet_nm_register(struct netmap_adapter *na, int onoff);
static int      ptnet_nm_txsync(struct netmap_kring *kring, int flags);
static int      ptnet_nm_rxsync(struct netmap_kring *kring, int flags);
static void     ptnet_nm_intr(struct netmap_adapter *na, int onoff);
static int      ptnet_nm_krings_create(struct netmap_adapter *na);
static void     ptnet_nm_krings_delete(struct netmap_adapter *na);
static void     ptnet_nm_dtor(struct netmap_adapter *na);

static void     ptnet_tx_intr(void *opaque);
static void     ptnet_rx_intr(void *opaque);

static unsigned ptnet_rx_discard(struct netmap_kring *kring,
                                 unsigned int head);
static int      ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget,
                             bool may_resched);
static void     ptnet_rx_task(void *context, int pending);

#ifdef DEVICE_POLLING
static poll_handler_t ptnet_poll;
#endif
static device_method_t ptnet_methods[] = {
        DEVMETHOD(device_probe,         ptnet_probe),
        DEVMETHOD(device_attach,        ptnet_attach),
        DEVMETHOD(device_detach,        ptnet_detach),
        DEVMETHOD(device_suspend,       ptnet_suspend),
        DEVMETHOD(device_resume,        ptnet_resume),
        DEVMETHOD(device_shutdown,      ptnet_shutdown),
        DEVMETHOD_END
};
static driver_t ptnet_driver = {
        "ptnet",
        ptnet_methods,
        sizeof(struct ptnet_softc)
};

/* We use (SI_ORDER_MIDDLE+2) here, see DEV_MODULE_ORDERED() invocation. */
static devclass_t ptnet_devclass;
DRIVER_MODULE_ORDERED(ptnet, pci, ptnet_driver, ptnet_devclass,
                      NULL, NULL, SI_ORDER_MIDDLE + 2);
static int
ptnet_probe(device_t dev)
{
        if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID ||
            pci_get_device(dev) != PTNETMAP_PCI_NETIF_ID) {
                return (ENXIO);
        }

        device_set_desc(dev, "ptnet network adapter");

        return (BUS_PROBE_DEFAULT);
}
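/* Ring the doorbell of a queue: a single 4-byte write at the queue's
 * offset into the I/O BAR (pq->kick, set up in ptnet_attach()) notifies
 * the hypervisor that there is new work pending on this queue; the
 * access itself is the notification. */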
static inline void
ptnet_kick(struct ptnet_queue *pq)
{
#ifdef PTNETMAP_STATS
        pq->stats.kicks ++;
#endif /* PTNETMAP_STATS */
        bus_write_4(pq->sc->iomem, pq->kick, 0);
}
#define PTNET_BUF_RING_SIZE     4096
#define PTNET_RX_BUDGET         512
#define PTNET_RX_BATCH          1
#define PTNET_TX_BUDGET         512
#define PTNET_TX_BATCH          64
#define PTNET_HDR_SIZE          sizeof(struct virtio_net_hdr_mrg_rxbuf)
#define PTNET_MAX_PKT_SIZE      65536

#define PTNET_CSUM_OFFLOAD      (CSUM_TCP | CSUM_UDP)
#define PTNET_CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6 | CSUM_UDP_IPV6)
#define PTNET_ALL_OFFLOAD       (CSUM_TSO | PTNET_CSUM_OFFLOAD |\
                                 PTNET_CSUM_OFFLOAD_IPV6)
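/* PTNET_ALL_OFFLOAD is the set of mbuf csum_flags bits that the TX path
 * is able to translate into a virtio-net header; see the csum_flags test
 * in ptnet_drain_transmit_queue(). */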
static int
ptnet_attach(device_t dev)
{
        uint32_t ptfeatures = 0;
        unsigned int num_rx_rings, num_tx_rings;
        struct netmap_adapter na_arg;
        unsigned int nifp_offset;
        struct ptnet_softc *sc;
        if_t ifp;
        uint32_t macreg;
        int err, rid;
        int i;

        sc = device_get_softc(dev);
        sc->dev = dev;
        /* Setup PCI resources. */
        pci_enable_busmaster(dev);

        rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR);
        sc->iomem = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
                                           RF_ACTIVE);
        if (sc->iomem == NULL) {
                device_printf(dev, "Failed to map I/O BAR\n");
                return (ENXIO);
        }
        /* Negotiate features with the hypervisor. */
        if (ptnet_vnet_hdr) {
                ptfeatures |= PTNETMAP_F_VNET_HDR;
        }
        bus_write_4(sc->iomem, PTNET_IO_PTFEAT, ptfeatures); /* wanted */
        ptfeatures = bus_read_4(sc->iomem, PTNET_IO_PTFEAT); /* acked */
        sc->ptfeatures = ptfeatures;
        num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS);
        num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS);
        sc->num_rings = num_tx_rings + num_rx_rings;
        sc->num_tx_rings = num_tx_rings;

        if (sc->num_rings * sizeof(struct nm_csb_atok) > PAGE_SIZE) {
                device_printf(dev, "CSB cannot handle that many rings (%u)\n",
                              sc->num_rings);
                err = ENOMEM;
                goto err_path;
        }
        /* Allocate CSB and carry out CSB allocation protocol. */
        sc->csb_gh = contigmalloc(2*PAGE_SIZE, M_DEVBUF, M_NOWAIT | M_ZERO,
                                  (size_t)0, -1UL, PAGE_SIZE, 0);
        if (sc->csb_gh == NULL) {
                device_printf(dev, "Failed to allocate CSB\n");
                err = ENOMEM;
                goto err_path;
        }
        sc->csb_hg = (struct nm_csb_ktoa *)(((char *)sc->csb_gh) + PAGE_SIZE);
        {
                /*
                 * We use uint64_t rather than vm_paddr_t since we
                 * need 64 bit addresses even on 32 bit platforms.
                 */
                uint64_t paddr = vtophys(sc->csb_gh);

                /* CSB allocation protocol: write to BAH first, then
                 * to BAL (for both GH and HG sections). */
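                /* Each CSB base address is a 64-bit physical address split
                 * across two 32-bit registers: BAH takes bits 63..32 and
                 * BAL bits 31..0, so that the host sees a complete address
                 * by the time the (second) BAL write arrives. */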
                bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH,
                            (paddr >> 32) & 0xffffffff);
                bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL,
                            paddr & 0xffffffff);
                paddr = vtophys(sc->csb_hg);
                bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH,
                            (paddr >> 32) & 0xffffffff);
                bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL,
                            paddr & 0xffffffff);
        }
        /* Allocate and initialize per-queue data structures. */
        sc->queues = malloc(sizeof(struct ptnet_queue) * sc->num_rings,
                            M_DEVBUF, M_NOWAIT | M_ZERO);
        if (sc->queues == NULL) {
                err = ENOMEM;
                goto err_path;
        }
        sc->rxqueues = sc->queues + num_tx_rings;
        for (i = 0; i < sc->num_rings; i++) {
                struct ptnet_queue *pq = sc->queues + i;

                pq->sc = sc;
                pq->kring_id = i;
                pq->kick = PTNET_IO_KICK_BASE + 4 * i;
                pq->atok = sc->csb_gh + i;
                pq->ktoa = sc->csb_hg + i;
                snprintf(pq->lock_name, sizeof(pq->lock_name), "%s-%d",
                         device_get_nameunit(dev), i);
                mtx_init(&pq->lock, pq->lock_name, NULL, MTX_DEF);
                if (i >= num_tx_rings) {
                        /* RX queue: fix kring_id. */
                        pq->kring_id -= num_tx_rings;
                } else {
                        /* TX queue: allocate buf_ring. */
                        pq->bufring = buf_ring_alloc(PTNET_BUF_RING_SIZE,
                                                     M_DEVBUF, M_NOWAIT,
                                                     &pq->lock);
                        if (pq->bufring == NULL) {
                                err = ENOMEM;
                                goto err_path;
                        }
                }
        }
        sc->min_tx_space = 64; /* Safe initial value. */

        err = ptnet_irqs_init(sc);
        if (err) {
                goto err_path;
        }
        /* Setup Ethernet interface. */
        sc->ifp = ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "Failed to allocate ifnet\n");
                err = ENOMEM;
                goto err_path;
        }

        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_baudrate = IF_Gbps(10);
        ifp->if_softc = sc;
        ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
        ifp->if_init = ptnet_init;
        ifp->if_ioctl = ptnet_ioctl;
#if __FreeBSD_version >= 1100000
        ifp->if_get_counter = ptnet_get_counter;
#endif
        ifp->if_transmit = ptnet_transmit;
        ifp->if_qflush = ptnet_qflush;
        ifmedia_init(&sc->media, IFM_IMASK, ptnet_media_change,
                     ptnet_media_status);
        ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL);
        ifmedia_set(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX);
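        /* The MAC address is spread over two 32-bit registers: the low
         * 16 bits of PTNET_IO_MAC_HI hold bytes 0-1, and PTNET_IO_MAC_LO
         * holds bytes 2-5, most significant byte first. */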
        macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_HI);
        sc->hwaddr[0] = (macreg >> 8) & 0xff;
        sc->hwaddr[1] = macreg & 0xff;
        macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_LO);
        sc->hwaddr[2] = (macreg >> 24) & 0xff;
        sc->hwaddr[3] = (macreg >> 16) & 0xff;
        sc->hwaddr[4] = (macreg >> 8) & 0xff;
        sc->hwaddr[5] = macreg & 0xff;

        ether_ifattach(ifp, sc->hwaddr);
        ifp->if_hdrlen = sizeof(struct ether_vlan_header);
        ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;

        if (sc->ptfeatures & PTNETMAP_F_VNET_HDR) {
                /* Similarly to what the vtnet driver does, we can emulate
                 * VLAN offloadings by inserting and removing the 802.1Q
                 * header during transmit and receive. We are then able
                 * to do checksum offloading of VLAN frames. */
                ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6
                                        | IFCAP_VLAN_HWCSUM
                                        | IFCAP_TSO | IFCAP_LRO
                                        | IFCAP_VLAN_HWTSO
                                        | IFCAP_VLAN_HWTAGGING;
        }
        ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
        /* Don't enable polling by default. */
        ifp->if_capabilities |= IFCAP_POLLING;
#endif
        snprintf(sc->lock_name, sizeof(sc->lock_name),
                 "%s", device_get_nameunit(dev));
        mtx_init(&sc->lock, sc->lock_name, "ptnet core lock", MTX_DEF);
        callout_init_mtx(&sc->tick, &sc->lock, 0);
        /* Prepare a netmap_adapter struct instance to do netmap_attach(). */
        nifp_offset = bus_read_4(sc->iomem, PTNET_IO_NIFP_OFS);
        memset(&na_arg, 0, sizeof(na_arg));
        na_arg.ifp = ifp;
        na_arg.num_tx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS);
        na_arg.num_rx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS);
        na_arg.num_tx_rings = num_tx_rings;
        na_arg.num_rx_rings = num_rx_rings;
        na_arg.nm_config = ptnet_nm_config;
        na_arg.nm_krings_create = ptnet_nm_krings_create;
        na_arg.nm_krings_delete = ptnet_nm_krings_delete;
        na_arg.nm_dtor = ptnet_nm_dtor;
        na_arg.nm_intr = ptnet_nm_intr;
        na_arg.nm_register = ptnet_nm_register;
        na_arg.nm_txsync = ptnet_nm_txsync;
        na_arg.nm_rxsync = ptnet_nm_rxsync;

        netmap_pt_guest_attach(&na_arg, nifp_offset,
                               bus_read_4(sc->iomem, PTNET_IO_HOSTMEMID));
        /* Now a netmap adapter for this ifp has been allocated, and it
         * can be accessed through NA(ifp). We also have to initialize the CSB
         * pointer. */
        sc->ptna = (struct netmap_pt_guest_adapter *)NA(ifp);

        /* If virtio-net header was negotiated, set the virt_hdr_len field in
         * the netmap adapter, to inform users that this netmap adapter
         * requires the application to deal with the headers. */
        ptnet_update_vnet_hdr(sc);

        device_printf(dev, "%s() completed\n", __func__);

        return (0);

err_path:
        ptnet_detach(dev);
        return (err);
}
/* Stop host sync-kloop if it was running. */
static void
ptnet_device_shutdown(struct ptnet_softc *sc)
{
        ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_DELETE);
        bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH, 0);
        bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL, 0);
        bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH, 0);
        bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL, 0);
}
static int
ptnet_detach(device_t dev)
{
        struct ptnet_softc *sc = device_get_softc(dev);
        int i;

        ptnet_device_shutdown(sc);

#ifdef DEVICE_POLLING
        if (sc->ifp->if_capenable & IFCAP_POLLING) {
                ether_poll_deregister(sc->ifp);
        }
#endif
        callout_drain(&sc->tick);

        /* Drain taskqueues before calling if_detach. */
        for (i = 0; i < sc->num_rings; i++) {
                struct ptnet_queue *pq = sc->queues + i;

                if (pq->taskq) {
                        taskqueue_drain(pq->taskq, &pq->task);
                }
        }

        ether_ifdetach(sc->ifp);

        /* Uninitialize netmap adapters for this device. */
        netmap_detach(sc->ifp);

        ifmedia_removeall(&sc->media);
        if_free(sc->ifp);

        ptnet_irqs_fini(sc);

        contigfree(sc->csb_gh, 2*PAGE_SIZE, M_DEVBUF);

        for (i = 0; i < sc->num_rings; i++) {
                struct ptnet_queue *pq = sc->queues + i;

                if (mtx_initialized(&pq->lock)) {
                        mtx_destroy(&pq->lock);
                }
                if (pq->bufring != NULL) {
                        buf_ring_free(pq->bufring, M_DEVBUF);
                }
        }
        free(sc->queues, M_DEVBUF);

        bus_release_resource(dev, SYS_RES_IOPORT,
                             PCIR_BAR(PTNETMAP_IO_PCI_BAR), sc->iomem);

        mtx_destroy(&sc->lock);

        device_printf(dev, "%s() completed\n", __func__);

        return (0);
}
static int
ptnet_suspend(device_t dev)
{
        struct ptnet_softc *sc = device_get_softc(dev);

        (void)sc;

        return (0);
}

static int
ptnet_resume(device_t dev)
{
        struct ptnet_softc *sc = device_get_softc(dev);

        (void)sc;

        return (0);
}

static int
ptnet_shutdown(device_t dev)
{
        struct ptnet_softc *sc = device_get_softc(dev);

        ptnet_device_shutdown(sc);

        return (0);
}
static int
ptnet_irqs_init(struct ptnet_softc *sc)
{
        int rid = PCIR_BAR(PTNETMAP_MSIX_PCI_BAR);
        int nvecs = sc->num_rings;
        device_t dev = sc->dev;
        int cpu_cur;
        int err;
        int i;

        if (pci_find_cap(dev, PCIY_MSIX, NULL) != 0) {
                device_printf(dev, "Could not find MSI-X capability\n");
                return (ENXIO);
        }

        sc->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
                                              &rid, RF_ACTIVE);
        if (sc->msix_mem == NULL) {
                device_printf(dev, "Failed to allocate MSIX PCI BAR\n");
                return (ENXIO);
        }

        if (pci_msix_count(dev) < nvecs) {
                device_printf(dev, "Not enough MSI-X vectors\n");
                err = ENOSPC;
                goto err_path;
        }

        err = pci_alloc_msix(dev, &nvecs);
        if (err) {
                device_printf(dev, "Failed to allocate MSI-X vectors\n");
                goto err_path;
        }

        for (i = 0; i < nvecs; i++) {
                struct ptnet_queue *pq = sc->queues + i;

                rid = i + 1;
                pq->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
                                                 RF_ACTIVE);
                if (pq->irq == NULL) {
                        device_printf(dev, "Failed to allocate interrupt "
                                           "for queue #%d\n", i);
                        err = ENOSPC;
                        goto err_path;
                }
        }

        cpu_cur = CPU_FIRST();
        for (i = 0; i < nvecs; i++) {
                struct ptnet_queue *pq = sc->queues + i;
                void (*handler)(void *) = ptnet_tx_intr;

                if (i >= sc->num_tx_rings) {
                        handler = ptnet_rx_intr;
                }
                err = bus_setup_intr(dev, pq->irq, INTR_TYPE_NET | INTR_MPSAFE,
                                     NULL /* intr_filter */, handler,
                                     pq, &pq->cookie);
                if (err) {
                        device_printf(dev, "Failed to register intr handler "
                                           "for queue #%d\n", i);
                        goto err_path;
                }

                bus_describe_intr(dev, pq->irq, pq->cookie, "q%d", i);

                bus_bind_intr(sc->dev, pq->irq, cpu_cur);

                cpu_cur = CPU_NEXT(cpu_cur);
        }

        device_printf(dev, "Allocated %d MSI-X vectors\n", nvecs);

        cpu_cur = CPU_FIRST();
        for (i = 0; i < nvecs; i++) {
                struct ptnet_queue *pq = sc->queues + i;
                void (*handler)(void *context, int pending);

                handler = (i < sc->num_tx_rings) ? ptnet_tx_task : ptnet_rx_task;

                TASK_INIT(&pq->task, 0, handler, pq);
                pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT,
                                        taskqueue_thread_enqueue, &pq->taskq);
                taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d",
                                        device_get_nameunit(sc->dev), cpu_cur);
                cpu_cur = CPU_NEXT(cpu_cur);
        }

        return (0);

err_path:
        ptnet_irqs_fini(sc);
        return (err);
}
static void
ptnet_irqs_fini(struct ptnet_softc *sc)
{
        device_t dev = sc->dev;
        int i;

        for (i = 0; i < sc->num_rings; i++) {
                struct ptnet_queue *pq = sc->queues + i;

                if (pq->taskq) {
                        taskqueue_free(pq->taskq);
                }

                if (pq->cookie) {
                        bus_teardown_intr(dev, pq->irq, pq->cookie);
                }

                if (pq->irq) {
                        bus_release_resource(dev, SYS_RES_IRQ, i + 1, pq->irq);
                }
        }
        pci_release_msi(dev);

        if (sc->msix_mem) {
                bus_release_resource(dev, SYS_RES_MEMORY,
                                     PCIR_BAR(PTNETMAP_MSIX_PCI_BAR),
                                     sc->msix_mem);
        }
}
static void
ptnet_init(void *opaque)
{
        struct ptnet_softc *sc = opaque;

        PTNET_CORE_LOCK(sc);
        ptnet_init_locked(sc);
        PTNET_CORE_UNLOCK(sc);
}
static int
ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
        struct ptnet_softc *sc = if_getsoftc(ifp);
        device_t dev = sc->dev;
        struct ifreq *ifr = (struct ifreq *)data;
        int mask __unused, err = 0;

        switch (cmd) {
        case SIOCSIFFLAGS:
                device_printf(dev, "SIOCSIFFLAGS %x\n", ifp->if_flags);
                PTNET_CORE_LOCK(sc);
                if (ifp->if_flags & IFF_UP) {
                        /* Network stack wants the iff to be up. */
                        err = ptnet_init_locked(sc);
                } else {
                        /* Network stack wants the iff to be down. */
                        err = ptnet_stop(sc);
                }
                /* We don't need to do anything to support IFF_PROMISC,
                 * since that is managed by the backend port. */
                PTNET_CORE_UNLOCK(sc);
                break;

        case SIOCSIFCAP:
                device_printf(dev, "SIOCSIFCAP %x %x\n",
                              ifr->ifr_reqcap, ifp->if_capenable);
                mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
                if (mask & IFCAP_POLLING) {
                        struct ptnet_queue *pq;
                        int i;

                        if (ifr->ifr_reqcap & IFCAP_POLLING) {
                                err = ether_poll_register(ptnet_poll, ifp);
                                if (err) {
                                        break;
                                }
                                /* Stop queues and sync with taskqueues. */
                                ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
                                for (i = 0; i < sc->num_rings; i++) {
                                        pq = sc->queues + i;
                                        /* Make sure the worker sees the
                                         * IFF_DRV_RUNNING down. */
                                        PTNET_Q_LOCK(pq);
                                        pq->atok->appl_need_kick = 0;
                                        PTNET_Q_UNLOCK(pq);
                                        /* Wait for rescheduling to finish. */
                                        if (pq->taskq) {
                                                taskqueue_drain(pq->taskq,
                                                                &pq->task);
                                        }
                                }
                                ifp->if_drv_flags |= IFF_DRV_RUNNING;
                        } else {
                                err = ether_poll_deregister(ifp);
                                for (i = 0; i < sc->num_rings; i++) {
                                        pq = sc->queues + i;
                                        PTNET_Q_LOCK(pq);
                                        pq->atok->appl_need_kick = 1;
                                        PTNET_Q_UNLOCK(pq);
                                }
                        }
                }
#endif /* DEVICE_POLLING */
                ifp->if_capenable = ifr->ifr_reqcap;
                break;

        case SIOCSIFMTU:
                /* We support any reasonable MTU. */
                if (ifr->ifr_mtu < ETHERMIN ||
                                ifr->ifr_mtu > PTNET_MAX_PKT_SIZE) {
                        err = EINVAL;
                } else {
                        PTNET_CORE_LOCK(sc);
                        ifp->if_mtu = ifr->ifr_mtu;
                        PTNET_CORE_UNLOCK(sc);
                }
                break;

        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                err = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
                break;

        default:
                err = ether_ioctl(ifp, cmd, data);
                break;
        }

        return err;
}
static int
ptnet_init_locked(struct ptnet_softc *sc)
{
        if_t ifp = sc->ifp;
        struct netmap_adapter *na_dr = &sc->ptna->dr.up;
        struct netmap_adapter *na_nm = &sc->ptna->hwup.up;
        unsigned int nm_buf_size;
        int ret;

        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                return 0; /* nothing to do */
        }

        device_printf(sc->dev, "%s\n", __func__);

        /* Translate offload capabilities according to if_capenable. */
        ifp->if_hwassist = 0;
        if (ifp->if_capenable & IFCAP_TXCSUM)
                ifp->if_hwassist |= PTNET_CSUM_OFFLOAD;
        if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
                ifp->if_hwassist |= PTNET_CSUM_OFFLOAD_IPV6;
        if (ifp->if_capenable & IFCAP_TSO4)
                ifp->if_hwassist |= CSUM_IP_TSO;
        if (ifp->if_capenable & IFCAP_TSO6)
                ifp->if_hwassist |= CSUM_IP6_TSO;

        /*
         * Prepare the interface for netmap mode access.
         */
        netmap_update_config(na_dr);

        ret = netmap_mem_finalize(na_dr->nm_mem, na_dr);
        if (ret) {
                device_printf(sc->dev, "netmap_mem_finalize() failed\n");
                return ret;
        }

        if (sc->ptna->backend_users == 0) {
                ret = ptnet_nm_krings_create(na_nm);
                if (ret) {
                        device_printf(sc->dev, "ptnet_nm_krings_create() "
                                              "failed\n");
                        goto err_mem_finalize;
                }

                ret = netmap_mem_rings_create(na_dr);
                if (ret) {
                        device_printf(sc->dev, "netmap_mem_rings_create() "
                                              "failed\n");
                        goto err_rings_create;
                }

                ret = netmap_mem_get_lut(na_dr->nm_mem, &na_dr->na_lut);
                if (ret) {
                        device_printf(sc->dev, "netmap_mem_get_lut() "
                                              "failed\n");
                        goto err_get_lut;
                }
        }

        ret = ptnet_nm_register(na_dr, 1 /* on */);
        if (ret) {
                goto err_register;
        }
        nm_buf_size = NETMAP_BUF_SIZE(na_dr);

        KASSERT(nm_buf_size > 0, ("Invalid netmap buffer size"));
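        /* A packet of up to PTNET_MAX_PKT_SIZE bytes can span up to
         * PTNET_MAX_PKT_SIZE / nm_buf_size netmap slots; the +2 adds
         * slack for the virtio-net header and the truncation of the
         * integer division. */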
        sc->min_tx_space = PTNET_MAX_PKT_SIZE / nm_buf_size + 2;
        device_printf(sc->dev, "%s: min_tx_space = %u\n", __func__,
                      sc->min_tx_space);
#ifdef PTNETMAP_STATS
        callout_reset(&sc->tick, hz, ptnet_tick, sc);
#endif

        ifp->if_drv_flags |= IFF_DRV_RUNNING;

        return 0;

err_register:
        memset(&na_dr->na_lut, 0, sizeof(na_dr->na_lut));
err_get_lut:
        netmap_mem_rings_delete(na_dr);
err_rings_create:
        ptnet_nm_krings_delete(na_nm);
err_mem_finalize:
        netmap_mem_deref(na_dr->nm_mem, na_dr);

        return ret;
}
/* To be called under core lock. */
static int
ptnet_stop(struct ptnet_softc *sc)
{
        if_t ifp = sc->ifp;
        struct netmap_adapter *na_dr = &sc->ptna->dr.up;
        struct netmap_adapter *na_nm = &sc->ptna->hwup.up;
        int i;

        device_printf(sc->dev, "%s\n", __func__);

        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                return 0; /* nothing to do */
        }

        /* Clear the driver-ready flag, and synchronize with all the queues,
         * so that after this loop we are sure nobody is working anymore with
         * the device. This scheme is taken from the vtnet driver. */
        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        callout_stop(&sc->tick);
        for (i = 0; i < sc->num_rings; i++) {
                PTNET_Q_LOCK(sc->queues + i);
                PTNET_Q_UNLOCK(sc->queues + i);
        }

        ptnet_nm_register(na_dr, 0 /* off */);

        if (sc->ptna->backend_users == 0) {
                netmap_mem_rings_delete(na_dr);
                ptnet_nm_krings_delete(na_nm);
        }
        netmap_mem_deref(na_dr->nm_mem, na_dr);

        return 0;
}
static void
ptnet_qflush(if_t ifp)
{
        struct ptnet_softc *sc = if_getsoftc(ifp);
        int i;

        /* Flush all the bufrings and do the interface flush. */
        for (i = 0; i < sc->num_rings; i++) {
                struct ptnet_queue *pq = sc->queues + i;
                struct mbuf *m;

                PTNET_Q_LOCK(pq);
                if (pq->bufring) {
                        while ((m = buf_ring_dequeue_sc(pq->bufring))) {
                                m_freem(m);
                        }
                }
                PTNET_Q_UNLOCK(pq);
        }

        if_qflush(ifp);
}
static int
ptnet_media_change(if_t ifp)
{
        struct ptnet_softc *sc = if_getsoftc(ifp);
        struct ifmedia *ifm = &sc->media;

        if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
                return EINVAL;
        }

        return 0;
}
#if __FreeBSD_version >= 1100000
static uint64_t
ptnet_get_counter(if_t ifp, ift_counter cnt)
{
        struct ptnet_softc *sc = if_getsoftc(ifp);
        struct ptnet_queue_stats stats[2];
        int i;

        /* Accumulate statistics over the queues. */
        memset(stats, 0, sizeof(stats));
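        /* stats[0] accumulates over the TX queues, stats[1] over the
         * RX queues (see the idx computation below). */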
        for (i = 0; i < sc->num_rings; i++) {
                struct ptnet_queue *pq = sc->queues + i;
                int idx = (i < sc->num_tx_rings) ? 0 : 1;

                stats[idx].packets      += pq->stats.packets;
                stats[idx].bytes        += pq->stats.bytes;
                stats[idx].errors       += pq->stats.errors;
                stats[idx].iqdrops      += pq->stats.iqdrops;
                stats[idx].mcasts       += pq->stats.mcasts;
        }

        switch (cnt) {
        case IFCOUNTER_IPACKETS:
                return (stats[1].packets);
        case IFCOUNTER_IQDROPS:
                return (stats[1].iqdrops);
        case IFCOUNTER_IERRORS:
                return (stats[1].errors);
        case IFCOUNTER_OPACKETS:
                return (stats[0].packets);
        case IFCOUNTER_OBYTES:
                return (stats[0].bytes);
        case IFCOUNTER_OMCASTS:
                return (stats[0].mcasts);
        }

        return (if_get_counter_default(ifp, cnt));
}
#endif /* __FreeBSD_version >= 1100000 */
#ifdef PTNETMAP_STATS
/* Called under core lock. */
static void
ptnet_tick(void *opaque)
{
        struct ptnet_softc *sc = opaque;
        struct timeval now;
        unsigned long delta;
        int i;

        for (i = 0; i < sc->num_rings; i++) {
                struct ptnet_queue *pq = sc->queues + i;
                struct ptnet_queue_stats cur = pq->stats;

                microtime(&now);

                delta = now.tv_usec - sc->last_ts.tv_usec +
                        (now.tv_sec - sc->last_ts.tv_sec) * 1000000;
                delta /= 1000; /* in milliseconds */

                device_printf(sc->dev, "#%d[%u ms]:pkts %lu, kicks %lu, "
                              "intr %lu\n", i, delta,
                              (cur.packets - pq->last_stats.packets),
                              (cur.kicks - pq->last_stats.kicks),
                              (cur.intrs - pq->last_stats.intrs));
                pq->last_stats = cur;
        }
        microtime(&sc->last_ts);
        callout_schedule(&sc->tick, hz);
}
#endif /* PTNETMAP_STATS */
static void
ptnet_media_status(if_t ifp, struct ifmediareq *ifmr)
{
        /* We are always active, as the backend netmap port is
         * always open in netmap mode. */
        ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
        ifmr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX;
}
static uint32_t
ptnet_nm_ptctl(struct ptnet_softc *sc, uint32_t cmd)
{
        /*
         * Write a command and read back error status,
         * with zero meaning success.
         */
        bus_write_4(sc->iomem, PTNET_IO_PTCTL, cmd);
        return bus_read_4(sc->iomem, PTNET_IO_PTCTL);
}
static int
ptnet_nm_config(struct netmap_adapter *na, struct nm_config_info *info)
{
        struct ptnet_softc *sc = if_getsoftc(na->ifp);

        info->num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS);
        info->num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS);
        info->num_tx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS);
        info->num_rx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS);
        info->rx_buf_maxsize = NETMAP_BUF_SIZE(na);

        device_printf(sc->dev, "txr %u, rxr %u, txd %u, rxd %u, rxbufsz %u\n",
                      info->num_tx_rings, info->num_rx_rings,
                      info->num_tx_descs, info->num_rx_descs,
                      info->rx_buf_maxsize);

        return 0;
}
static void
ptnet_sync_from_csb(struct ptnet_softc *sc, struct netmap_adapter *na)
{
        int i;

        /* Sync krings from the host, reading from
         * the CSB. */
        for (i = 0; i < sc->num_rings; i++) {
                struct nm_csb_atok *atok = sc->queues[i].atok;
                struct nm_csb_ktoa *ktoa = sc->queues[i].ktoa;
                struct netmap_kring *kring;

                if (i < na->num_tx_rings) {
                        kring = na->tx_rings[i];
                } else {
                        kring = na->rx_rings[i - na->num_tx_rings];
                }
                kring->rhead = kring->ring->head = atok->head;
                kring->rcur = kring->ring->cur = atok->cur;
                kring->nr_hwcur = ktoa->hwcur;
                kring->nr_hwtail = kring->rtail =
                        kring->ring->tail = ktoa->hwtail;

                nm_prdis("%d,%d: csb {hc %u h %u c %u ht %u}", t, i,
                         ktoa->hwcur, atok->head, atok->cur,
                         ktoa->hwtail);
                nm_prdis("%d,%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}",
                         t, i, kring->nr_hwcur, kring->rhead, kring->rcur,
                         kring->ring->head, kring->ring->cur, kring->nr_hwtail,
                         kring->rtail, kring->ring->tail);
        }
}
static void
ptnet_update_vnet_hdr(struct ptnet_softc *sc)
{
        unsigned int wanted_hdr_len = ptnet_vnet_hdr ? PTNET_HDR_SIZE : 0;

        bus_write_4(sc->iomem, PTNET_IO_VNET_HDR_LEN, wanted_hdr_len);
        sc->vnet_hdr_len = bus_read_4(sc->iomem, PTNET_IO_VNET_HDR_LEN);
        sc->ptna->hwup.up.virt_hdr_len = sc->vnet_hdr_len;
}
static int
ptnet_nm_register(struct netmap_adapter *na, int onoff)
{
        /* device-specific */
        if_t ifp = na->ifp;
        struct ptnet_softc *sc = if_getsoftc(ifp);
        int native = (na == &sc->ptna->hwup.up);
        struct ptnet_queue *pq;
        int ret = 0;
        int i;

        if (!onoff) {
                sc->ptna->backend_users--;
        }

        /* If this is the last netmap client, guest interrupt enable flags may
         * be in arbitrary state. Since these flags are going to be used also
         * by the netdevice driver, we have to make sure to start with
         * notifications enabled. Also, schedule NAPI to flush pending packets
         * in the RX rings, since we will not receive further interrupts
         * until these will be processed. */
        if (native && !onoff && na->active_fds == 0) {
                nm_prinf("Exit netmap mode, re-enable interrupts");
                for (i = 0; i < sc->num_rings; i++) {
                        pq = sc->queues + i;
                        pq->atok->appl_need_kick = 1;
                }
        }

        if (onoff) {
                if (sc->ptna->backend_users == 0) {
                        /* Initialize notification enable fields in the CSB. */
                        for (i = 0; i < sc->num_rings; i++) {
                                pq = sc->queues + i;
                                pq->ktoa->kern_need_kick = 1;
                                pq->atok->appl_need_kick =
                                        (!(ifp->if_capenable & IFCAP_POLLING)
                                                && i >= sc->num_tx_rings);
                        }

                        /* Set the virtio-net header length. */
                        ptnet_update_vnet_hdr(sc);

                        /* Make sure the host adapter passed through is ready
                         * for txsync/rxsync. */
                        ret = ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_CREATE);
                        if (ret) {
                                return ret;
                        }

                        /* Align the guest krings and rings to the state stored
                         * in the CSB. */
                        ptnet_sync_from_csb(sc, na);
                }

                /* If not native, don't call nm_set_native_flags, since we don't want
                 * to replace if_transmit method, nor set NAF_NETMAP_ON */
                if (native) {
                        netmap_krings_mode_commit(na, onoff);
                        nm_set_native_flags(na);
                }
        } else {
                if (native) {
                        nm_clear_native_flags(na);
                        netmap_krings_mode_commit(na, onoff);
                }

                if (sc->ptna->backend_users == 0) {
                        ret = ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_DELETE);
                }
        }

        if (onoff) {
                sc->ptna->backend_users++;
        }

        return ret;
}
static int
ptnet_nm_txsync(struct netmap_kring *kring, int flags)
{
        struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
        struct ptnet_queue *pq = sc->queues + kring->ring_id;
        bool notify;

        notify = netmap_pt_guest_txsync(pq->atok, pq->ktoa, kring, flags);
        if (notify) {
                ptnet_kick(pq);
        }

        return 0;
}

static int
ptnet_nm_rxsync(struct netmap_kring *kring, int flags)
{
        struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
        struct ptnet_queue *pq = sc->rxqueues + kring->ring_id;
        bool notify;

        notify = netmap_pt_guest_rxsync(pq->atok, pq->ktoa, kring, flags);
        if (notify) {
                ptnet_kick(pq);
        }

        return 0;
}
static void
ptnet_nm_intr(struct netmap_adapter *na, int onoff)
{
        struct ptnet_softc *sc = if_getsoftc(na->ifp);
        int i;

        for (i = 0; i < sc->num_rings; i++) {
                struct ptnet_queue *pq = sc->queues + i;

                pq->atok->appl_need_kick = onoff;
        }
}
static void
ptnet_tx_intr(void *opaque)
{
        struct ptnet_queue *pq = opaque;
        struct ptnet_softc *sc = pq->sc;

        DBG(device_printf(sc->dev, "Tx interrupt #%d\n", pq->kring_id));
#ifdef PTNETMAP_STATS
        pq->stats.intrs ++;
#endif /* PTNETMAP_STATS */

        if (netmap_tx_irq(sc->ifp, pq->kring_id) != NM_IRQ_PASS) {
                return;
        }

        /* Schedule the taskqueue to process the pending transmission
         * requests. However, vtnet, if_em and if_igb just call
         * ptnet_transmit() here, at least when using MSI-X interrupts.
         * The if_em driver, instead, schedules the taskqueue when using
         * legacy interrupts. */
        taskqueue_enqueue(pq->taskq, &pq->task);
}
static void
ptnet_rx_intr(void *opaque)
{
        struct ptnet_queue *pq = opaque;
        struct ptnet_softc *sc = pq->sc;
        unsigned int unused;

        DBG(device_printf(sc->dev, "Rx interrupt #%d\n", pq->kring_id));
#ifdef PTNETMAP_STATS
        pq->stats.intrs ++;
#endif /* PTNETMAP_STATS */

        if (netmap_rx_irq(sc->ifp, pq->kring_id, &unused) != NM_IRQ_PASS) {
                return;
        }

        /* Like vtnet, if_igb and if_em drivers when using MSI-X interrupts,
         * receive-side processing is executed directly in the interrupt
         * service routine. Alternatively, we may schedule the taskqueue. */
        ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
}
static void
ptnet_vlan_tag_remove(struct mbuf *m)
{
        struct ether_vlan_header *evh;

        evh = mtod(m, struct ether_vlan_header *);
        m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag);
        m->m_flags |= M_VLANTAG;

        /* Strip the 802.1Q header. */
        bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN,
              ETHER_HDR_LEN - ETHER_TYPE_LEN);
        m_adj(m, ETHER_VLAN_ENCAP_LEN);
}
static void
ptnet_ring_update(struct ptnet_queue *pq, struct netmap_kring *kring,
                  unsigned int head, unsigned int sync_flags)
{
        struct netmap_ring *ring = kring->ring;
        struct nm_csb_atok *atok = pq->atok;
        struct nm_csb_ktoa *ktoa = pq->ktoa;

        /* Some packets have been pushed to the netmap ring. We have
         * to tell the host to process the new packets, updating cur
         * and head in the CSB. */
        ring->head = ring->cur = head;

        /* Mimic nm_txsync_prologue/nm_rxsync_prologue. */
        kring->rcur = kring->rhead = head;

        nm_sync_kloop_appl_write(atok, kring->rcur, kring->rhead);

        /* Kick the host if needed. */
        if (NM_ACCESS_ONCE(ktoa->kern_need_kick)) {
                atok->sync_flags = sync_flags;
                ptnet_kick(pq);
        }
}
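/* True iff fewer than _min slots are available for transmission, where
 * the available slots are those between _h (the next head position to
 * use) and the last known ring tail (_k)->rtail, modulo the ring size. */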
#define PTNET_TX_NOSPACE(_h, _k, _min) \
        ((((_h) < (_k)->rtail) ? 0 : (_k)->nkr_num_slots) + \
                (_k)->rtail - (_h)) < (_min)
/* This function may be called by the network stack, or by
 * the taskqueue thread. */
static int
ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget,
                           bool may_resched)
{
        struct ptnet_softc *sc = pq->sc;
        bool have_vnet_hdr = sc->vnet_hdr_len;
        struct netmap_adapter *na = &sc->ptna->dr.up;
        if_t ifp = sc->ifp;
        unsigned int batch_count = 0;
        struct nm_csb_atok *atok;
        struct nm_csb_ktoa *ktoa;
        struct netmap_kring *kring;
        struct netmap_ring *ring;
        struct netmap_slot *slot;
        unsigned int count = 0;
        unsigned int minspace;
        struct mbuf *mhead;
        struct mbuf *mf;
        int nmbuf_bytes;
        uint8_t *nmbuf;
        unsigned int head;
        unsigned int lim;

        if (!PTNET_Q_TRYLOCK(pq)) {
                /* We failed to acquire the lock, schedule the taskqueue. */
                nm_prlim(1, "Deferring TX work");
                if (may_resched) {
                        taskqueue_enqueue(pq->taskq, &pq->task);
                }

                return 0;
        }

        if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
                PTNET_Q_UNLOCK(pq);
                nm_prlim(1, "Interface is down");
                return ENETDOWN;
        }

        atok = pq->atok;
        ktoa = pq->ktoa;
        kring = na->tx_rings[pq->kring_id];
        ring = kring->ring;
        lim = kring->nkr_num_slots - 1;
        head = ring->head;
        minspace = sc->min_tx_space;
        while (count < budget) {
                if (PTNET_TX_NOSPACE(head, kring, minspace)) {
                        /* We ran out of slots, let's see if the host has
                         * freed up some, by reading hwcur and hwtail from
                         * the CSB. */
                        ptnet_sync_tail(ktoa, kring);

                        if (PTNET_TX_NOSPACE(head, kring, minspace)) {
                                /* Still no slots available. Reactivate the
                                 * interrupts so that we can be notified
                                 * when some free slots are made available by
                                 * the host. */
                                atok->appl_need_kick = 1;

                                /* Double check. We need a full barrier to
                                 * prevent the store to atok->appl_need_kick
                                 * to be reordered with the load from
                                 * ktoa->hwcur and ktoa->hwtail (store-load
                                 * barrier). */
                                nm_stld_barrier();
                                ptnet_sync_tail(ktoa, kring);
                                if (likely(PTNET_TX_NOSPACE(head, kring,
                                                            minspace))) {
                                        break;
                                }

                                nm_prlim(1, "Found more slots by doublecheck");
                                /* More slots were freed before reactivating
                                 * the interrupts. */
                                atok->appl_need_kick = 0;
                        }
                }
                mhead = drbr_peek(ifp, pq->bufring);
                if (mhead == NULL) {
                        break;
                }
                /* Initialize transmission state variables. */
                slot = ring->slot + head;
                nmbuf = NMB(na, slot);
                nmbuf_bytes = 0;
                /* If needed, prepare the virtio-net header at the beginning
                 * of the first slot. */
                if (have_vnet_hdr) {
                        struct virtio_net_hdr *vh =
                                        (struct virtio_net_hdr *)nmbuf;

                        /* For performance, we could replace this memset() with
                         * two 8-bytes-wide writes. */
                        memset(nmbuf, 0, PTNET_HDR_SIZE);
                        if (mhead->m_pkthdr.csum_flags & PTNET_ALL_OFFLOAD) {
                                mhead = virtio_net_tx_offload(ifp, mhead, false,
                                                              vh);
                                if (unlikely(!mhead)) {
                                        /* Packet dropped because errors
                                         * occurred while preparing the vnet
                                         * header. Let's go ahead with the next
                                         * packet. */
                                        pq->stats.errors ++;
                                        drbr_advance(ifp, pq->bufring);
                                        continue;
                                }
                        }
                        nm_prdis(1, "%s: [csum_flags %lX] vnet hdr: flags %x "
                                 "csum_start %u csum_ofs %u hdr_len = %u "
                                 "gso_size %u gso_type %x", __func__,
                                 mhead->m_pkthdr.csum_flags, vh->flags,
                                 vh->csum_start, vh->csum_offset, vh->hdr_len,
                                 vh->gso_size, vh->gso_type);

                        nmbuf += PTNET_HDR_SIZE;
                        nmbuf_bytes += PTNET_HDR_SIZE;
                }
                for (mf = mhead; mf; mf = mf->m_next) {
                        uint8_t *mdata = mf->m_data;
                        int mlen = mf->m_len;

                        for (;;) {
                                int copy = NETMAP_BUF_SIZE(na) - nmbuf_bytes;

                                if (mlen < copy) {
                                        copy = mlen;
                                }
                                memcpy(nmbuf, mdata, copy);

                                mdata += copy;
                                mlen -= copy;
                                nmbuf += copy;
                                nmbuf_bytes += copy;

                                if (!mlen) {
                                        break;
                                }

                                slot->len = nmbuf_bytes;
                                slot->flags = NS_MOREFRAG;

                                head = nm_next(head, lim);
                                KASSERT(head != ring->tail,
                                        ("Unexpectedly run out of TX space"));
                                slot = ring->slot + head;
                                nmbuf = NMB(na, slot);
                                nmbuf_bytes = 0;
                        }
                }
                /* Complete last slot and update head. */
                slot->len = nmbuf_bytes;
                slot->flags = 0;

                head = nm_next(head, lim);

                /* Consume the packet just processed. */
                drbr_advance(ifp, pq->bufring);

                /* Copy the packet to listeners. */
                ETHER_BPF_MTAP(ifp, mhead);

                pq->stats.packets ++;
                pq->stats.bytes += mhead->m_pkthdr.len;
                if (mhead->m_flags & M_MCAST) {
                        pq->stats.mcasts ++;
                }

                m_freem(mhead);

                count ++;
                if (++batch_count == PTNET_TX_BATCH) {
                        ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM);
                        batch_count = 0;
                }
        }

        if (batch_count) {
                ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM);
        }

        if (count >= budget && may_resched) {
                DBG(nm_prlim(1, "out of budget: resched, %d mbufs pending\n",
                             drbr_inuse(ifp, pq->bufring)));
                taskqueue_enqueue(pq->taskq, &pq->task);
        }

        PTNET_Q_UNLOCK(pq);

        return count;
}
static int
ptnet_transmit(if_t ifp, struct mbuf *m)
{
        struct ptnet_softc *sc = if_getsoftc(ifp);
        struct ptnet_queue *pq;
        unsigned int queue_idx;
        int err;

        DBG(device_printf(sc->dev, "transmit %p\n", m));

        /* Insert 802.1Q header if needed. */
        if (m->m_flags & M_VLANTAG) {
                m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
                if (m == NULL) {
                        return ENOBUFS;
                }
                m->m_flags &= ~M_VLANTAG;
        }
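        /* Multi-queue dispatch: packets carrying an RSS hash are mapped to
         * the TX queue indexed by their flow-id, the others to the queue of
         * the CPU we are running on; in both cases the index is reduced
         * modulo the number of TX queues right below. */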
        /* Get the flow-id if available. */
        queue_idx = (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) ?
                    m->m_pkthdr.flowid : curcpu;

        if (unlikely(queue_idx >= sc->num_tx_rings)) {
                queue_idx %= sc->num_tx_rings;
        }

        pq = sc->queues + queue_idx;

        err = drbr_enqueue(ifp, pq->bufring, m);
        if (err) {
                /* ENOBUFS when the bufring is full */
                nm_prlim(1, "%s: drbr_enqueue() failed %d\n",
                         __func__, err);
                pq->stats.errors ++;
                return err;
        }

        if (ifp->if_capenable & IFCAP_POLLING) {
                /* If polling is on, the transmit queues will be
                 * drained by the poller. */
                return 0;
        }

        err = ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);

        return (err < 0) ? err : 0;
}
static unsigned int
ptnet_rx_discard(struct netmap_kring *kring, unsigned int head)
{
        struct netmap_ring *ring = kring->ring;
        struct netmap_slot *slot = ring->slot + head;

        for (;;) {
                head = nm_next(head, kring->nkr_num_slots - 1);
                if (!(slot->flags & NS_MOREFRAG) || head == ring->tail) {
                        break;
                }
                slot = ring->slot + head;
        }

        return head;
}
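/* Append nmbuf_len bytes from the netmap buffer nmbuf to the mbuf chain
 * ending at mtail, allocating new MCLBYTES clusters as the current one
 * fills up. Return the new tail of the chain, or NULL if a cluster
 * allocation fails. */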
static inline struct mbuf *
ptnet_rx_slot(struct mbuf *mtail, uint8_t *nmbuf, unsigned int nmbuf_len)
{
        uint8_t *mdata = mtod(mtail, uint8_t *) + mtail->m_len;

        do {
                unsigned int copy;

                if (mtail->m_len == MCLBYTES) {
                        struct mbuf *mf;

                        mf = m_getcl(M_NOWAIT, MT_DATA, 0);
                        if (unlikely(!mf)) {
                                return NULL;
                        }

                        mtail->m_next = mf;
                        mtail = mf;
                        mdata = mtod(mtail, uint8_t *);
                        mtail->m_len = 0;
                }

                copy = MCLBYTES - mtail->m_len;
                if (nmbuf_len < copy) {
                        copy = nmbuf_len;
                }

                memcpy(mdata, nmbuf, copy);

                nmbuf += copy;
                nmbuf_len -= copy;
                mdata += copy;
                mtail->m_len += copy;
        } while (nmbuf_len);

        return mtail;
}
static int
ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
{
        struct ptnet_softc *sc = pq->sc;
        bool have_vnet_hdr = sc->vnet_hdr_len;
        struct nm_csb_atok *atok = pq->atok;
        struct nm_csb_ktoa *ktoa = pq->ktoa;
        struct netmap_adapter *na = &sc->ptna->dr.up;
        struct netmap_kring *kring = na->rx_rings[pq->kring_id];
        struct netmap_ring *ring = kring->ring;
        unsigned int const lim = kring->nkr_num_slots - 1;
        unsigned int batch_count = 0;
        if_t ifp = sc->ifp;
        unsigned int count = 0;
        uint32_t head;

        PTNET_Q_LOCK(pq);

        if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
                goto unlock;
        }

        kring->nr_kflags &= ~NKR_PENDINTR;

        head = ring->head;
        while (count < budget) {
                uint32_t prev_head = head;
                struct mbuf *mhead, *mtail;
                struct virtio_net_hdr *vh;
                struct netmap_slot *slot;
                unsigned int nmbuf_len;
                uint8_t *nmbuf;
                int deliver = 1; /* the mbuf to the network stack. */
host_sync:
                if (head == ring->tail) {
                        /* We ran out of slots, let's see if the host has
                         * added some, by reading hwcur and hwtail from
                         * the CSB. */
                        ptnet_sync_tail(ktoa, kring);

                        if (head == ring->tail) {
                                /* Still no slots available. Reactivate
                                 * interrupts as they were disabled by the
                                 * host thread right before issuing the
                                 * last interrupt. */
                                atok->appl_need_kick = 1;

                                /* Double check for more completed RX slots.
                                 * We need a full barrier to prevent the store
                                 * to atok->appl_need_kick to be reordered with
                                 * the load from ktoa->hwcur and ktoa->hwtail
                                 * (store-load barrier). */
                                nm_stld_barrier();
                                ptnet_sync_tail(ktoa, kring);
                                if (likely(head == ring->tail)) {
                                        break;
                                }
                                atok->appl_need_kick = 0;
                        }
                }
                /* Initialize ring state variables, possibly grabbing the
                 * virtio-net header. */
                slot = ring->slot + head;
                nmbuf = NMB(na, slot);
                nmbuf_len = slot->len;

                vh = (struct virtio_net_hdr *)nmbuf;
                if (have_vnet_hdr) {
                        if (unlikely(nmbuf_len < PTNET_HDR_SIZE)) {
                                /* There is no good reason why host should
                                 * put the header in multiple netmap slots.
                                 * If this is the case, discard. */
                                nm_prlim(1, "Fragmented vnet-hdr: dropping");
                                head = ptnet_rx_discard(kring, head);
                                pq->stats.iqdrops ++;
                                deliver = 0;
                                goto skip;
                        }
                        nm_prdis(1, "%s: vnet hdr: flags %x csum_start %u "
                                 "csum_ofs %u hdr_len = %u gso_size %u "
                                 "gso_type %x", __func__, vh->flags,
                                 vh->csum_start, vh->csum_offset, vh->hdr_len,
                                 vh->gso_size, vh->gso_type);

                        nmbuf += PTNET_HDR_SIZE;
                        nmbuf_len -= PTNET_HDR_SIZE;
                }
                /* Allocate the head of a new mbuf chain.
                 * We use m_getcl() to allocate an mbuf with standard cluster
                 * size (MCLBYTES). In the future we could use m_getjcl()
                 * to choose different sizes. */
                mhead = mtail = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
                if (unlikely(mhead == NULL)) {
                        device_printf(sc->dev, "%s: failed to allocate mbuf "
                                      "head\n", __func__);
                        pq->stats.errors ++;
                        break;
                }

                /* Initialize the mbuf state variables. */
                mhead->m_pkthdr.len = nmbuf_len;
                mtail->m_len = 0;
                /* Scan all the netmap slots containing the current packet. */
                for (;;) {
                        DBG(device_printf(sc->dev, "%s: h %u t %u rcv frag "
                                          "len %u, flags %u\n", __func__,
                                          head, ring->tail, slot->len,
                                          slot->flags));

                        mtail = ptnet_rx_slot(mtail, nmbuf, nmbuf_len);
                        if (unlikely(!mtail)) {
                                /* Ouch. We ran out of memory while processing
                                 * a packet. We have to restore the previous
                                 * head position, free the mbuf chain, and
                                 * schedule the taskqueue to give the packet
                                 * another chance. */
                                device_printf(sc->dev, "%s: failed to allocate"
                                              " mbuf frag, reset head %u --> %u\n",
                                              __func__, head, prev_head);
                                head = prev_head;
                                m_freem(mhead);
                                pq->stats.errors ++;
                                if (may_resched) {
                                        taskqueue_enqueue(pq->taskq,
                                                          &pq->task);
                                }
                                goto escape;
                        }

                        /* We have to increment head irrespective of the
                         * NS_MOREFRAG being set or not. */
                        head = nm_next(head, lim);

                        if (!(slot->flags & NS_MOREFRAG)) {
                                break;
                        }

                        if (unlikely(head == ring->tail)) {
                                /* The very last slot prepared by the host has
                                 * the NS_MOREFRAG set. Drop it and continue
                                 * the outer cycle (to do the double-check). */
                                nm_prlim(1, "Incomplete packet: dropping");
                                m_freem(mhead);
                                pq->stats.iqdrops ++;
                                goto host_sync;
                        }

                        slot = ring->slot + head;
                        nmbuf = NMB(na, slot);
                        nmbuf_len = slot->len;
                        mhead->m_pkthdr.len += nmbuf_len;
                }
                mhead->m_pkthdr.rcvif = ifp;
                mhead->m_pkthdr.csum_flags = 0;

                /* Store the queue idx in the packet header. */
                mhead->m_pkthdr.flowid = pq->kring_id;
                M_HASHTYPE_SET(mhead, M_HASHTYPE_OPAQUE);

                if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
                        struct ether_header *eh;

                        eh = mtod(mhead, struct ether_header *);
                        if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
                                ptnet_vlan_tag_remove(mhead);
                                /*
                                 * With the 802.1Q header removed, update the
                                 * checksum starting location accordingly.
                                 */
                                if (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
                                        vh->csum_start -= ETHER_VLAN_ENCAP_LEN;
                        }
                }
                if (unlikely(have_vnet_hdr && virtio_net_rx_csum(mhead, vh))) {
                        m_freem(mhead);
                        nm_prlim(1, "Csum offload error: dropping");
                        pq->stats.iqdrops ++;
                        deliver = 0;
                }

skip:
                count ++;
                if (++batch_count >= PTNET_RX_BATCH) {
                        /* Some packets have been (or will be) pushed to the network
                         * stack. We need to update the CSB to tell the host about
                         * the new ring->cur and ring->head (RX buffer refill). */
                        ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
                        batch_count = 0;
                }
                if (likely(deliver)) {
                        pq->stats.packets ++;
                        pq->stats.bytes += mhead->m_pkthdr.len;

                        PTNET_Q_UNLOCK(pq);
                        (*ifp->if_input)(ifp, mhead);
                        PTNET_Q_LOCK(pq);
                        /* The ring->head index (and related indices) are
                         * updated under pq lock by ptnet_ring_update().
                         * Since we dropped the lock to call if_input(), we
                         * must reload ring->head and restart processing the
                         * ring from there. */
                        head = ring->head;

                        if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
                                /* The interface has gone down while we didn't
                                 * have the lock. Stop any processing and exit. */
                                goto unlock;
                        }
                }
        }
escape:
        if (batch_count) {
                ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
        }
        if (count >= budget && may_resched) {
                /* If we ran out of budget or the double-check found new
                 * slots to process, schedule the taskqueue. */
                DBG(nm_prlim(1, "out of budget: resched h %u t %u\n",
                             head, ring->tail));
                taskqueue_enqueue(pq->taskq, &pq->task);
        }
unlock:
        PTNET_Q_UNLOCK(pq);

        return count;
}
static void
ptnet_rx_task(void *context, int pending)
{
        struct ptnet_queue *pq = context;

        DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id));
        ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
}

static void
ptnet_tx_task(void *context, int pending)
{
        struct ptnet_queue *pq = context;

        DBG(nm_prlim(1, "%s: pq #%u\n", __func__, pq->kring_id));
        ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);
}
#ifdef DEVICE_POLLING
/* We don't need to handle differently POLL_AND_CHECK_STATUS and
 * POLL_ONLY, since we don't have an Interrupt Status Register. */
static int
ptnet_poll(if_t ifp, enum poll_cmd cmd, int budget)
{
        struct ptnet_softc *sc = if_getsoftc(ifp);
        unsigned int queue_budget;
        unsigned int count = 0;
        bool borrow = false;
        int i;

        KASSERT(sc->num_rings > 0, ("Found no queues while polling ptnet"));
        queue_budget = MAX(budget / sc->num_rings, 1);
        nm_prlim(1, "Per-queue budget is %d", queue_budget);
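        /* Scan the queues in rounds, giving each queue an equal share of
         * the budget. From the second round onwards ("borrow" mode) the
         * per-queue share is capped to the leftover budget, and we stop
         * as soon as a whole round makes no progress. */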
        while (budget) {
                unsigned int rcnt = 0;

                for (i = 0; i < sc->num_rings; i++) {
                        struct ptnet_queue *pq = sc->queues + i;

                        if (borrow) {
                                queue_budget = MIN(queue_budget, budget);
                                if (queue_budget == 0) {
                                        break;
                                }
                        }

                        if (i < sc->num_tx_rings) {
                                rcnt += ptnet_drain_transmit_queue(pq,
                                                queue_budget, false);
                        } else {
                                rcnt += ptnet_rx_eof(pq, queue_budget,
                                                     false);
                        }
                }

                if (!rcnt) {
                        /* A scan of the queues gave no result, we can
                         * stop here. */
                        break;
                }

                if (rcnt > budget) {
                        /* This may happen when initial budget < sc->num_rings,
                         * since one packet budget is given to each queue
                         * anyway. Just pretend we didn't eat "so much". */
                        rcnt = budget;
                }
                count += rcnt;
                budget -= rcnt;
                borrow = true;
        }

        return count;
}
#endif /* DEVICE_POLLING */