/*
 * Copyright (c) 2016, Vincenzo Maffione
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/* Driver for ptnet paravirtualized network device. */

#include <sys/cdefs.h>

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>

#include <machine/smp.h>

#include <net/ethernet.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <netinet/sctp.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include "opt_inet6.h"

#include <sys/selinfo.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <net/netmap_virt.h>
#include <dev/netmap/netmap_mem2.h>
#include <dev/virtio/network/virtio_net.h>
#error "INET not defined, cannot support offloads"
#if __FreeBSD_version >= 1100000
static uint64_t ptnet_get_counter(if_t, ift_counter);
typedef struct ifnet *if_t;
#define if_getsoftc(_ifp)   (_ifp)->if_softc

//#define PTNETMAP_STATS

extern int ptnet_vnet_hdr; /* Tunable parameter */

struct ptnet_queue_stats {
    uint64_t packets;  /* if_[io]packets */
    uint64_t bytes;    /* if_[io]bytes */
    uint64_t errors;   /* if_[io]errors */
    uint64_t iqdrops;  /* if_iqdrops */
    uint64_t mcasts;   /* if_[io]mcasts */
#ifdef PTNETMAP_STATS
#endif /* PTNETMAP_STATS */

    struct ptnet_softc *sc;
    struct resource *irq;
    struct nm_csb_atok *atok;
    struct nm_csb_ktoa *ktoa;
    struct buf_ring *bufring; /* for TX queues */
    struct ptnet_queue_stats stats;
#ifdef PTNETMAP_STATS
    struct ptnet_queue_stats last_stats;
#endif /* PTNETMAP_STATS */
    struct taskqueue *taskq;

#define PTNET_Q_LOCK(_pq)    mtx_lock(&(_pq)->lock)
#define PTNET_Q_TRYLOCK(_pq) mtx_trylock(&(_pq)->lock)
#define PTNET_Q_UNLOCK(_pq)  mtx_unlock(&(_pq)->lock)

    struct ifmedia media;
    char hwaddr[ETHER_ADDR_LEN];

    /* Mirror of PTFEAT register. */
    unsigned int vnet_hdr_len;

    /* PCI BARs support. */
    struct resource *iomem;
    struct resource *msix_mem;

    unsigned int num_rings;
    unsigned int num_tx_rings;
    struct ptnet_queue *queues;
    struct ptnet_queue *rxqueues;
    struct nm_csb_atok *csb_gh;
    struct nm_csb_ktoa *csb_hg;

    unsigned int min_tx_space;

    struct netmap_pt_guest_adapter *ptna;

#ifdef PTNETMAP_STATS
    struct timeval last_ts;
#endif /* PTNETMAP_STATS */

#define PTNET_CORE_LOCK(_sc)   mtx_lock(&(_sc)->lock)
#define PTNET_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->lock)

static int ptnet_probe(device_t);
static int ptnet_attach(device_t);
static int ptnet_detach(device_t);
static int ptnet_suspend(device_t);
static int ptnet_resume(device_t);
static int ptnet_shutdown(device_t);

static void ptnet_init(void *opaque);
static int  ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int  ptnet_init_locked(struct ptnet_softc *sc);
static int  ptnet_stop(struct ptnet_softc *sc);
static int  ptnet_transmit(if_t ifp, struct mbuf *m);
static int  ptnet_drain_transmit_queue(struct ptnet_queue *pq,
static void ptnet_qflush(if_t ifp);
static void ptnet_tx_task(void *context, int pending);

static int  ptnet_media_change(if_t ifp);
static void ptnet_media_status(if_t ifp, struct ifmediareq *ifmr);
#ifdef PTNETMAP_STATS
static void ptnet_tick(void *opaque);

static int  ptnet_irqs_init(struct ptnet_softc *sc);
static void ptnet_irqs_fini(struct ptnet_softc *sc);

static uint32_t ptnet_nm_ptctl(struct ptnet_softc *sc, uint32_t cmd);
static int  ptnet_nm_config(struct netmap_adapter *na,
                struct nm_config_info *info);
static void ptnet_update_vnet_hdr(struct ptnet_softc *sc);
static int  ptnet_nm_register(struct netmap_adapter *na, int onoff);
static int  ptnet_nm_txsync(struct netmap_kring *kring, int flags);
static int  ptnet_nm_rxsync(struct netmap_kring *kring, int flags);
static void ptnet_nm_intr(struct netmap_adapter *na, int onoff);

static void ptnet_tx_intr(void *opaque);
static void ptnet_rx_intr(void *opaque);

static unsigned ptnet_rx_discard(struct netmap_kring *kring,
static int  ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget,
static void ptnet_rx_task(void *context, int pending);

#ifdef DEVICE_POLLING
static poll_handler_t ptnet_poll;

static device_method_t ptnet_methods[] = {
    DEVMETHOD(device_probe,    ptnet_probe),
    DEVMETHOD(device_attach,   ptnet_attach),
    DEVMETHOD(device_detach,   ptnet_detach),
    DEVMETHOD(device_suspend,  ptnet_suspend),
    DEVMETHOD(device_resume,   ptnet_resume),
    DEVMETHOD(device_shutdown, ptnet_shutdown),

static driver_t ptnet_driver = {
    sizeof(struct ptnet_softc)

/* We use (SI_ORDER_MIDDLE+2) here, see DEV_MODULE_ORDERED() invocation. */
static devclass_t ptnet_devclass;
DRIVER_MODULE_ORDERED(ptnet, pci, ptnet_driver, ptnet_devclass,
              NULL, NULL, SI_ORDER_MIDDLE + 2);

ptnet_probe(device_t dev)
    if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID ||
        pci_get_device(dev) != PTNETMAP_PCI_NETIF_ID) {

    device_set_desc(dev, "ptnet network adapter");

    return (BUS_PROBE_DEFAULT);

static inline void ptnet_kick(struct ptnet_queue *pq)
#ifdef PTNETMAP_STATS
#endif /* PTNETMAP_STATS */
    bus_write_4(pq->sc->iomem, pq->kick, 0);

#define PTNET_BUF_RING_SIZE 4096
#define PTNET_RX_BUDGET     512
#define PTNET_RX_BATCH      1
#define PTNET_TX_BUDGET     512
#define PTNET_TX_BATCH      64
#define PTNET_HDR_SIZE      sizeof(struct virtio_net_hdr_mrg_rxbuf)
#define PTNET_MAX_PKT_SIZE  65536

#define PTNET_CSUM_OFFLOAD      (CSUM_TCP | CSUM_UDP | CSUM_SCTP)
#define PTNET_CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6 | CSUM_UDP_IPV6 |\
#define PTNET_ALL_OFFLOAD       (CSUM_TSO | PTNET_CSUM_OFFLOAD |\
                                 PTNET_CSUM_OFFLOAD_IPV6)
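
/*
 * Explanatory note (not in the original sources): the two masks above
 * collect the mbuf csum_flags that can be translated into a virtio-net
 * header, and PTNET_ALL_OFFLOAD adds TSO on top. The TX path can then
 * decide with a single bitwise test whether a header must be prepared:
 *
 *     if (mhead->m_pkthdr.csum_flags & PTNET_ALL_OFFLOAD)
 *         mhead = ptnet_tx_offload(ifp, mhead, false, vh);
 *
 * which is exactly what ptnet_drain_transmit_queue() does below.
 */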
ptnet_attach(device_t dev)
    uint32_t ptfeatures = 0;
    unsigned int num_rx_rings, num_tx_rings;
    struct netmap_adapter na_arg;
    unsigned int nifp_offset;
    struct ptnet_softc *sc;

    sc = device_get_softc(dev);

    /* Setup PCI resources. */
    pci_enable_busmaster(dev);

    rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR);
    sc->iomem = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
    if (sc->iomem == NULL) {
        device_printf(dev, "Failed to map I/O BAR\n");

    /* Negotiate features with the hypervisor. */
    if (ptnet_vnet_hdr) {
        ptfeatures |= PTNETMAP_F_VNET_HDR;
    bus_write_4(sc->iomem, PTNET_IO_PTFEAT, ptfeatures); /* wanted */
    ptfeatures = bus_read_4(sc->iomem, PTNET_IO_PTFEAT); /* acked */
    sc->ptfeatures = ptfeatures;

    num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS);
    num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS);
    sc->num_rings = num_tx_rings + num_rx_rings;
    sc->num_tx_rings = num_tx_rings;

    if (sc->num_rings * sizeof(struct nm_csb_atok) > PAGE_SIZE) {
        device_printf(dev, "CSB cannot handle that many rings (%u)\n",

    /* Allocate CSB and carry out CSB allocation protocol. */
    sc->csb_gh = contigmalloc(2*PAGE_SIZE, M_DEVBUF, M_NOWAIT | M_ZERO,
                  (size_t)0, -1UL, PAGE_SIZE, 0);
    if (sc->csb_gh == NULL) {
        device_printf(dev, "Failed to allocate CSB\n");

    sc->csb_hg = (struct nm_csb_ktoa *)(((char *)sc->csb_gh) + PAGE_SIZE);

    /*
     * We use uint64_t rather than vm_paddr_t since we
     * need 64 bit addresses even on 32 bit platforms.
     */
    uint64_t paddr = vtophys(sc->csb_gh);

    /* CSB allocation protocol: write to BAH first, then
     * to BAL (for both GH and HG sections). */
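    /*
     * Sketch of the handshake implied by the comment above (the
     * latching rule is an assumption about the hypervisor-side device
     * model, which is not visible here): the guest splits each 64-bit
     * physical address into two 32-bit halves, and the device is
     * expected to commit the whole address only when the low half
     * (BAL) arrives, so writing BAH first means the device never
     * observes a torn address:
     *
     *     bus_write_4(io, PTNET_IO_CSB_GH_BAH, paddr >> 32); // staged
     *     bus_write_4(io, PTNET_IO_CSB_GH_BAL, paddr);       // committed
     */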
    bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH,
            (paddr >> 32) & 0xffffffff);
    bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL,
    paddr = vtophys(sc->csb_hg);
    bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH,
            (paddr >> 32) & 0xffffffff);
    bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL,

    /* Allocate and initialize per-queue data structures. */
    sc->queues = malloc(sizeof(struct ptnet_queue) * sc->num_rings,
            M_DEVBUF, M_NOWAIT | M_ZERO);
    if (sc->queues == NULL) {

    sc->rxqueues = sc->queues + num_tx_rings;

    for (i = 0; i < sc->num_rings; i++) {
        struct ptnet_queue *pq = sc->queues + i;

        pq->kick = PTNET_IO_KICK_BASE + 4 * i;
        pq->atok = sc->csb_gh + i;
        pq->ktoa = sc->csb_hg + i;
        snprintf(pq->lock_name, sizeof(pq->lock_name), "%s-%d",
             device_get_nameunit(dev), i);
        mtx_init(&pq->lock, pq->lock_name, NULL, MTX_DEF);
        if (i >= num_tx_rings) {
            /* RX queue: fix kring_id. */
            pq->kring_id -= num_tx_rings;
            /* TX queue: allocate buf_ring. */
            pq->bufring = buf_ring_alloc(PTNET_BUF_RING_SIZE,
                    M_DEVBUF, M_NOWAIT, &pq->lock);
            if (pq->bufring == NULL) {

    sc->min_tx_space = 64; /* Safe initial value. */

    err = ptnet_irqs_init(sc);

    /* Setup Ethernet interface. */
    sc->ifp = ifp = if_alloc(IFT_ETHER);
        device_printf(dev, "Failed to allocate ifnet\n");

    if_initname(ifp, device_get_name(dev), device_get_unit(dev));
    ifp->if_baudrate = IF_Gbps(10);
    ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
    ifp->if_init = ptnet_init;
    ifp->if_ioctl = ptnet_ioctl;
#if __FreeBSD_version >= 1100000
    ifp->if_get_counter = ptnet_get_counter;
    ifp->if_transmit = ptnet_transmit;
    ifp->if_qflush = ptnet_qflush;

    ifmedia_init(&sc->media, IFM_IMASK, ptnet_media_change,
    ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL);
    ifmedia_set(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX);

    macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_HI);
    sc->hwaddr[0] = (macreg >> 8) & 0xff;
    sc->hwaddr[1] = macreg & 0xff;
    macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_LO);
    sc->hwaddr[2] = (macreg >> 24) & 0xff;
    sc->hwaddr[3] = (macreg >> 16) & 0xff;
    sc->hwaddr[4] = (macreg >> 8) & 0xff;
    sc->hwaddr[5] = macreg & 0xff;
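
    /*
     * Explanatory note: as the code above shows, the device exposes
     * the MAC address through two 32-bit registers. PTNET_IO_MAC_HI
     * carries bytes 0-1 of the address in its low 16 bits, and
     * PTNET_IO_MAC_LO carries bytes 2-5, most significant byte first.
     * For example, HI = 0x00000a1b and LO = 0x2c3d4e5f yield the
     * address 0a:1b:2c:3d:4e:5f.
     */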
    ether_ifattach(ifp, sc->hwaddr);

    ifp->if_hdrlen = sizeof(struct ether_vlan_header);
    ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;
    if (sc->ptfeatures & PTNETMAP_F_VNET_HDR) {
        /* Similarly to what the vtnet driver does, we can emulate
         * VLAN offloads by inserting and removing the 802.1Q
         * header during transmit and receive. We are then able
         * to do checksum offloading of VLAN frames. */
        ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6
                    | IFCAP_TSO | IFCAP_LRO
                    | IFCAP_VLAN_HWTAGGING;

    ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
    /* Don't enable polling by default. */
    ifp->if_capabilities |= IFCAP_POLLING;
    snprintf(sc->lock_name, sizeof(sc->lock_name),
         "%s", device_get_nameunit(dev));
    mtx_init(&sc->lock, sc->lock_name, "ptnet core lock", MTX_DEF);
    callout_init_mtx(&sc->tick, &sc->lock, 0);

    /* Prepare a netmap_adapter struct instance to do netmap_attach(). */
    nifp_offset = bus_read_4(sc->iomem, PTNET_IO_NIFP_OFS);
    memset(&na_arg, 0, sizeof(na_arg));
    na_arg.num_tx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS);
    na_arg.num_rx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS);
    na_arg.num_tx_rings = num_tx_rings;
    na_arg.num_rx_rings = num_rx_rings;
    na_arg.nm_config = ptnet_nm_config;
    na_arg.nm_krings_create = ptnet_nm_krings_create;
    na_arg.nm_krings_delete = ptnet_nm_krings_delete;
    na_arg.nm_dtor = ptnet_nm_dtor;
    na_arg.nm_intr = ptnet_nm_intr;
    na_arg.nm_register = ptnet_nm_register;
    na_arg.nm_txsync = ptnet_nm_txsync;
    na_arg.nm_rxsync = ptnet_nm_rxsync;

    netmap_pt_guest_attach(&na_arg, nifp_offset,
                   bus_read_4(sc->iomem, PTNET_IO_HOSTMEMID));
    /* Now a netmap adapter for this ifp has been allocated, and it
     * can be accessed through NA(ifp). We also have to initialize the CSB
     * pointer. */
    sc->ptna = (struct netmap_pt_guest_adapter *)NA(ifp);

    /* If the virtio-net header was negotiated, set the virt_hdr_len field
     * in the netmap adapter, to inform users that this netmap adapter
     * requires the application to deal with the headers. */
    ptnet_update_vnet_hdr(sc);

    device_printf(dev, "%s() completed\n", __func__);

/* Stop host sync-kloop if it was running. */
ptnet_device_shutdown(struct ptnet_softc *sc)
    ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_DELETE);
    bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAH, 0);
    bus_write_4(sc->iomem, PTNET_IO_CSB_GH_BAL, 0);
    bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAH, 0);
    bus_write_4(sc->iomem, PTNET_IO_CSB_HG_BAL, 0);

ptnet_detach(device_t dev)
    struct ptnet_softc *sc = device_get_softc(dev);

    ptnet_device_shutdown(sc);

#ifdef DEVICE_POLLING
    if (sc->ifp->if_capenable & IFCAP_POLLING) {
        ether_poll_deregister(sc->ifp);
    callout_drain(&sc->tick);

    /* Drain taskqueues before calling if_detach. */
    for (i = 0; i < sc->num_rings; i++) {
        struct ptnet_queue *pq = sc->queues + i;

        taskqueue_drain(pq->taskq, &pq->task);

    ether_ifdetach(sc->ifp);

    /* Uninitialize netmap adapters for this device. */
    netmap_detach(sc->ifp);

    ifmedia_removeall(&sc->media);

    contigfree(sc->csb_gh, 2*PAGE_SIZE, M_DEVBUF);

    for (i = 0; i < sc->num_rings; i++) {
        struct ptnet_queue *pq = sc->queues + i;

        if (mtx_initialized(&pq->lock)) {
            mtx_destroy(&pq->lock);
        if (pq->bufring != NULL) {
            buf_ring_free(pq->bufring, M_DEVBUF);
    free(sc->queues, M_DEVBUF);

    bus_release_resource(dev, SYS_RES_IOPORT,
                 PCIR_BAR(PTNETMAP_IO_PCI_BAR), sc->iomem);

    mtx_destroy(&sc->lock);

    device_printf(dev, "%s() completed\n", __func__);

ptnet_suspend(device_t dev)
    struct ptnet_softc *sc = device_get_softc(dev);

ptnet_resume(device_t dev)
    struct ptnet_softc *sc = device_get_softc(dev);

ptnet_shutdown(device_t dev)
    struct ptnet_softc *sc = device_get_softc(dev);

    ptnet_device_shutdown(sc);

ptnet_irqs_init(struct ptnet_softc *sc)
    int rid = PCIR_BAR(PTNETMAP_MSIX_PCI_BAR);
    int nvecs = sc->num_rings;
    device_t dev = sc->dev;

    if (pci_find_cap(dev, PCIY_MSIX, NULL) != 0) {
        device_printf(dev, "Could not find MSI-X capability\n");

    sc->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
    if (sc->msix_mem == NULL) {
        device_printf(dev, "Failed to allocate MSIX PCI BAR\n");

    if (pci_msix_count(dev) < nvecs) {
        device_printf(dev, "Not enough MSI-X vectors\n");

    err = pci_alloc_msix(dev, &nvecs);
        device_printf(dev, "Failed to allocate MSI-X vectors\n");

    for (i = 0; i < nvecs; i++) {
        struct ptnet_queue *pq = sc->queues + i;

        pq->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
        if (pq->irq == NULL) {
            device_printf(dev, "Failed to allocate interrupt "
                      "for queue #%d\n", i);

    cpu_cur = CPU_FIRST();
    for (i = 0; i < nvecs; i++) {
        struct ptnet_queue *pq = sc->queues + i;
        void (*handler)(void *) = ptnet_tx_intr;

        if (i >= sc->num_tx_rings) {
            handler = ptnet_rx_intr;
        err = bus_setup_intr(dev, pq->irq, INTR_TYPE_NET | INTR_MPSAFE,
                     NULL /* intr_filter */, handler,
            device_printf(dev, "Failed to register intr handler "
                      "for queue #%d\n", i);

        bus_describe_intr(dev, pq->irq, pq->cookie, "q%d", i);

        bus_bind_intr(sc->dev, pq->irq, cpu_cur);

        cpu_cur = CPU_NEXT(cpu_cur);

    device_printf(dev, "Allocated %d MSI-X vectors\n", nvecs);

    cpu_cur = CPU_FIRST();
    for (i = 0; i < nvecs; i++) {
        struct ptnet_queue *pq = sc->queues + i;
        void (*handler)(void *context, int pending);
        handler = (i < sc->num_tx_rings) ? ptnet_tx_task : ptnet_rx_task;

        TASK_INIT(&pq->task, 0, handler, pq);
        pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT,
                    taskqueue_thread_enqueue, &pq->taskq);
        taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d",
                    device_get_nameunit(sc->dev), cpu_cur);
        cpu_cur = CPU_NEXT(cpu_cur);

ptnet_irqs_fini(struct ptnet_softc *sc)
    device_t dev = sc->dev;

    for (i = 0; i < sc->num_rings; i++) {
        struct ptnet_queue *pq = sc->queues + i;

        taskqueue_free(pq->taskq);

        bus_teardown_intr(dev, pq->irq, pq->cookie);

        bus_release_resource(dev, SYS_RES_IRQ, i + 1, pq->irq);

    pci_release_msi(dev);

    bus_release_resource(dev, SYS_RES_MEMORY,
                 PCIR_BAR(PTNETMAP_MSIX_PCI_BAR),

ptnet_init(void *opaque)
    struct ptnet_softc *sc = opaque;

    ptnet_init_locked(sc);
    PTNET_CORE_UNLOCK(sc);

ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data)
    struct ptnet_softc *sc = if_getsoftc(ifp);
    device_t dev = sc->dev;
    struct ifreq *ifr = (struct ifreq *)data;
    int mask __unused, err = 0;

        device_printf(dev, "SIOCSIFFLAGS %x\n", ifp->if_flags);

        if (ifp->if_flags & IFF_UP) {
            /* Network stack wants the iff to be up. */
            err = ptnet_init_locked(sc);
            /* Network stack wants the iff to be down. */
            err = ptnet_stop(sc);
        /* We don't need to do anything to support IFF_PROMISC,
         * since that is managed by the backend port. */
        PTNET_CORE_UNLOCK(sc);

        device_printf(dev, "SIOCSIFCAP %x %x\n",
                  ifr->ifr_reqcap, ifp->if_capenable);
        mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
        if (mask & IFCAP_POLLING) {
            struct ptnet_queue *pq;

            if (ifr->ifr_reqcap & IFCAP_POLLING) {
                err = ether_poll_register(ptnet_poll, ifp);
                /* Stop queues and sync with taskqueues. */
                ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
                for (i = 0; i < sc->num_rings; i++) {
                    pq = sc->queues + i;
                    /* Make sure the worker sees the
                     * IFF_DRV_RUNNING down. */
                    pq->atok->appl_need_kick = 0;
                    /* Wait for rescheduling to finish. */
                    taskqueue_drain(pq->taskq,
                ifp->if_drv_flags |= IFF_DRV_RUNNING;
                err = ether_poll_deregister(ifp);
                for (i = 0; i < sc->num_rings; i++) {
                    pq = sc->queues + i;
                    pq->atok->appl_need_kick = 1;
#endif /* DEVICE_POLLING */
        ifp->if_capenable = ifr->ifr_reqcap;

        /* We support any reasonable MTU. */
        if (ifr->ifr_mtu < ETHERMIN ||
            ifr->ifr_mtu > PTNET_MAX_PKT_SIZE) {
        ifp->if_mtu = ifr->ifr_mtu;
        PTNET_CORE_UNLOCK(sc);

        err = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);

        err = ether_ioctl(ifp, cmd, data);

ptnet_init_locked(struct ptnet_softc *sc)
    struct netmap_adapter *na_dr = &sc->ptna->dr.up;
    struct netmap_adapter *na_nm = &sc->ptna->hwup.up;
    unsigned int nm_buf_size;

    if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
        return 0; /* nothing to do */

    device_printf(sc->dev, "%s\n", __func__);

    /* Translate offload capabilities according to if_capenable. */
    ifp->if_hwassist = 0;
    if (ifp->if_capenable & IFCAP_TXCSUM)
        ifp->if_hwassist |= PTNET_CSUM_OFFLOAD;
    if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
        ifp->if_hwassist |= PTNET_CSUM_OFFLOAD_IPV6;
    if (ifp->if_capenable & IFCAP_TSO4)
        ifp->if_hwassist |= CSUM_IP_TSO;
    if (ifp->if_capenable & IFCAP_TSO6)
        ifp->if_hwassist |= CSUM_IP6_TSO;

    /* Prepare the interface for netmap mode access. */
    netmap_update_config(na_dr);

    ret = netmap_mem_finalize(na_dr->nm_mem, na_dr);
        device_printf(sc->dev, "netmap_mem_finalize() failed\n");

    if (sc->ptna->backend_users == 0) {
        ret = ptnet_nm_krings_create(na_nm);
            device_printf(sc->dev, "ptnet_nm_krings_create() "
            goto err_mem_finalize;

        ret = netmap_mem_rings_create(na_dr);
            device_printf(sc->dev, "netmap_mem_rings_create() "
            goto err_rings_create;

        ret = netmap_mem_get_lut(na_dr->nm_mem, &na_dr->na_lut);
            device_printf(sc->dev, "netmap_mem_get_lut() "

    ret = ptnet_nm_register(na_dr, 1 /* on */);

    nm_buf_size = NETMAP_BUF_SIZE(na_dr);

    KASSERT(nm_buf_size > 0, ("Invalid netmap buffer size"));
    sc->min_tx_space = PTNET_MAX_PKT_SIZE / nm_buf_size + 2;
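    /*
     * Explanatory note on the formula above: a maximum-sized packet
     * (PTNET_MAX_PKT_SIZE bytes) can span up to
     * PTNET_MAX_PKT_SIZE / nm_buf_size netmap slots, and the two extra
     * slots are slack for the leading virtio-net header and for the
     * rounding of the integer division. With 2048-byte netmap buffers
     * this gives 65536/2048 + 2 = 34 slots; the TX path refuses to
     * start a packet unless at least this many slots are free, so it
     * never runs dry halfway through a packet.
     */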
    device_printf(sc->dev, "%s: min_tx_space = %u\n", __func__,
#ifdef PTNETMAP_STATS
    callout_reset(&sc->tick, hz, ptnet_tick, sc);

    ifp->if_drv_flags |= IFF_DRV_RUNNING;

    memset(&na_dr->na_lut, 0, sizeof(na_dr->na_lut));
    netmap_mem_rings_delete(na_dr);
    ptnet_nm_krings_delete(na_nm);
    netmap_mem_deref(na_dr->nm_mem, na_dr);

/* To be called under core lock. */
ptnet_stop(struct ptnet_softc *sc)
    struct netmap_adapter *na_dr = &sc->ptna->dr.up;
    struct netmap_adapter *na_nm = &sc->ptna->hwup.up;

    device_printf(sc->dev, "%s\n", __func__);

    if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
        return 0; /* nothing to do */

    /* Clear the driver-ready flag, and synchronize with all the queues,
     * so that after this loop we are sure nobody is working anymore with
     * the device. This scheme is taken from the vtnet driver. */
    ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
    callout_stop(&sc->tick);
    for (i = 0; i < sc->num_rings; i++) {
        PTNET_Q_LOCK(sc->queues + i);
        PTNET_Q_UNLOCK(sc->queues + i);

    ptnet_nm_register(na_dr, 0 /* off */);

    if (sc->ptna->backend_users == 0) {
        netmap_mem_rings_delete(na_dr);
        ptnet_nm_krings_delete(na_nm);
        netmap_mem_deref(na_dr->nm_mem, na_dr);

ptnet_qflush(if_t ifp)
    struct ptnet_softc *sc = if_getsoftc(ifp);

    /* Flush all the bufrings and do the interface flush. */
    for (i = 0; i < sc->num_rings; i++) {
        struct ptnet_queue *pq = sc->queues + i;

        while ((m = buf_ring_dequeue_sc(pq->bufring))) {

ptnet_media_change(if_t ifp)
    struct ptnet_softc *sc = if_getsoftc(ifp);
    struct ifmedia *ifm = &sc->media;

    if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {

#if __FreeBSD_version >= 1100000
ptnet_get_counter(if_t ifp, ift_counter cnt)
    struct ptnet_softc *sc = if_getsoftc(ifp);
    struct ptnet_queue_stats stats[2];

    /* Accumulate statistics over the queues. */
    memset(stats, 0, sizeof(stats));
    for (i = 0; i < sc->num_rings; i++) {
        struct ptnet_queue *pq = sc->queues + i;
        int idx = (i < sc->num_tx_rings) ? 0 : 1;

        stats[idx].packets += pq->stats.packets;
        stats[idx].bytes   += pq->stats.bytes;
        stats[idx].errors  += pq->stats.errors;
        stats[idx].iqdrops += pq->stats.iqdrops;
        stats[idx].mcasts  += pq->stats.mcasts;

    case IFCOUNTER_IPACKETS:
        return (stats[1].packets);
    case IFCOUNTER_IQDROPS:
        return (stats[1].iqdrops);
    case IFCOUNTER_IERRORS:
        return (stats[1].errors);
    case IFCOUNTER_OPACKETS:
        return (stats[0].packets);
    case IFCOUNTER_OBYTES:
        return (stats[0].bytes);
    case IFCOUNTER_OMCASTS:
        return (stats[0].mcasts);
        return (if_get_counter_default(ifp, cnt));

#ifdef PTNETMAP_STATS
/* Called under core lock. */
ptnet_tick(void *opaque)
    struct ptnet_softc *sc = opaque;

    for (i = 0; i < sc->num_rings; i++) {
        struct ptnet_queue *pq = sc->queues + i;
        struct ptnet_queue_stats cur = pq->stats;

        delta = now.tv_usec - sc->last_ts.tv_usec +
            (now.tv_sec - sc->last_ts.tv_sec) * 1000000;
        delta /= 1000; /* in milliseconds */

        device_printf(sc->dev, "#%d[%u ms]:pkts %lu, kicks %lu, "
                  "intr %lu\n", i, delta,
                  (cur.packets - pq->last_stats.packets),
                  (cur.kicks - pq->last_stats.kicks),
                  (cur.intrs - pq->last_stats.intrs));
        pq->last_stats = cur;
    microtime(&sc->last_ts);
    callout_schedule(&sc->tick, hz);
#endif /* PTNETMAP_STATS */

ptnet_media_status(if_t ifp, struct ifmediareq *ifmr)
    /* We are always active, as the backend netmap port is
     * always open in netmap mode. */
    ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
    ifmr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX;

ptnet_nm_ptctl(struct ptnet_softc *sc, uint32_t cmd)
    /*
     * Write a command and read back error status,
     * with zero meaning success.
     */
    bus_write_4(sc->iomem, PTNET_IO_PTCTL, cmd);
    return bus_read_4(sc->iomem, PTNET_IO_PTCTL);

ptnet_nm_config(struct netmap_adapter *na, struct nm_config_info *info)
    struct ptnet_softc *sc = if_getsoftc(na->ifp);

    info->num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS);
    info->num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS);
    info->num_tx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS);
    info->num_rx_descs = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS);
    info->rx_buf_maxsize = NETMAP_BUF_SIZE(na);

    device_printf(sc->dev, "txr %u, rxr %u, txd %u, rxd %u, rxbufsz %u\n",
              info->num_tx_rings, info->num_rx_rings,
              info->num_tx_descs, info->num_rx_descs,
              info->rx_buf_maxsize);

ptnet_sync_from_csb(struct ptnet_softc *sc, struct netmap_adapter *na)
    /* Sync krings from the host, reading from the CSB. */
    for (i = 0; i < sc->num_rings; i++) {
        struct nm_csb_atok *atok = sc->queues[i].atok;
        struct nm_csb_ktoa *ktoa = sc->queues[i].ktoa;
        struct netmap_kring *kring;

        if (i < na->num_tx_rings) {
            kring = na->tx_rings[i];
            kring = na->rx_rings[i - na->num_tx_rings];
        kring->rhead = kring->ring->head = atok->head;
        kring->rcur = kring->ring->cur = atok->cur;
        kring->nr_hwcur = ktoa->hwcur;
        kring->nr_hwtail = kring->rtail =
            kring->ring->tail = ktoa->hwtail;
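
        /*
         * Explanatory note on the CSB fields used above: the "atok"
         * (application-to-kernel, i.e. guest-to-host) entry carries
         * head and cur, which the guest publishes, while the "ktoa"
         * (kernel-to-application) entry carries hwcur and hwtail,
         * which report how far the host has processed the ring.
         * Syncing a kring from the CSB therefore amounts to copying
         * these four indices into the corresponding kring fields.
         */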
        ND("%d,%d: csb {hc %u h %u c %u ht %u}", t, i,
           ktoa->hwcur, atok->head, atok->cur,
        ND("%d,%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}",
           t, i, kring->nr_hwcur, kring->rhead, kring->rcur,
           kring->ring->head, kring->ring->cur, kring->nr_hwtail,
           kring->rtail, kring->ring->tail);

ptnet_update_vnet_hdr(struct ptnet_softc *sc)
    unsigned int wanted_hdr_len = ptnet_vnet_hdr ? PTNET_HDR_SIZE : 0;

    bus_write_4(sc->iomem, PTNET_IO_VNET_HDR_LEN, wanted_hdr_len);
    sc->vnet_hdr_len = bus_read_4(sc->iomem, PTNET_IO_VNET_HDR_LEN);
    sc->ptna->hwup.up.virt_hdr_len = sc->vnet_hdr_len;

ptnet_nm_register(struct netmap_adapter *na, int onoff)
    /* device-specific */
    struct ptnet_softc *sc = if_getsoftc(ifp);
    int native = (na == &sc->ptna->hwup.up);
    struct ptnet_queue *pq;

    sc->ptna->backend_users--;
    /* If this is the last netmap client, guest interrupt enable flags may
     * be in arbitrary state. Since these flags are going to be used also
     * by the netdevice driver, we have to make sure to start with
     * notifications enabled. Also, schedule the RX taskqueues to flush
     * pending packets in the RX rings, since we will not receive further
     * interrupts until these are processed. */
    if (native && !onoff && na->active_fds == 0) {
        D("Exit netmap mode, re-enable interrupts");
        for (i = 0; i < sc->num_rings; i++) {
            pq = sc->queues + i;
            pq->atok->appl_need_kick = 1;

        if (sc->ptna->backend_users == 0) {
            /* Initialize notification enable fields in the CSB. */
            for (i = 0; i < sc->num_rings; i++) {
                pq = sc->queues + i;
                pq->ktoa->kern_need_kick = 1;
                pq->atok->appl_need_kick =
                        (!(ifp->if_capenable & IFCAP_POLLING)
                        && i >= sc->num_tx_rings);
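
            /*
             * Explanatory note on the two flags initialized above:
             * kern_need_kick = 1 asks the guest to kick the host
             * (through ptnet_kick()) when it publishes new work,
             * while appl_need_kick tells the host whether the guest
             * wants interrupts. With the expression above, TX queues
             * start with guest interrupts disabled (the transmit path
             * reclaims completed slots opportunistically, as I read
             * it), while RX queues start enabled unless polling is
             * active.
             */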
            /* Set the virtio-net header length. */
            ptnet_update_vnet_hdr(sc);

            /* Make sure the host adapter passed through is ready
             * for txsync/rxsync. */
            ret = ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_CREATE);
            /* Align the guest krings and rings to the state stored
             * in the CSB. */
            ptnet_sync_from_csb(sc, na);

        /* If not native, don't call nm_set_native_flags, since we don't want
         * to replace if_transmit method, nor set NAF_NETMAP_ON */
            for (i = 0; i <= nma_get_nrings(na, t); i++) {
                struct netmap_kring *kring = NMR(na, t)[i];

                if (nm_kring_pending_on(kring)) {
                    kring->nr_mode = NKR_NETMAP_ON;
            nm_set_native_flags(na);

            nm_clear_native_flags(na);
            for (i = 0; i <= nma_get_nrings(na, t); i++) {
                struct netmap_kring *kring = NMR(na, t)[i];

                if (nm_kring_pending_off(kring)) {
                    kring->nr_mode = NKR_NETMAP_OFF;

        if (sc->ptna->backend_users == 0) {
            ret = ptnet_nm_ptctl(sc, PTNETMAP_PTCTL_DELETE);

    sc->ptna->backend_users++;

ptnet_nm_txsync(struct netmap_kring *kring, int flags)
    struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
    struct ptnet_queue *pq = sc->queues + kring->ring_id;

    notify = netmap_pt_guest_txsync(pq->atok, pq->ktoa, kring, flags);

ptnet_nm_rxsync(struct netmap_kring *kring, int flags)
    struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
    struct ptnet_queue *pq = sc->rxqueues + kring->ring_id;

    notify = netmap_pt_guest_rxsync(pq->atok, pq->ktoa, kring, flags);

ptnet_nm_intr(struct netmap_adapter *na, int onoff)
    struct ptnet_softc *sc = if_getsoftc(na->ifp);

    for (i = 0; i < sc->num_rings; i++) {
        struct ptnet_queue *pq = sc->queues + i;
        pq->atok->appl_need_kick = onoff;

ptnet_tx_intr(void *opaque)
    struct ptnet_queue *pq = opaque;
    struct ptnet_softc *sc = pq->sc;

    DBG(device_printf(sc->dev, "Tx interrupt #%d\n", pq->kring_id));
#ifdef PTNETMAP_STATS
#endif /* PTNETMAP_STATS */

    if (netmap_tx_irq(sc->ifp, pq->kring_id) != NM_IRQ_PASS) {
    /* Schedule the taskqueue to flush pending transmission requests.
     * However, vtnet, if_em and if_igb just call ptnet_transmit() here,
     * at least when using MSI-X interrupts. The if_em driver, instead,
     * schedules the taskqueue when using legacy interrupts. */
    taskqueue_enqueue(pq->taskq, &pq->task);

ptnet_rx_intr(void *opaque)
    struct ptnet_queue *pq = opaque;
    struct ptnet_softc *sc = pq->sc;
    unsigned int unused;

    DBG(device_printf(sc->dev, "Rx interrupt #%d\n", pq->kring_id));
#ifdef PTNETMAP_STATS
#endif /* PTNETMAP_STATS */

    if (netmap_rx_irq(sc->ifp, pq->kring_id, &unused) != NM_IRQ_PASS) {

    /* Like vtnet, if_igb and if_em drivers when using MSI-X interrupts,
     * receive-side processing is executed directly in the interrupt
     * service routine. Alternatively, we may schedule the taskqueue. */
    ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
/* The following offload-related functions are taken from the vtnet
 * driver, but the same functionality is required for the ptnet driver.
 * As a temporary solution, I copied this code from vtnet and started
 * to generalize it (taking away driver-specific statistic accounting),
 * making as few modifications as possible.
 * In the future we need to share these functions between vtnet and ptnet.
 */
ptnet_tx_offload_ctx(struct mbuf *m, int *etype, int *proto, int *start)
    struct ether_vlan_header *evh;

    evh = mtod(m, struct ether_vlan_header *);
    if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
        /* BMV: We should handle nested VLAN tags too. */
        *etype = ntohs(evh->evl_proto);
        offset = sizeof(struct ether_vlan_header);
        *etype = ntohs(evh->evl_encap_proto);
        offset = sizeof(struct ether_header);

    case ETHERTYPE_IP: {
        struct ip *ip, iphdr;
        if (__predict_false(m->m_len < offset + sizeof(struct ip))) {
            m_copydata(m, offset, sizeof(struct ip),
            ip = (struct ip *)(m->m_data + offset);
        *start = offset + (ip->ip_hl << 2);

    case ETHERTYPE_IPV6:
        *start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto);
        /* Assert the network stack sent us a valid packet. */
        KASSERT(*start > offset,
            ("%s: mbuf %p start %d offset %d proto %d", __func__, m,
             *start, offset, *proto));

        /* Here we should increment the tx_csum_bad_ethtype counter. */

ptnet_tx_offload_tso(if_t ifp, struct mbuf *m, int eth_type,
             int offset, bool allow_ecn, struct virtio_net_hdr *hdr)
    static struct timeval lastecn;
    struct tcphdr *tcp, tcphdr;

    if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) {
        m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr);
        tcp = (struct tcphdr *)(m->m_data + offset);

    hdr->hdr_len = offset + (tcp->th_off << 2);
    hdr->gso_size = m->m_pkthdr.tso_segsz;
    hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 :
            VIRTIO_NET_HDR_GSO_TCPV6;

    if (tcp->th_flags & TH_CWR) {
        /*
         * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD,
         * ECN support is not on a per-interface basis, but globally via
         * the net.inet.tcp.ecn.enable sysctl knob. The default is off.
         */
            if (ppsratecheck(&lastecn, &curecn, 1))
                    "TSO with ECN not negotiated with host\n");
        hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;

    /* Here we should increment tx_tso counter. */

static struct mbuf *
ptnet_tx_offload(if_t ifp, struct mbuf *m, bool allow_ecn,
         struct virtio_net_hdr *hdr)
    int flags, etype, csum_start, proto, error;

    flags = m->m_pkthdr.csum_flags;

    error = ptnet_tx_offload_ctx(m, &etype, &proto, &csum_start);

    if ((etype == ETHERTYPE_IP && flags & PTNET_CSUM_OFFLOAD) ||
        (etype == ETHERTYPE_IPV6 && flags & PTNET_CSUM_OFFLOAD_IPV6)) {
        /*
         * We could compare the IP protocol vs the CSUM_ flag too,
         * but that really should not be necessary.
         */
        hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
        hdr->csum_start = csum_start;
        hdr->csum_offset = m->m_pkthdr.csum_data;
        /* Here we should increment the tx_csum counter. */

    if (flags & CSUM_TSO) {
        if (__predict_false(proto != IPPROTO_TCP)) {
            /* Likely failed to correctly parse the mbuf.
             * Here we should increment the tx_tso_not_tcp
             * counter. */

        KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM,
            ("%s: mbuf %p TSO without checksum offload %#x",
             __func__, m, flags));

        error = ptnet_tx_offload_tso(ifp, m, etype, csum_start,

ptnet_vlan_tag_remove(struct mbuf *m)
    struct ether_vlan_header *evh;

    evh = mtod(m, struct ether_vlan_header *);
    m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag);
    m->m_flags |= M_VLANTAG;

    /* Strip the 802.1Q header. */
    bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN,
          ETHER_HDR_LEN - ETHER_TYPE_LEN);
    m_adj(m, ETHER_VLAN_ENCAP_LEN);

/*
 * Use the checksum offset in the VirtIO header to set the
 * correct CSUM_* flags.
 */
ptnet_rx_csum_by_offset(struct mbuf *m, uint16_t eth_type, int ip_start,
            struct virtio_net_hdr *hdr)
#if defined(INET) || defined(INET6)
    int offset = hdr->csum_start + hdr->csum_offset;

    /* Only do a basic sanity check on the offset. */
        if (__predict_false(offset < ip_start + sizeof(struct ip)))

    case ETHERTYPE_IPV6:
        if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr)))

        /* Here we should increment the rx_csum_bad_ethtype counter. */
    /*
     * Use the offset to determine the appropriate CSUM_* flags. This is
     * a bit dirty, but we can get by with it since the checksum offsets
     * happen to be different. We assume the host does not do IPv4
     * header checksum offloading.
     */
    switch (hdr->csum_offset) {
    case offsetof(struct udphdr, uh_sum):
    case offsetof(struct tcphdr, th_sum):
        m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
        m->m_pkthdr.csum_data = 0xFFFF;
    case offsetof(struct sctphdr, checksum):
        m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
        /* Here we should increment the rx_csum_bad_offset counter. */

ptnet_rx_csum_by_parse(struct mbuf *m, uint16_t eth_type, int ip_start,
            struct virtio_net_hdr *hdr)

    case ETHERTYPE_IP: {
        if (__predict_false(m->m_len < ip_start + sizeof(struct ip)))
        ip = (struct ip *)(m->m_data + ip_start);
        offset = ip_start + (ip->ip_hl << 2);

    case ETHERTYPE_IPV6:
        if (__predict_false(m->m_len < ip_start +
                sizeof(struct ip6_hdr)))
        offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto);
        if (__predict_false(offset < 0))

        /* Here we should increment the rx_csum_bad_ethtype counter. */

        if (__predict_false(m->m_len < offset + sizeof(struct tcphdr)))
        m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
        m->m_pkthdr.csum_data = 0xFFFF;

        if (__predict_false(m->m_len < offset + sizeof(struct udphdr)))
        m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
        m->m_pkthdr.csum_data = 0xFFFF;

        if (__predict_false(m->m_len < offset + sizeof(struct sctphdr)))
        m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;

        /*
         * For the remaining protocols, FreeBSD does not support
         * checksum offloading, so the checksum will be recomputed.
         */
            if_printf(ifp, "%s: cksum offload of unsupported "
                "protocol eth_type=%#x proto=%d csum_start=%d "
                "csum_offset=%d\n", __func__, eth_type, proto,
                hdr->csum_start, hdr->csum_offset);
/*
 * Set the appropriate CSUM_* flags. Unfortunately, the information
 * provided is not directly useful to us. The VirtIO header gives the
 * offset of the checksum, which is all Linux needs, but this is not
 * how FreeBSD does things. We are forced to peek inside the packet
 *
 * It would be nice if VirtIO gave us the L4 protocol or if FreeBSD
 * could accept the offsets and let the stack figure it out.
 */
ptnet_rx_csum(struct mbuf *m, struct virtio_net_hdr *hdr)
    struct ether_header *eh;
    struct ether_vlan_header *evh;

    eh = mtod(m, struct ether_header *);
    eth_type = ntohs(eh->ether_type);
    if (eth_type == ETHERTYPE_VLAN) {
        /* BMV: We should handle nested VLAN tags too. */
        evh = mtod(m, struct ether_vlan_header *);
        eth_type = ntohs(evh->evl_proto);
        offset = sizeof(struct ether_vlan_header);
        offset = sizeof(struct ether_header);

    if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
        error = ptnet_rx_csum_by_offset(m, eth_type, offset, hdr);
        error = ptnet_rx_csum_by_parse(m, eth_type, offset, hdr);

/* End of offloading-related functions to be shared with vtnet. */

ptnet_ring_update(struct ptnet_queue *pq, struct netmap_kring *kring,
          unsigned int head, unsigned int sync_flags)
    struct netmap_ring *ring = kring->ring;
    struct nm_csb_atok *atok = pq->atok;
    struct nm_csb_ktoa *ktoa = pq->ktoa;

    /* Some packets have been pushed to the netmap ring. We have
     * to tell the host to process the new packets, updating cur
     * and head in the CSB. */
    ring->head = ring->cur = head;

    /* Mimic nm_txsync_prologue/nm_rxsync_prologue. */
    kring->rcur = kring->rhead = head;

    ptnetmap_guest_write_kring_csb(atok, kring->rcur, kring->rhead);

    /* Kick the host if needed. */
    if (NM_ACCESS_ONCE(ktoa->kern_need_kick)) {
        atok->sync_flags = sync_flags;

#define PTNET_TX_NOSPACE(_h, _k, _min) \
    ((((_h) < (_k)->rtail) ? 0 : (_k)->nkr_num_slots) + \
     (_k)->rtail - (_h)) < (_min)
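
/*
 * Worked example (explanatory): the macro above computes the free TX
 * space as the ring distance from head (_h) to the last known tail
 * ((_k)->rtail), modulo the ring size. With nkr_num_slots = 256,
 * head = 250 and rtail = 10, the free space is 256 + 10 - 250 = 16
 * slots, so PTNET_TX_NOSPACE is true iff the requested minimum
 * (sc->min_tx_space) exceeds 16.
 */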
/* This function may be called by the network stack or by
 * the taskqueue thread. */
ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget,
    struct ptnet_softc *sc = pq->sc;
    bool have_vnet_hdr = sc->vnet_hdr_len;
    struct netmap_adapter *na = &sc->ptna->dr.up;
    unsigned int batch_count = 0;
    struct nm_csb_atok *atok;
    struct nm_csb_ktoa *ktoa;
    struct netmap_kring *kring;
    struct netmap_ring *ring;
    struct netmap_slot *slot;
    unsigned int count = 0;
    unsigned int minspace;

    if (!PTNET_Q_TRYLOCK(pq)) {
        /* We failed to acquire the lock, schedule the taskqueue. */
        RD(1, "Deferring TX work");

        taskqueue_enqueue(pq->taskq, &pq->task);

    if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
        RD(1, "Interface is down");

    kring = na->tx_rings[pq->kring_id];

    lim = kring->nkr_num_slots - 1;

    minspace = sc->min_tx_space;

    while (count < budget) {
        if (PTNET_TX_NOSPACE(head, kring, minspace)) {
1756 * freed up some, by reading hwcur and hwtail from
1758 ptnet_sync_tail(ktoa, kring);
1760 if (PTNET_TX_NOSPACE(head, kring, minspace)) {
1761 /* Still no slots available. Reactivate the
1762 * interrupts so that we can be notified
1763 * when some free slots are made available by
1765 atok->appl_need_kick = 1;
1768 ptnet_sync_tail(ktoa, kring);
1769 if (likely(PTNET_TX_NOSPACE(head, kring,
1774 RD(1, "Found more slots by doublecheck");
1775 /* More slots were freed before reactivating
1776 * the interrupts. */
1777 atok->appl_need_kick = 0;
1781 mhead = drbr_peek(ifp, pq->bufring);
1786 /* Initialize transmission state variables. */
1787 slot = ring->slot + head;
1788 nmbuf = NMB(na, slot);
1791 /* If needed, prepare the virtio-net header at the beginning
1792 * of the first slot. */
1793 if (have_vnet_hdr) {
1794 struct virtio_net_hdr *vh =
1795 (struct virtio_net_hdr *)nmbuf;
            /* For performance, we could replace this memset() with
             * two 8-byte-wide writes. */
            memset(nmbuf, 0, PTNET_HDR_SIZE);
            if (mhead->m_pkthdr.csum_flags & PTNET_ALL_OFFLOAD) {
                mhead = ptnet_tx_offload(ifp, mhead, false,
                if (unlikely(!mhead)) {
                    /* Packet dropped because errors
                     * occurred while preparing the vnet
                     * header. Let's go ahead with the next
                     * packet. */
                    pq->stats.errors++;
                    drbr_advance(ifp, pq->bufring);

            ND(1, "%s: [csum_flags %lX] vnet hdr: flags %x "
                  "csum_start %u csum_ofs %u hdr_len = %u "
                  "gso_size %u gso_type %x", __func__,
                  mhead->m_pkthdr.csum_flags, vh->flags,
                  vh->csum_start, vh->csum_offset, vh->hdr_len,
                  vh->gso_size, vh->gso_type);

            nmbuf += PTNET_HDR_SIZE;
            nmbuf_bytes += PTNET_HDR_SIZE;

        for (mf = mhead; mf; mf = mf->m_next) {
            uint8_t *mdata = mf->m_data;
            int mlen = mf->m_len;

                int copy = NETMAP_BUF_SIZE(na) - nmbuf_bytes;

                memcpy(nmbuf, mdata, copy);

                nmbuf_bytes += copy;

                    slot->len = nmbuf_bytes;
                    slot->flags = NS_MOREFRAG;

                    head = nm_next(head, lim);
                    KASSERT(head != ring->tail,
                        ("Unexpectedly run out of TX space"));
                    slot = ring->slot + head;
                    nmbuf = NMB(na, slot);

        /* Complete last slot and update head. */
        slot->len = nmbuf_bytes;

        head = nm_next(head, lim);

        /* Consume the packet just processed. */
        drbr_advance(ifp, pq->bufring);

        /* Copy the packet to listeners. */
        ETHER_BPF_MTAP(ifp, mhead);

        pq->stats.packets++;
        pq->stats.bytes += mhead->m_pkthdr.len;
        if (mhead->m_flags & M_MCAST) {
            pq->stats.mcasts++;

        if (++batch_count == PTNET_TX_BATCH) {
            ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM);

    ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM);

    if (count >= budget && may_resched) {
        DBG(RD(1, "out of budget: resched, %d mbufs pending\n",
               drbr_inuse(ifp, pq->bufring)));
        taskqueue_enqueue(pq->taskq, &pq->task);

ptnet_transmit(if_t ifp, struct mbuf *m)
    struct ptnet_softc *sc = if_getsoftc(ifp);
    struct ptnet_queue *pq;
    unsigned int queue_idx;

    DBG(device_printf(sc->dev, "transmit %p\n", m));

    /* Insert 802.1Q header if needed. */
    if (m->m_flags & M_VLANTAG) {
        m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
        m->m_flags &= ~M_VLANTAG;

    /* Get the flow-id if available. */
    queue_idx = (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) ?
            m->m_pkthdr.flowid : curcpu;

    if (unlikely(queue_idx >= sc->num_tx_rings)) {
        queue_idx %= sc->num_tx_rings;

    pq = sc->queues + queue_idx;
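
    /*
     * Explanatory note on the selection above: when the stack (or our
     * RX path) has tagged the mbuf with a flow id, we use it so that a
     * given flow always maps to the same TX queue; untagged traffic
     * falls back to the current CPU id, which spreads load while
     * keeping per-CPU locality. Either value may exceed num_tx_rings,
     * hence the modulo reduction above.
     */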
    err = drbr_enqueue(ifp, pq->bufring, m);
        /* ENOBUFS when the bufring is full */
        RD(1, "%s: drbr_enqueue() failed %d\n",
        pq->stats.errors++;

    if (ifp->if_capenable & IFCAP_POLLING) {
        /* If polling is on, the transmit queues will be
         * drained by the poller. */

    err = ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);

    return (err < 0) ? err : 0;

ptnet_rx_discard(struct netmap_kring *kring, unsigned int head)
    struct netmap_ring *ring = kring->ring;
    struct netmap_slot *slot = ring->slot + head;

        head = nm_next(head, kring->nkr_num_slots - 1);
        if (!(slot->flags & NS_MOREFRAG) || head == ring->tail) {
        slot = ring->slot + head;

static inline struct mbuf *
ptnet_rx_slot(struct mbuf *mtail, uint8_t *nmbuf, unsigned int nmbuf_len)
    uint8_t *mdata = mtod(mtail, uint8_t *) + mtail->m_len;

        if (mtail->m_len == MCLBYTES) {

            mf = m_getcl(M_NOWAIT, MT_DATA, 0);
            if (unlikely(!mf)) {

            mdata = mtod(mtail, uint8_t *);

        copy = MCLBYTES - mtail->m_len;
        if (nmbuf_len < copy) {

        memcpy(mdata, nmbuf, copy);

        mtail->m_len += copy;
    } while (nmbuf_len);
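
/*
 * Explanatory note: ptnet_rx_slot() appends the contents of one netmap
 * buffer to the tail of an mbuf chain, allocating a fresh MCLBYTES
 * cluster whenever the current tail fills up, and returns the
 * (possibly new) tail mbuf, or NULL if allocation fails. A caller
 * loops over the slots of a packet doing, roughly:
 *
 *     mtail = ptnet_rx_slot(mtail, nmbuf, nmbuf_len);
 *     if (unlikely(!mtail))
 *         // drop and recover (hypothetical error path)
 *
 * which is the pattern ptnet_rx_eof() follows below.
 */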
ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
    struct ptnet_softc *sc = pq->sc;
    bool have_vnet_hdr = sc->vnet_hdr_len;
    struct nm_csb_atok *atok = pq->atok;
    struct nm_csb_ktoa *ktoa = pq->ktoa;
    struct netmap_adapter *na = &sc->ptna->dr.up;
    struct netmap_kring *kring = na->rx_rings[pq->kring_id];
    struct netmap_ring *ring = kring->ring;
    unsigned int const lim = kring->nkr_num_slots - 1;
    unsigned int batch_count = 0;
    unsigned int count = 0;

    if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {

    kring->nr_kflags &= ~NKR_PENDINTR;

    while (count < budget) {
        uint32_t prev_head = head;
        struct mbuf *mhead, *mtail;
        struct virtio_net_hdr *vh;
        struct netmap_slot *slot;
        unsigned int nmbuf_len;

        int deliver = 1; /* the mbuf to the network stack. */

        if (head == ring->tail) {
            /* We ran out of slots, let's see if the host has
             * added some, by reading hwcur and hwtail from
             * the CSB. */
            ptnet_sync_tail(ktoa, kring);

            if (head == ring->tail) {
                /* Still no slots available. Reactivate
                 * interrupts as they were disabled by the
                 * host thread right before issuing the
                 * last interrupt. */
                atok->appl_need_kick = 1;

                ptnet_sync_tail(ktoa, kring);
                if (likely(head == ring->tail)) {

                atok->appl_need_kick = 0;

        /* Initialize ring state variables, possibly grabbing the
         * virtio-net header. */
        slot = ring->slot + head;
        nmbuf = NMB(na, slot);
        nmbuf_len = slot->len;

        vh = (struct virtio_net_hdr *)nmbuf;
        if (have_vnet_hdr) {
            if (unlikely(nmbuf_len < PTNET_HDR_SIZE)) {
                /* There is no good reason why the host should
                 * put the header in multiple netmap slots.
                 * If this is the case, discard. */
                RD(1, "Fragmented vnet-hdr: dropping");
                head = ptnet_rx_discard(kring, head);
                pq->stats.iqdrops++;

            ND(1, "%s: vnet hdr: flags %x csum_start %u "
                  "csum_ofs %u hdr_len = %u gso_size %u "
                  "gso_type %x", __func__, vh->flags,
                  vh->csum_start, vh->csum_offset, vh->hdr_len,
                  vh->gso_size, vh->gso_type);

            nmbuf += PTNET_HDR_SIZE;
            nmbuf_len -= PTNET_HDR_SIZE;

        /* Allocate the head of a new mbuf chain.
         * We use m_getcl() to allocate an mbuf with standard cluster
         * size (MCLBYTES). In the future we could use m_getjcl()
         * to choose different sizes. */
        mhead = mtail = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
        if (unlikely(mhead == NULL)) {
            device_printf(sc->dev, "%s: failed to allocate mbuf "
                      "head\n", __func__);
            pq->stats.errors++;

        /* Initialize the mbuf state variables. */
        mhead->m_pkthdr.len = nmbuf_len;

        /* Scan all the netmap slots containing the current packet. */
            DBG(device_printf(sc->dev, "%s: h %u t %u rcv frag "
                      "len %u, flags %u\n", __func__,
                      head, ring->tail, slot->len,

            mtail = ptnet_rx_slot(mtail, nmbuf, nmbuf_len);
            if (unlikely(!mtail)) {
                /* Ouch. We ran out of memory while processing
                 * a packet. We have to restore the previous
                 * head position, free the mbuf chain, and
                 * schedule the taskqueue to give the packet
                 * another chance. */
                device_printf(sc->dev, "%s: failed to allocate"
                          " mbuf frag, reset head %u --> %u\n",
                          __func__, head, prev_head);

                pq->stats.errors++;

                taskqueue_enqueue(pq->taskq,

            /* We have to increment head irrespective of the
             * NS_MOREFRAG being set or not. */
            head = nm_next(head, lim);

            if (!(slot->flags & NS_MOREFRAG)) {

            if (unlikely(head == ring->tail)) {
                /* The very last slot prepared by the host has
                 * the NS_MOREFRAG set. Drop it and continue
                 * the outer loop (to do the double-check). */
                RD(1, "Incomplete packet: dropping");

                pq->stats.iqdrops++;

            slot = ring->slot + head;
            nmbuf = NMB(na, slot);
            nmbuf_len = slot->len;
            mhead->m_pkthdr.len += nmbuf_len;

        mhead->m_pkthdr.rcvif = ifp;
        mhead->m_pkthdr.csum_flags = 0;

        /* Store the queue idx in the packet header. */
        mhead->m_pkthdr.flowid = pq->kring_id;
        M_HASHTYPE_SET(mhead, M_HASHTYPE_OPAQUE);

        if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
            struct ether_header *eh;

            eh = mtod(mhead, struct ether_header *);
            if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
                ptnet_vlan_tag_remove(mhead);
                /*
                 * With the 802.1Q header removed, update the
                 * checksum starting location accordingly.
                 */
                if (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
                    vh->csum_start -= ETHER_VLAN_ENCAP_LEN;

        if (have_vnet_hdr && (vh->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM
                    | VIRTIO_NET_HDR_F_DATA_VALID))) {
            if (unlikely(ptnet_rx_csum(mhead, vh))) {

                RD(1, "Csum offload error: dropping");
                pq->stats.iqdrops++;

        if (++batch_count >= PTNET_RX_BATCH) {
            /* Some packets have been (or will be) pushed to the network
             * stack. We need to update the CSB to tell the host about
             * the new ring->cur and ring->head (RX buffer refill). */
            ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);

        if (likely(deliver)) {
            pq->stats.packets++;
            pq->stats.bytes += mhead->m_pkthdr.len;

            (*ifp->if_input)(ifp, mhead);

            /* The ring->head index (and related indices) are
             * updated under pq lock by ptnet_ring_update().
             * Since we dropped the lock to call if_input(), we
             * must reload ring->head and restart processing the
             * ring from there. */

            if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
                /* The interface has gone down while we didn't
                 * have the lock. Stop any processing and exit. */

    ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);

    if (count >= budget && may_resched) {
        /* If we ran out of budget or the double-check found new
         * slots to process, schedule the taskqueue. */
        DBG(RD(1, "out of budget: resched h %u t %u\n",
        taskqueue_enqueue(pq->taskq, &pq->task);

ptnet_rx_task(void *context, int pending)
    struct ptnet_queue *pq = context;

    DBG(RD(1, "%s: pq #%u\n", __func__, pq->kring_id));
    ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);

ptnet_tx_task(void *context, int pending)
    struct ptnet_queue *pq = context;

    DBG(RD(1, "%s: pq #%u\n", __func__, pq->kring_id));
    ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);

#ifdef DEVICE_POLLING
/* We don't need to handle POLL_AND_CHECK_STATUS and POLL_ONLY
 * differently, since we don't have an Interrupt Status Register. */
ptnet_poll(if_t ifp, enum poll_cmd cmd, int budget)
    struct ptnet_softc *sc = if_getsoftc(ifp);
    unsigned int queue_budget;
    unsigned int count = 0;
    bool borrow = false;
    KASSERT(sc->num_rings > 0, ("Found no queues while polling ptnet"));
    queue_budget = MAX(budget / sc->num_rings, 1);
    RD(1, "Per-queue budget is %d", queue_budget);
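
    /*
     * Explanatory note on the loop below: the budget is first divided
     * evenly among the queues (at least one packet each), and the
     * queues are then scanned in rounds. A round that consumes nothing
     * terminates the scan; otherwise the remaining budget is
     * effectively borrowed by the queues that still have work, until
     * the overall budget runs out.
     */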
        unsigned int rcnt = 0;

        for (i = 0; i < sc->num_rings; i++) {
            struct ptnet_queue *pq = sc->queues + i;

            queue_budget = MIN(queue_budget, budget);
            if (queue_budget == 0) {

            if (i < sc->num_tx_rings) {
                rcnt += ptnet_drain_transmit_queue(pq,
                        queue_budget, false);
                rcnt += ptnet_rx_eof(pq, queue_budget,
            /* A scan of the queues gave no result, we can
             * stop here. */
        if (rcnt > budget) {
            /* This may happen when initial budget < sc->num_rings,
             * since one packet budget is given to each queue
             * anyway. Just pretend we didn't eat "so much". */

#endif /* DEVICE_POLLING */