/*
 * Copyright (c) 2016, Vincenzo Maffione
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
/* Driver for ptnet paravirtualized network device. */

#include <sys/cdefs.h>

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/taskqueue.h>
#include <sys/smp.h>

#include <machine/smp.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_var.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_media.h>
#include <net/if_vlan_var.h>
#include <net/bpf.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <netinet/sctp.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/selinfo.h>
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <net/netmap_virt.h>
#include <dev/netmap/netmap_mem2.h>
#include <dev/virtio/network/virtio_net.h>
#ifndef PTNET_CSB_ALLOC
#error "No support for on-device CSB"
#endif

#ifndef INET
#error "INET not defined, cannot support offloadings"
#endif

#if __FreeBSD_version >= 1100000
static uint64_t ptnet_get_counter(if_t, ift_counter);
#else
typedef struct ifnet *if_t;
#define if_getsoftc(_ifp)   (_ifp)->if_softc
#endif /* __FreeBSD_version >= 1100000 */

//#define PTNETMAP_STATS

#ifdef DEBUG
#define DBG(x) x
#else   /* !DEBUG */
#define DBG(x)
#endif  /* !DEBUG */

extern int ptnet_vnet_hdr; /* Tunable parameter */
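/* NOTE: ptnet_vnet_hdr is defined by the netmap code. It is presumably
 * settable as a boot-time tunable; the exact knob name depends on where
 * the variable is declared, so it is not visible from this file. */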
struct ptnet_queue_stats {
        uint64_t packets; /* if_[io]packets */
        uint64_t bytes;   /* if_[io]bytes */
        uint64_t errors;  /* if_[io]errors */
        uint64_t iqdrops; /* if_iqdrops */
        uint64_t mcasts;  /* if_[io]mcasts */
#ifdef PTNETMAP_STATS
        uint64_t kicks;
        uint64_t intrs;
#endif /* PTNETMAP_STATS */
};

struct ptnet_queue {
        struct ptnet_softc       *sc;
        struct resource          *irq;
        void                     *cookie;
        int                      kring_id;
        struct ptnet_ring        *ptring;
        unsigned int             kick;
        struct mtx               lock;
        struct buf_ring          *bufring; /* for TX queues */
        struct ptnet_queue_stats stats;
#ifdef PTNETMAP_STATS
        struct ptnet_queue_stats last_stats;
#endif /* PTNETMAP_STATS */
        struct taskqueue         *taskq;
        struct task              task;
        char                     lock_name[16];
};
#define PTNET_Q_LOCK(_pq)       mtx_lock(&(_pq)->lock)
#define PTNET_Q_TRYLOCK(_pq)    mtx_trylock(&(_pq)->lock)
#define PTNET_Q_UNLOCK(_pq)     mtx_unlock(&(_pq)->lock)
struct ptnet_softc {
        device_t                dev;
        if_t                    ifp;
        struct ifmedia          media;
        struct mtx              lock;
        char                    lock_name[16];
        char                    hwaddr[ETHER_ADDR_LEN];

        /* Mirror of PTFEAT register. */
        uint32_t                ptfeatures;
        unsigned int            vnet_hdr_len;

        /* PCI BARs support. */
        struct resource         *iomem;
        struct resource         *msix_mem;

        unsigned int            num_rings;
        unsigned int            num_tx_rings;
        struct ptnet_queue      *queues;
        struct ptnet_queue      *rxqueues;
        struct ptnet_csb        *csb;

        unsigned int            min_tx_space;

        struct netmap_pt_guest_adapter *ptna;

        struct callout          tick;
#ifdef PTNETMAP_STATS
        struct timeval          last_ts;
#endif /* PTNETMAP_STATS */
};
#define PTNET_CORE_LOCK(_sc)    mtx_lock(&(_sc)->lock)
#define PTNET_CORE_UNLOCK(_sc)  mtx_unlock(&(_sc)->lock)
static int ptnet_probe(device_t);
static int ptnet_attach(device_t);
static int ptnet_detach(device_t);
static int ptnet_suspend(device_t);
static int ptnet_resume(device_t);
static int ptnet_shutdown(device_t);

static void ptnet_init(void *opaque);
static int ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data);
static int ptnet_init_locked(struct ptnet_softc *sc);
static int ptnet_stop(struct ptnet_softc *sc);
static int ptnet_transmit(if_t ifp, struct mbuf *m);
static int ptnet_drain_transmit_queue(struct ptnet_queue *pq,
                                      unsigned int budget, bool may_resched);
static void ptnet_qflush(if_t ifp);
static void ptnet_tx_task(void *context, int pending);

static int ptnet_media_change(if_t ifp);
static void ptnet_media_status(if_t ifp, struct ifmediareq *ifmr);
#ifdef PTNETMAP_STATS
static void ptnet_tick(void *opaque);
#endif /* PTNETMAP_STATS */

static int ptnet_irqs_init(struct ptnet_softc *sc);
static void ptnet_irqs_fini(struct ptnet_softc *sc);

static uint32_t ptnet_nm_ptctl(if_t ifp, uint32_t cmd);
static int ptnet_nm_config(struct netmap_adapter *na, unsigned *txr,
                           unsigned *txd, unsigned *rxr, unsigned *rxd);
static void ptnet_update_vnet_hdr(struct ptnet_softc *sc);
static int ptnet_nm_register(struct netmap_adapter *na, int onoff);
static int ptnet_nm_txsync(struct netmap_kring *kring, int flags);
static int ptnet_nm_rxsync(struct netmap_kring *kring, int flags);

static void ptnet_tx_intr(void *opaque);
static void ptnet_rx_intr(void *opaque);

static unsigned ptnet_rx_discard(struct netmap_kring *kring,
                                 unsigned int head);
static int ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget,
                        bool may_resched);
static void ptnet_rx_task(void *context, int pending);

#ifdef DEVICE_POLLING
static poll_handler_t ptnet_poll;
#endif /* DEVICE_POLLING */
static device_method_t ptnet_methods[] = {
        DEVMETHOD(device_probe,         ptnet_probe),
        DEVMETHOD(device_attach,        ptnet_attach),
        DEVMETHOD(device_detach,        ptnet_detach),
        DEVMETHOD(device_suspend,       ptnet_suspend),
        DEVMETHOD(device_resume,        ptnet_resume),
        DEVMETHOD(device_shutdown,      ptnet_shutdown),
        DEVMETHOD_END
};

static driver_t ptnet_driver = {
        "ptnet",
        ptnet_methods,
        sizeof(struct ptnet_softc)
};

/* We use (SI_ORDER_MIDDLE+2) here, see DEV_MODULE_ORDERED() invocation. */
static devclass_t ptnet_devclass;
DRIVER_MODULE_ORDERED(ptnet, pci, ptnet_driver, ptnet_devclass,
                      NULL, NULL, SI_ORDER_MIDDLE + 2);
static int
ptnet_probe(device_t dev)
{
        if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID ||
            pci_get_device(dev) != PTNETMAP_PCI_NETIF_ID) {
                return (ENXIO);
        }

        device_set_desc(dev, "ptnet network adapter");

        return (BUS_PROBE_DEFAULT);
}
static inline void
ptnet_kick(struct ptnet_queue *pq)
{
#ifdef PTNETMAP_STATS
        pq->stats.kicks++;
#endif /* PTNETMAP_STATS */
        bus_write_4(pq->sc->iomem, pq->kick, 0);
}
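
/* Driver parameters: size of the per-queue mbuf buf_ring, TX/RX packet
 * budgets for a single processing pass, and the batch sizes after which
 * the CSB (and possibly the host) is updated. PTNET_HDR_SIZE is the size
 * of the mergeable-buffers variant of the virtio-net header. */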
#define PTNET_BUF_RING_SIZE     4096
#define PTNET_RX_BUDGET         512
#define PTNET_RX_BATCH          1
#define PTNET_TX_BUDGET         512
#define PTNET_TX_BATCH          64
#define PTNET_HDR_SIZE          sizeof(struct virtio_net_hdr_mrg_rxbuf)
#define PTNET_MAX_PKT_SIZE      65536

#define PTNET_CSUM_OFFLOAD      (CSUM_TCP | CSUM_UDP | CSUM_SCTP)
#define PTNET_CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6 | CSUM_UDP_IPV6 |\
                                 CSUM_SCTP_IPV6)
#define PTNET_ALL_OFFLOAD       (CSUM_TSO | PTNET_CSUM_OFFLOAD |\
                                 PTNET_CSUM_OFFLOAD_IPV6)
static int
ptnet_attach(device_t dev)
{
        uint32_t ptfeatures = 0;
        unsigned int num_rx_rings, num_tx_rings;
        struct netmap_adapter na_arg;
        unsigned int nifp_offset;
        struct ptnet_softc *sc;
        uint32_t macreg;
        if_t ifp;
        int err, rid;
        int i;

        sc = device_get_softc(dev);
        sc->dev = dev;

        /* Setup PCI resources. */
        pci_enable_busmaster(dev);

        rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR);
        sc->iomem = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
                                           RF_ACTIVE);
        if (sc->iomem == NULL) {
                device_printf(dev, "Failed to map I/O BAR\n");
                return (ENXIO);
        }

        /* Negotiate features with the hypervisor. */
        if (ptnet_vnet_hdr) {
                ptfeatures |= PTNETMAP_F_VNET_HDR;
        }
        bus_write_4(sc->iomem, PTNET_IO_PTFEAT, ptfeatures); /* wanted */
        ptfeatures = bus_read_4(sc->iomem, PTNET_IO_PTFEAT); /* acked */
        sc->ptfeatures = ptfeatures;

        /* Allocate CSB and carry out CSB allocation protocol (CSBBAH first,
         * then CSBBAL). */
        sc->csb = malloc(sizeof(struct ptnet_csb), M_DEVBUF,
                         M_NOWAIT | M_ZERO);
        if (sc->csb == NULL) {
                device_printf(dev, "Failed to allocate CSB\n");
                err = ENOMEM;
                goto err_path;
        }

        {
                /*
                 * We use uint64_t rather than vm_paddr_t since we
                 * need 64 bit addresses even on 32 bit platforms.
                 */
                uint64_t paddr = vtophys(sc->csb);

                bus_write_4(sc->iomem, PTNET_IO_CSBBAH,
                            (paddr >> 32) & 0xffffffff);
                bus_write_4(sc->iomem, PTNET_IO_CSBBAL, paddr & 0xffffffff);
        }
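        /* Presumably the device latches the full 64-bit CSB address only on
         * the CSBBAL write, which is why CSBBAH is written first; the detach
         * path below clears the two registers in the same order. */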
        num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS);
        num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS);
        sc->num_rings = num_tx_rings + num_rx_rings;
        sc->num_tx_rings = num_tx_rings;

        /* Allocate and initialize per-queue data structures. */
        sc->queues = malloc(sizeof(struct ptnet_queue) * sc->num_rings,
                            M_DEVBUF, M_NOWAIT | M_ZERO);
        if (sc->queues == NULL) {
                err = ENOMEM;
                goto err_path;
        }
        sc->rxqueues = sc->queues + num_tx_rings;

        for (i = 0; i < sc->num_rings; i++) {
                struct ptnet_queue *pq = sc->queues + i;

                pq->sc = sc;
                pq->kring_id = i;
                pq->kick = PTNET_IO_KICK_BASE + 4 * i;
                pq->ptring = sc->csb->rings + i;
                snprintf(pq->lock_name, sizeof(pq->lock_name), "%s-%d",
                         device_get_nameunit(dev), i);
                mtx_init(&pq->lock, pq->lock_name, NULL, MTX_DEF);
                if (i >= num_tx_rings) {
                        /* RX queue: fix kring_id. */
                        pq->kring_id -= num_tx_rings;
                } else {
                        /* TX queue: allocate buf_ring. */
                        pq->bufring = buf_ring_alloc(PTNET_BUF_RING_SIZE,
                                            M_DEVBUF, M_NOWAIT, &pq->lock);
                        if (pq->bufring == NULL) {
                                err = ENOMEM;
                                goto err_path;
                        }
                }
        }

        sc->min_tx_space = 64; /* Safe initial value. */

        err = ptnet_irqs_init(sc);
        if (err) {
                goto err_path;
        }

        /* Setup Ethernet interface. */
        sc->ifp = ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                device_printf(dev, "Failed to allocate ifnet\n");
                err = ENOMEM;
                goto err_path;
        }

        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
        ifp->if_baudrate = IF_Gbps(10);
        ifp->if_softc = sc;
        ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;
        ifp->if_init = ptnet_init;
        ifp->if_ioctl = ptnet_ioctl;
#if __FreeBSD_version >= 1100000
        ifp->if_get_counter = ptnet_get_counter;
#endif
        ifp->if_transmit = ptnet_transmit;
        ifp->if_qflush = ptnet_qflush;

        ifmedia_init(&sc->media, IFM_IMASK, ptnet_media_change,
                     ptnet_media_status);
        ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL);
        ifmedia_set(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX);
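
        /* Read the MAC address from the device registers: the two most
         * significant bytes of the address are in the low 16 bits of
         * PTNET_IO_MAC_HI, the remaining four bytes in PTNET_IO_MAC_LO. */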
        macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_HI);
        sc->hwaddr[0] = (macreg >> 8) & 0xff;
        sc->hwaddr[1] = macreg & 0xff;
        macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_LO);
        sc->hwaddr[2] = (macreg >> 24) & 0xff;
        sc->hwaddr[3] = (macreg >> 16) & 0xff;
        sc->hwaddr[4] = (macreg >> 8) & 0xff;
        sc->hwaddr[5] = macreg & 0xff;

        ether_ifattach(ifp, sc->hwaddr);

        ifp->if_hdrlen = sizeof(struct ether_vlan_header);
        ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU;
        if (sc->ptfeatures & PTNETMAP_F_VNET_HDR) {
                /* Similarly to what the vtnet driver does, we can emulate
                 * VLAN offloadings by inserting and removing the 802.1Q
                 * header during transmit and receive. We are then able
                 * to do checksum offloading of VLAN frames. */
                ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6
                                        | IFCAP_VLAN_HWCSUM
                                        | IFCAP_TSO | IFCAP_LRO
                                        | IFCAP_VLAN_HWTSO
                                        | IFCAP_VLAN_HWTAGGING;
        }

        ifp->if_capenable = ifp->if_capabilities;
#ifdef DEVICE_POLLING
        /* Don't enable polling by default. */
        ifp->if_capabilities |= IFCAP_POLLING;
#endif

        snprintf(sc->lock_name, sizeof(sc->lock_name),
                 "%s", device_get_nameunit(dev));
        mtx_init(&sc->lock, sc->lock_name, "ptnet core lock", MTX_DEF);
        callout_init_mtx(&sc->tick, &sc->lock, 0);

        /* Prepare a netmap_adapter struct instance to do netmap_attach(). */
        nifp_offset = bus_read_4(sc->iomem, PTNET_IO_NIFP_OFS);
        memset(&na_arg, 0, sizeof(na_arg));
        na_arg.ifp = ifp;
        na_arg.num_tx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS);
        na_arg.num_rx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS);
        na_arg.num_tx_rings = num_tx_rings;
        na_arg.num_rx_rings = num_rx_rings;
        na_arg.nm_config = ptnet_nm_config;
        na_arg.nm_krings_create = ptnet_nm_krings_create;
        na_arg.nm_krings_delete = ptnet_nm_krings_delete;
        na_arg.nm_dtor = ptnet_nm_dtor;
        na_arg.nm_register = ptnet_nm_register;
        na_arg.nm_txsync = ptnet_nm_txsync;
        na_arg.nm_rxsync = ptnet_nm_rxsync;

        netmap_pt_guest_attach(&na_arg, sc->csb, nifp_offset,
                               bus_read_4(sc->iomem, PTNET_IO_HOSTMEMID));
        /* Now a netmap adapter for this ifp has been allocated, and it
         * can be accessed through NA(ifp). We also have to initialize the
         * CSB pointer. */
        sc->ptna = (struct netmap_pt_guest_adapter *)NA(ifp);

        /* If virtio-net header was negotiated, set the virt_hdr_len field in
         * the netmap adapter, to inform users that this netmap adapter
         * requires the application to deal with the headers. */
        ptnet_update_vnet_hdr(sc);

        device_printf(dev, "%s() completed\n", __func__);

        return (0);

err_path:
        ptnet_detach(dev);
        return (err);
}
static int
ptnet_detach(device_t dev)
{
        struct ptnet_softc *sc = device_get_softc(dev);
        int i;

#ifdef DEVICE_POLLING
        if (sc->ifp->if_capenable & IFCAP_POLLING) {
                ether_poll_deregister(sc->ifp);
        }
#endif
        callout_drain(&sc->tick);

        /* Drain taskqueues before calling if_detach. */
        for (i = 0; i < sc->num_rings; i++) {
                struct ptnet_queue *pq = sc->queues + i;

                if (pq->taskq)
                        taskqueue_drain(pq->taskq, &pq->task);
        }

        ether_ifdetach(sc->ifp);

        /* Uninitialize netmap adapters for this device. */
        netmap_detach(sc->ifp);

        ifmedia_removeall(&sc->media);
        if_free(sc->ifp);

        ptnet_irqs_fini(sc);

        if (sc->csb) {
                bus_write_4(sc->iomem, PTNET_IO_CSBBAH, 0);
                bus_write_4(sc->iomem, PTNET_IO_CSBBAL, 0);
                free(sc->csb, M_DEVBUF);
        }

        for (i = 0; i < sc->num_rings; i++) {
                struct ptnet_queue *pq = sc->queues + i;

                if (mtx_initialized(&pq->lock)) {
                        mtx_destroy(&pq->lock);
                }
                if (pq->bufring != NULL) {
                        buf_ring_free(pq->bufring, M_DEVBUF);
                }
        }
        free(sc->queues, M_DEVBUF);

        bus_release_resource(dev, SYS_RES_IOPORT,
                             PCIR_BAR(PTNETMAP_IO_PCI_BAR), sc->iomem);

        mtx_destroy(&sc->lock);

        device_printf(dev, "%s() completed\n", __func__);

        return (0);
}
static int
ptnet_suspend(device_t dev)
{
        struct ptnet_softc *sc;

        sc = device_get_softc(dev);
        (void)sc;

        return (0);
}

static int
ptnet_resume(device_t dev)
{
        struct ptnet_softc *sc;

        sc = device_get_softc(dev);
        (void)sc;

        return (0);
}

static int
ptnet_shutdown(device_t dev)
{
        /*
         * Suspend already does all of what we need to
         * do here; we just never expect to be resumed.
         */
        return (ptnet_suspend(dev));
}
static int
ptnet_irqs_init(struct ptnet_softc *sc)
{
        int rid = PCIR_BAR(PTNETMAP_MSIX_PCI_BAR);
        int nvecs = sc->num_rings;
        device_t dev = sc->dev;
        int err = ENOSPC;
        int cpu_cur;
        int i;

        if (pci_find_cap(dev, PCIY_MSIX, NULL) != 0) {
                device_printf(dev, "Could not find MSI-X capability\n");
                return (ENXIO);
        }

        sc->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
                                              &rid, RF_ACTIVE);
        if (sc->msix_mem == NULL) {
                device_printf(dev, "Failed to allocate MSIX PCI BAR\n");
                return (ENXIO);
        }

        if (pci_msix_count(dev) < nvecs) {
                device_printf(dev, "Not enough MSI-X vectors\n");
                goto err_path;
        }

        err = pci_alloc_msix(dev, &nvecs);
        if (err) {
                device_printf(dev, "Failed to allocate MSI-X vectors\n");
                goto err_path;
        }

        for (i = 0; i < nvecs; i++) {
                struct ptnet_queue *pq = sc->queues + i;

                rid = i + 1;
                pq->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
                                                 RF_ACTIVE);
                if (pq->irq == NULL) {
                        device_printf(dev, "Failed to allocate interrupt "
                                           "for queue #%d\n", i);
                        err = ENOSPC;
                        goto err_path;
                }
        }

        cpu_cur = CPU_FIRST();
        for (i = 0; i < nvecs; i++) {
                struct ptnet_queue *pq = sc->queues + i;
                void (*handler)(void *) = ptnet_tx_intr;

                if (i >= sc->num_tx_rings) {
                        handler = ptnet_rx_intr;
                }
                err = bus_setup_intr(dev, pq->irq, INTR_TYPE_NET | INTR_MPSAFE,
                                     NULL /* intr_filter */, handler,
                                     pq, &pq->cookie);
                if (err) {
                        device_printf(dev, "Failed to register intr handler "
                                           "for queue #%d\n", i);
                        goto err_path;
                }

                bus_describe_intr(dev, pq->irq, pq->cookie, "q%d", i);
                bus_bind_intr(sc->dev, pq->irq, cpu_cur);
                cpu_cur = CPU_NEXT(cpu_cur);
        }

        device_printf(dev, "Allocated %d MSI-X vectors\n", nvecs);

        cpu_cur = CPU_FIRST();
        for (i = 0; i < nvecs; i++) {
                struct ptnet_queue *pq = sc->queues + i;
                void (*handler)(void *context, int pending);

                handler = (i < sc->num_tx_rings) ? ptnet_tx_task : ptnet_rx_task;

                TASK_INIT(&pq->task, 0, handler, pq);
                pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT,
                                        taskqueue_thread_enqueue, &pq->taskq);
                taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d",
                                        device_get_nameunit(sc->dev), cpu_cur);
                cpu_cur = CPU_NEXT(cpu_cur);
        }

        return (0);

err_path:
        ptnet_irqs_fini(sc);
        return (err);
}
static void
ptnet_irqs_fini(struct ptnet_softc *sc)
{
        device_t dev = sc->dev;
        int i;

        for (i = 0; i < sc->num_rings; i++) {
                struct ptnet_queue *pq = sc->queues + i;

                if (pq->taskq)
                        taskqueue_free(pq->taskq);
                if (pq->cookie)
                        bus_teardown_intr(dev, pq->irq, pq->cookie);
                if (pq->irq)
                        bus_release_resource(dev, SYS_RES_IRQ, i + 1, pq->irq);
        }
        pci_release_msi(dev);

        if (sc->msix_mem)
                bus_release_resource(dev, SYS_RES_MEMORY,
                                     PCIR_BAR(PTNETMAP_MSIX_PCI_BAR),
                                     sc->msix_mem);
}
static void
ptnet_init(void *opaque)
{
        struct ptnet_softc *sc = opaque;

        PTNET_CORE_LOCK(sc);
        ptnet_init_locked(sc);
        PTNET_CORE_UNLOCK(sc);
}
static int
ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data)
{
        struct ptnet_softc *sc = if_getsoftc(ifp);
        device_t dev = sc->dev;
        struct ifreq *ifr = (struct ifreq *)data;
        int mask, err = 0;

        switch (cmd) {
        case SIOCSIFFLAGS:
                device_printf(dev, "SIOCSIFFLAGS %x\n", ifp->if_flags);
                PTNET_CORE_LOCK(sc);
                if (ifp->if_flags & IFF_UP) {
                        /* Network stack wants the iff to be up. */
                        err = ptnet_init_locked(sc);
                } else {
                        /* Network stack wants the iff to be down. */
                        err = ptnet_stop(sc);
                }
                /* We don't need to do anything to support IFF_PROMISC,
                 * since that is managed by the backend port. */
                PTNET_CORE_UNLOCK(sc);
                break;

        case SIOCSIFCAP:
                device_printf(dev, "SIOCSIFCAP %x %x\n",
                              ifr->ifr_reqcap, ifp->if_capenable);
                mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
                if (mask & IFCAP_POLLING) {
                        struct ptnet_queue *pq;
                        int i;

                        if (ifr->ifr_reqcap & IFCAP_POLLING) {
                                err = ether_poll_register(ptnet_poll, ifp);
                                if (err) {
                                        break;
                                }
                                /* Stop queues and sync with taskqueues. */
                                ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
                                for (i = 0; i < sc->num_rings; i++) {
                                        pq = sc->queues + i;
                                        /* Make sure the worker sees the
                                         * IFF_DRV_RUNNING down. */
                                        PTNET_Q_LOCK(pq);
                                        pq->ptring->guest_need_kick = 0;
                                        PTNET_Q_UNLOCK(pq);
                                        /* Wait for rescheduling to finish. */
                                        if (pq->taskq) {
                                                taskqueue_drain(pq->taskq,
                                                                &pq->task);
                                        }
                                }
                                ifp->if_drv_flags |= IFF_DRV_RUNNING;
                        } else {
                                err = ether_poll_deregister(ifp);
                                for (i = 0; i < sc->num_rings; i++) {
                                        pq = sc->queues + i;
                                        PTNET_Q_LOCK(pq);
                                        pq->ptring->guest_need_kick = 1;
                                        PTNET_Q_UNLOCK(pq);
                                }
                        }
                }
#endif /* DEVICE_POLLING */
                ifp->if_capenable = ifr->ifr_reqcap;
                break;

        case SIOCSIFMTU:
                /* We support any reasonable MTU. */
                if (ifr->ifr_mtu < ETHERMIN ||
                    ifr->ifr_mtu > PTNET_MAX_PKT_SIZE) {
                        err = EINVAL;
                } else {
                        PTNET_CORE_LOCK(sc);
                        ifp->if_mtu = ifr->ifr_mtu;
                        PTNET_CORE_UNLOCK(sc);
                }
                break;

        case SIOCSIFMEDIA:
        case SIOCGIFMEDIA:
                err = ifmedia_ioctl(ifp, ifr, &sc->media, cmd);
                break;

        default:
                err = ether_ioctl(ifp, cmd, data);
                break;
        }

        return err;
}
static int
ptnet_init_locked(struct ptnet_softc *sc)
{
        if_t ifp = sc->ifp;
        struct netmap_adapter *na_dr = &sc->ptna->dr.up;
        struct netmap_adapter *na_nm = &sc->ptna->hwup.up;
        unsigned int nm_buf_size;
        int ret;

        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
                return 0; /* nothing to do */
        }

        device_printf(sc->dev, "%s\n", __func__);

        /* Translate offload capabilities according to if_capenable. */
        ifp->if_hwassist = 0;
        if (ifp->if_capenable & IFCAP_TXCSUM)
                ifp->if_hwassist |= PTNET_CSUM_OFFLOAD;
        if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
                ifp->if_hwassist |= PTNET_CSUM_OFFLOAD_IPV6;
        if (ifp->if_capenable & IFCAP_TSO4)
                ifp->if_hwassist |= CSUM_IP_TSO;
        if (ifp->if_capenable & IFCAP_TSO6)
                ifp->if_hwassist |= CSUM_IP6_TSO;

        /*
         * Prepare the interface for netmap mode access.
         */
        netmap_update_config(na_dr);

        ret = netmap_mem_finalize(na_dr->nm_mem, na_dr);
        if (ret) {
                device_printf(sc->dev, "netmap_mem_finalize() failed\n");
                return ret;
        }

        if (sc->ptna->backend_regifs == 0) {
                ret = ptnet_nm_krings_create(na_nm);
                if (ret) {
                        device_printf(sc->dev, "ptnet_nm_krings_create() "
                                               "failed\n");
                        goto err_mem_finalize;
                }

                ret = netmap_mem_rings_create(na_dr);
                if (ret) {
                        device_printf(sc->dev, "netmap_mem_rings_create() "
                                               "failed\n");
                        goto err_rings_create;
                }

                ret = netmap_mem_get_lut(na_dr->nm_mem, &na_dr->na_lut);
                if (ret) {
                        device_printf(sc->dev, "netmap_mem_get_lut() "
                                               "failed\n");
                        goto err_get_lut;
                }
        }

        ret = ptnet_nm_register(na_dr, 1 /* on */);
        if (ret) {
                goto err_register;
        }

        nm_buf_size = NETMAP_BUF_SIZE(na_dr);

        KASSERT(nm_buf_size > 0, ("Invalid netmap buffer size"));
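        /* A maximum-sized packet spans PTNET_MAX_PKT_SIZE / nm_buf_size
         * netmap buffers; the +2 presumably accounts for integer-division
         * rounding and for the slot taken by the virtio-net header. */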
        sc->min_tx_space = PTNET_MAX_PKT_SIZE / nm_buf_size + 2;
        device_printf(sc->dev, "%s: min_tx_space = %u\n", __func__,
                      sc->min_tx_space);

#ifdef PTNETMAP_STATS
        callout_reset(&sc->tick, hz, ptnet_tick, sc);
#endif

        ifp->if_drv_flags |= IFF_DRV_RUNNING;

        return 0;

err_register:
        memset(&na_dr->na_lut, 0, sizeof(na_dr->na_lut));
err_get_lut:
        netmap_mem_rings_delete(na_dr);
err_rings_create:
        ptnet_nm_krings_delete(na_nm);
err_mem_finalize:
        netmap_mem_deref(na_dr->nm_mem, na_dr);

        return ret;
}
/* To be called under core lock. */
static int
ptnet_stop(struct ptnet_softc *sc)
{
        if_t ifp = sc->ifp;
        struct netmap_adapter *na_dr = &sc->ptna->dr.up;
        struct netmap_adapter *na_nm = &sc->ptna->hwup.up;
        int i;

        device_printf(sc->dev, "%s\n", __func__);

        if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
                return 0; /* nothing to do */
        }

        /* Clear the driver-ready flag, and synchronize with all the queues,
         * so that after this loop we are sure nobody is working anymore with
         * the device. This scheme is taken from the vtnet driver. */
        ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
        callout_stop(&sc->tick);
        for (i = 0; i < sc->num_rings; i++) {
                PTNET_Q_LOCK(sc->queues + i);
                PTNET_Q_UNLOCK(sc->queues + i);
        }

        ptnet_nm_register(na_dr, 0 /* off */);

        if (sc->ptna->backend_regifs == 0) {
                netmap_mem_rings_delete(na_dr);
                ptnet_nm_krings_delete(na_nm);
        }
        netmap_mem_deref(na_dr->nm_mem, na_dr);

        return 0;
}
static void
ptnet_qflush(if_t ifp)
{
        struct ptnet_softc *sc = if_getsoftc(ifp);
        int i;

        /* Flush all the bufrings and do the interface flush. */
        for (i = 0; i < sc->num_rings; i++) {
                struct ptnet_queue *pq = sc->queues + i;
                struct mbuf *m;

                PTNET_Q_LOCK(pq);
                if (pq->bufring) {
                        while ((m = buf_ring_dequeue_sc(pq->bufring))) {
                                m_freem(m);
                        }
                }
                PTNET_Q_UNLOCK(pq);
        }

        if_qflush(ifp);
}
static int
ptnet_media_change(if_t ifp)
{
        struct ptnet_softc *sc = if_getsoftc(ifp);
        struct ifmedia *ifm = &sc->media;

        if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) {
                return EINVAL;
        }

        return 0;
}
#if __FreeBSD_version >= 1100000
static uint64_t
ptnet_get_counter(if_t ifp, ift_counter cnt)
{
        struct ptnet_softc *sc = if_getsoftc(ifp);
        struct ptnet_queue_stats stats[2];
        int i;

        /* Accumulate statistics over the queues. */
        memset(stats, 0, sizeof(stats));
        for (i = 0; i < sc->num_rings; i++) {
                struct ptnet_queue *pq = sc->queues + i;
                int idx = (i < sc->num_tx_rings) ? 0 : 1;

                stats[idx].packets += pq->stats.packets;
                stats[idx].bytes += pq->stats.bytes;
                stats[idx].errors += pq->stats.errors;
                stats[idx].iqdrops += pq->stats.iqdrops;
                stats[idx].mcasts += pq->stats.mcasts;
        }

        switch (cnt) {
        case IFCOUNTER_IPACKETS:
                return (stats[1].packets);
        case IFCOUNTER_IQDROPS:
                return (stats[1].iqdrops);
        case IFCOUNTER_IERRORS:
                return (stats[1].errors);
        case IFCOUNTER_OPACKETS:
                return (stats[0].packets);
        case IFCOUNTER_OBYTES:
                return (stats[0].bytes);
        case IFCOUNTER_OMCASTS:
                return (stats[0].mcasts);
        default:
                return (if_get_counter_default(ifp, cnt));
        }
}
#endif
#ifdef PTNETMAP_STATS
/* Called under core lock. */
static void
ptnet_tick(void *opaque)
{
        struct ptnet_softc *sc = opaque;
        int i;

        for (i = 0; i < sc->num_rings; i++) {
                struct ptnet_queue *pq = sc->queues + i;
                struct ptnet_queue_stats cur = pq->stats;
                struct timeval now;
                unsigned int delta;

                microtime(&now);
                delta = now.tv_usec - sc->last_ts.tv_usec +
                        (now.tv_sec - sc->last_ts.tv_sec) * 1000000;
                delta /= 1000; /* in milliseconds */

                device_printf(sc->dev, "#%d[%u ms]:pkts %lu, kicks %lu, "
                              "intr %lu\n", i, delta,
                              (cur.packets - pq->last_stats.packets),
                              (cur.kicks - pq->last_stats.kicks),
                              (cur.intrs - pq->last_stats.intrs));
                pq->last_stats = cur;
        }
        microtime(&sc->last_ts);
        callout_schedule(&sc->tick, hz);
}
#endif /* PTNETMAP_STATS */
static void
ptnet_media_status(if_t ifp, struct ifmediareq *ifmr)
{
        /* We are always active, as the backend netmap port is
         * always open in netmap mode. */
        ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE;
        ifmr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX;
}
static uint32_t
ptnet_nm_ptctl(if_t ifp, uint32_t cmd)
{
        struct ptnet_softc *sc = if_getsoftc(ifp);

        /*
         * Write a command and read back error status,
         * with zero meaning success.
         */
        bus_write_4(sc->iomem, PTNET_IO_PTCTL, cmd);
        return bus_read_4(sc->iomem, PTNET_IO_PTCTL);
}
static int
ptnet_nm_config(struct netmap_adapter *na, unsigned *txr, unsigned *txd,
                unsigned *rxr, unsigned *rxd)
{
        struct ptnet_softc *sc = if_getsoftc(na->ifp);

        *txr = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS);
        *rxr = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS);
        *txd = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS);
        *rxd = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS);

        device_printf(sc->dev, "txr %u, rxr %u, txd %u, rxd %u\n",
                      *txr, *rxr, *txd, *rxd);

        return 0;
}
static void
ptnet_sync_from_csb(struct ptnet_softc *sc, struct netmap_adapter *na)
{
        int i;

        /* Sync krings from the host, reading from
         * the CSB. */
        for (i = 0; i < sc->num_rings; i++) {
                struct ptnet_ring *ptring = sc->queues[i].ptring;
                struct netmap_kring *kring;

                if (i < na->num_tx_rings) {
                        kring = na->tx_rings + i;
                } else {
                        kring = na->rx_rings + i - na->num_tx_rings;
                }
                kring->rhead = kring->ring->head = ptring->head;
                kring->rcur = kring->ring->cur = ptring->cur;
                kring->nr_hwcur = ptring->hwcur;
                kring->nr_hwtail = kring->rtail =
                        kring->ring->tail = ptring->hwtail;

                ND("%d,%d: csb {hc %u h %u c %u ht %u}", t, i,
                   ptring->hwcur, ptring->head, ptring->cur,
                   ptring->hwtail);
                ND("%d,%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}",
                   t, i, kring->nr_hwcur, kring->rhead, kring->rcur,
                   kring->ring->head, kring->ring->cur, kring->nr_hwtail,
                   kring->rtail, kring->ring->tail);
        }
}
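
/* Negotiate the virtio-net header length with the hypervisor: write the
 * length we want and read back the value the device actually accepted. */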
static void
ptnet_update_vnet_hdr(struct ptnet_softc *sc)
{
        unsigned int wanted_hdr_len = ptnet_vnet_hdr ? PTNET_HDR_SIZE : 0;

        bus_write_4(sc->iomem, PTNET_IO_VNET_HDR_LEN, wanted_hdr_len);
        sc->vnet_hdr_len = bus_read_4(sc->iomem, PTNET_IO_VNET_HDR_LEN);
        sc->ptna->hwup.up.virt_hdr_len = sc->vnet_hdr_len;
}
static int
ptnet_nm_register(struct netmap_adapter *na, int onoff)
{
        /* device-specific */
        if_t ifp = na->ifp;
        struct ptnet_softc *sc = if_getsoftc(ifp);
        int native = (na == &sc->ptna->hwup.up);
        struct ptnet_queue *pq;
        enum txrx t;
        int ret = 0;
        int i;

        if (!onoff) {
                sc->ptna->backend_regifs--;
        }

        /* If this is the last netmap client, guest interrupt enable flags may
         * be in arbitrary state. Since these flags are going to be used also
         * by the netdevice driver, we have to make sure to start with
         * notifications enabled. Also, schedule NAPI to flush pending packets
         * in the RX rings, since we will not receive further interrupts
         * until these will be processed. */
        if (native && !onoff && na->active_fds == 0) {
                D("Exit netmap mode, re-enable interrupts");
                for (i = 0; i < sc->num_rings; i++) {
                        pq = sc->queues + i;
                        pq->ptring->guest_need_kick = 1;
                }
        }

        if (onoff) {
                if (sc->ptna->backend_regifs == 0) {
                        /* Initialize notification enable fields in the CSB. */
                        for (i = 0; i < sc->num_rings; i++) {
                                pq = sc->queues + i;
                                pq->ptring->host_need_kick = 1;
                                pq->ptring->guest_need_kick =
                                        (!(ifp->if_capenable & IFCAP_POLLING)
                                        && i >= sc->num_tx_rings);
                        }

                        /* Set the virtio-net header length. */
                        ptnet_update_vnet_hdr(sc);

                        /* Make sure the host adapter passed through is ready
                         * for txsync/rxsync. */
                        ret = ptnet_nm_ptctl(ifp, PTNETMAP_PTCTL_CREATE);
                        if (ret) {
                                return ret;
                        }
                }

                /* Sync from CSB must be done after REGIF PTCTL. Skip this
                 * step only if this is a netmap client and it is not the
                 * first one. */
                if ((!native && sc->ptna->backend_regifs == 0) ||
                    (native && na->active_fds == 0)) {
                        ptnet_sync_from_csb(sc, na);
                }

                /* If not native, don't call nm_set_native_flags, since we
                 * don't want to replace the if_transmit method, nor set
                 * NAF_NETMAP_ON. */
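                /* Note the <= bound in the ring scans below: it includes the
                 * extra host kring that netmap keeps after the hardware
                 * rings. */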
                if (native) {
                        for_rx_tx(t) {
                                for (i = 0; i <= nma_get_nrings(na, t); i++) {
                                        struct netmap_kring *kring =
                                                        &NMR(na, t)[i];

                                        if (nm_kring_pending_on(kring)) {
                                                kring->nr_mode = NKR_NETMAP_ON;
                                        }
                                }
                        }
                        nm_set_native_flags(na);
                }
        } else {
                if (native) {
                        nm_clear_native_flags(na);
                        for_rx_tx(t) {
                                for (i = 0; i <= nma_get_nrings(na, t); i++) {
                                        struct netmap_kring *kring =
                                                        &NMR(na, t)[i];

                                        if (nm_kring_pending_off(kring)) {
                                                kring->nr_mode = NKR_NETMAP_OFF;
                                        }
                                }
                        }
                }

                /* Sync from CSB must be done before UNREGIF PTCTL, on the last
                 * netmap client. */
                if (native && na->active_fds == 0) {
                        ptnet_sync_from_csb(sc, na);
                }

                if (sc->ptna->backend_regifs == 0) {
                        ret = ptnet_nm_ptctl(ifp, PTNETMAP_PTCTL_DELETE);
                }
        }

        if (onoff) {
                sc->ptna->backend_regifs++;
        }

        return ret;
}
static int
ptnet_nm_txsync(struct netmap_kring *kring, int flags)
{
        struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
        struct ptnet_queue *pq = sc->queues + kring->ring_id;
        bool notify;

        notify = netmap_pt_guest_txsync(pq->ptring, kring, flags);
        if (notify) {
                ptnet_kick(pq);
        }

        return 0;
}

static int
ptnet_nm_rxsync(struct netmap_kring *kring, int flags)
{
        struct ptnet_softc *sc = if_getsoftc(kring->na->ifp);
        struct ptnet_queue *pq = sc->rxqueues + kring->ring_id;
        bool notify;

        notify = netmap_pt_guest_rxsync(pq->ptring, kring, flags);
        if (notify) {
                ptnet_kick(pq);
        }

        return 0;
}
static void
ptnet_tx_intr(void *opaque)
{
        struct ptnet_queue *pq = opaque;
        struct ptnet_softc *sc = pq->sc;

        DBG(device_printf(sc->dev, "Tx interrupt #%d\n", pq->kring_id));
#ifdef PTNETMAP_STATS
        pq->stats.intrs++;
#endif /* PTNETMAP_STATS */

        if (netmap_tx_irq(sc->ifp, pq->kring_id) != NM_IRQ_PASS) {
                return;
        }

        /* Schedule the taskqueue to process the pending transmission
         * requests. However, vtnet, if_em and if_igb just call ptnet_transmit()
         * here, at least when using MSI-X interrupts. The if_em driver,
         * instead, schedules the taskqueue when using legacy interrupts. */
        taskqueue_enqueue(pq->taskq, &pq->task);
}
static void
ptnet_rx_intr(void *opaque)
{
        struct ptnet_queue *pq = opaque;
        struct ptnet_softc *sc = pq->sc;
        unsigned int unused;

        DBG(device_printf(sc->dev, "Rx interrupt #%d\n", pq->kring_id));
#ifdef PTNETMAP_STATS
        pq->stats.intrs++;
#endif /* PTNETMAP_STATS */

        if (netmap_rx_irq(sc->ifp, pq->kring_id, &unused) != NM_IRQ_PASS) {
                return;
        }

        /* Like vtnet, if_igb and if_em drivers when using MSI-X interrupts,
         * receive-side processing is executed directly in the interrupt
         * service routine. Alternatively, we may schedule the taskqueue. */
        ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
}
/* The following offloadings-related functions are taken from the vtnet
 * driver, but the same functionality is required for the ptnet driver.
 * As a temporary solution, I copied this code from vtnet and I started
 * to generalize it (taking away driver-specific statistic accounting),
 * making as few modifications as possible.
 * In the future we need to share these functions between vtnet and ptnet.
 */
static int
ptnet_tx_offload_ctx(struct mbuf *m, int *etype, int *proto, int *start)
{
        struct ether_vlan_header *evh;
        int offset;

        evh = mtod(m, struct ether_vlan_header *);
        if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
                /* BMV: We should handle nested VLAN tags too. */
                *etype = ntohs(evh->evl_proto);
                offset = sizeof(struct ether_vlan_header);
        } else {
                *etype = ntohs(evh->evl_encap_proto);
                offset = sizeof(struct ether_header);
        }

        switch (*etype) {
        case ETHERTYPE_IP: {
                struct ip *ip, iphdr;

                if (__predict_false(m->m_len < offset + sizeof(struct ip))) {
                        m_copydata(m, offset, sizeof(struct ip),
                                   (caddr_t) &iphdr);
                        ip = &iphdr;
                } else
                        ip = (struct ip *)(m->m_data + offset);
                *proto = ip->ip_p;
                *start = offset + (ip->ip_hl << 2);
                break;
        }
#ifdef INET6
        case ETHERTYPE_IPV6:
                *proto = -1;
                *start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto);
                /* Assert the network stack sent us a valid packet. */
                KASSERT(*start > offset,
                        ("%s: mbuf %p start %d offset %d proto %d", __func__, m,
                         *start, offset, *proto));
                break;
#endif
        default:
                /* Here we should increment the tx_csum_bad_ethtype counter. */
                return (EINVAL);
        }

        return (0);
}
static int
ptnet_tx_offload_tso(if_t ifp, struct mbuf *m, int eth_type,
                     int offset, bool allow_ecn, struct virtio_net_hdr *hdr)
{
        static struct timeval lastecn;
        static int curecn;
        struct tcphdr *tcp, tcphdr;

        if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) {
                m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr);
                tcp = &tcphdr;
        } else
                tcp = (struct tcphdr *)(m->m_data + offset);

        hdr->hdr_len = offset + (tcp->th_off << 2);
        hdr->gso_size = m->m_pkthdr.tso_segsz;
        hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 :
                        VIRTIO_NET_HDR_GSO_TCPV6;

        if (tcp->th_flags & TH_CWR) {
                /*
                 * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD,
                 * ECN support is not on a per-interface basis, but globally via
                 * the net.inet.tcp.ecn.enable sysctl knob. The default is off.
                 */
                if (!allow_ecn) {
                        if (ppsratecheck(&lastecn, &curecn, 1))
                                if_printf(ifp,
                                    "TSO with ECN not negotiated with host\n");
                        return (ENOTSUP);
                }
                hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
        }

        /* Here we should increment tx_tso counter. */

        return (0);
}
static struct mbuf *
ptnet_tx_offload(if_t ifp, struct mbuf *m, bool allow_ecn,
                 struct virtio_net_hdr *hdr)
{
        int flags, etype, csum_start, proto, error;

        flags = m->m_pkthdr.csum_flags;

        error = ptnet_tx_offload_ctx(m, &etype, &proto, &csum_start);
        if (error)
                goto drop;

        if ((etype == ETHERTYPE_IP && flags & PTNET_CSUM_OFFLOAD) ||
            (etype == ETHERTYPE_IPV6 && flags & PTNET_CSUM_OFFLOAD_IPV6)) {
                /*
                 * We could compare the IP protocol vs the CSUM_ flag too,
                 * but that really should not be necessary.
                 */
                hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM;
                hdr->csum_start = csum_start;
                hdr->csum_offset = m->m_pkthdr.csum_data;
                /* Here we should increment the tx_csum counter. */
        }

        if (flags & CSUM_TSO) {
                if (__predict_false(proto != IPPROTO_TCP)) {
                        /* Likely failed to correctly parse the mbuf.
                         * Here we should increment the tx_tso_not_tcp
                         * counter. */
                        goto drop;
                }

                KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM,
                        ("%s: mbuf %p TSO without checksum offload %#x",
                         __func__, m, flags));

                error = ptnet_tx_offload_tso(ifp, m, etype, csum_start,
                                             allow_ecn, hdr);
                if (error)
                        goto drop;
        }

        return (m);

drop:
        m_freem(m);
        return (NULL);
}
static void
ptnet_vlan_tag_remove(struct mbuf *m)
{
        struct ether_vlan_header *evh;

        evh = mtod(m, struct ether_vlan_header *);
        m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag);
        m->m_flags |= M_VLANTAG;

        /* Strip the 802.1Q header. */
        bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN,
              ETHER_HDR_LEN - ETHER_TYPE_LEN);
        m_adj(m, ETHER_VLAN_ENCAP_LEN);
}
/*
 * Use the checksum offset in the VirtIO header to set the
 * correct CSUM_* flags.
 */
static int
ptnet_rx_csum_by_offset(struct mbuf *m, uint16_t eth_type, int ip_start,
                        struct virtio_net_hdr *hdr)
{
#if defined(INET) || defined(INET6)
        int offset = hdr->csum_start + hdr->csum_offset;
#endif

        /* Only do a basic sanity check on the offset. */
        switch (eth_type) {
#if defined(INET)
        case ETHERTYPE_IP:
                if (__predict_false(offset < ip_start + sizeof(struct ip)))
                        return (1);
                break;
#endif
#if defined(INET6)
        case ETHERTYPE_IPV6:
                if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr)))
                        return (1);
                break;
#endif
        default:
                /* Here we should increment the rx_csum_bad_ethtype counter. */
                return (1);
        }

        /*
         * Use the offset to determine the appropriate CSUM_* flags. This is
         * a bit dirty, but we can get by with it since the checksum offsets
         * happen to be different. We assume the host does not do IPv4
         * header checksum offloading.
         */
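        /* The cases below cannot collide: uh_sum sits at offset 6 within
         * struct udphdr, th_sum at offset 16 within struct tcphdr, and
         * checksum at offset 8 within struct sctphdr. */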
        switch (hdr->csum_offset) {
        case offsetof(struct udphdr, uh_sum):
        case offsetof(struct tcphdr, th_sum):
                m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
                m->m_pkthdr.csum_data = 0xFFFF;
                break;
        case offsetof(struct sctphdr, checksum):
                m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
                break;
        default:
                /* Here we should increment the rx_csum_bad_offset counter. */
                return (1);
        }

        return (0);
}
static int
ptnet_rx_csum_by_parse(struct mbuf *m, uint16_t eth_type, int ip_start,
                       struct virtio_net_hdr *hdr)
{
        int offset, proto;

        switch (eth_type) {
#if defined(INET)
        case ETHERTYPE_IP: {
                struct ip *ip;

                if (__predict_false(m->m_len < ip_start + sizeof(struct ip)))
                        return (1);
                ip = (struct ip *)(m->m_data + ip_start);
                proto = ip->ip_p;
                offset = ip_start + (ip->ip_hl << 2);
                break;
        }
#endif
#if defined(INET6)
        case ETHERTYPE_IPV6:
                if (__predict_false(m->m_len < ip_start +
                                    sizeof(struct ip6_hdr)))
                        return (1);
                offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto);
                if (__predict_false(offset < 0))
                        return (1);
                break;
#endif
        default:
                /* Here we should increment the rx_csum_bad_ethtype counter. */
                return (1);
        }

        switch (proto) {
        case IPPROTO_TCP:
                if (__predict_false(m->m_len < offset + sizeof(struct tcphdr)))
                        return (1);
                m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
                m->m_pkthdr.csum_data = 0xFFFF;
                break;
        case IPPROTO_UDP:
                if (__predict_false(m->m_len < offset + sizeof(struct udphdr)))
                        return (1);
                m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
                m->m_pkthdr.csum_data = 0xFFFF;
                break;
        case IPPROTO_SCTP:
                if (__predict_false(m->m_len < offset + sizeof(struct sctphdr)))
                        return (1);
                m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID;
                break;
        default:
                /*
                 * For the remaining protocols, FreeBSD does not support
                 * checksum offloading, so the checksum will be recomputed.
                 */
#if 0
                if_printf(ifp, "%s: cksum offload of unsupported "
                          "protocol eth_type=%#x proto=%d csum_start=%d "
                          "csum_offset=%d\n", __func__, eth_type, proto,
                          hdr->csum_start, hdr->csum_offset);
#endif
                break;
        }

        return (0);
}
/*
 * Set the appropriate CSUM_* flags. Unfortunately, the information
 * provided is not directly useful to us. The VirtIO header gives the
 * offset of the checksum, which is all Linux needs, but this is not
 * how FreeBSD does things. We are forced to peek inside the packet.
 *
 * It would be nice if VirtIO gave us the L4 protocol or if FreeBSD
 * could accept the offsets and let the stack figure it out.
 */
static int
ptnet_rx_csum(struct mbuf *m, struct virtio_net_hdr *hdr)
{
        struct ether_header *eh;
        struct ether_vlan_header *evh;
        uint16_t eth_type;
        int offset, error;

        eh = mtod(m, struct ether_header *);
        eth_type = ntohs(eh->ether_type);
        if (eth_type == ETHERTYPE_VLAN) {
                /* BMV: We should handle nested VLAN tags too. */
                evh = mtod(m, struct ether_vlan_header *);
                eth_type = ntohs(evh->evl_proto);
                offset = sizeof(struct ether_vlan_header);
        } else {
                offset = sizeof(struct ether_header);
        }

        if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
                error = ptnet_rx_csum_by_offset(m, eth_type, offset, hdr);
        else
                error = ptnet_rx_csum_by_parse(m, eth_type, offset, hdr);

        return (error);
}
/* End of offloading-related functions to be shared with vtnet. */

static inline void
ptnet_sync_tail(struct ptnet_ring *ptring, struct netmap_kring *kring)
{
        struct netmap_ring *ring = kring->ring;

        /* Update hwcur and hwtail as known by the host. */
        ptnetmap_guest_read_kring_csb(ptring, kring);

        /* nm_sync_finalize */
        ring->tail = kring->rtail = kring->nr_hwtail;
}
static inline void
ptnet_ring_update(struct ptnet_queue *pq, struct netmap_kring *kring,
                  unsigned int head, unsigned int sync_flags)
{
        struct netmap_ring *ring = kring->ring;
        struct ptnet_ring *ptring = pq->ptring;

        /* Some packets have been pushed to the netmap ring. We have
         * to tell the host to process the new packets, updating cur
         * and head in the CSB. */
        ring->head = ring->cur = head;

        /* Mimic nm_txsync_prologue/nm_rxsync_prologue. */
        kring->rcur = kring->rhead = head;

        ptnetmap_guest_write_kring_csb(ptring, kring->rcur, kring->rhead);

        /* Kick the host if needed. */
        if (NM_ACCESS_ONCE(ptring->host_need_kick)) {
                ptring->sync_flags = sync_flags;
                ptnet_kick(pq);
        }
}
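
/* True when fewer than _min slots are available for transmission: the
 * distance between _h (the next slot the driver will use) and the last
 * known tail is computed modulo the ring size. */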
#define PTNET_TX_NOSPACE(_h, _k, _min) \
        ((((_h) < (_k)->rtail) ? 0 : (_k)->nkr_num_slots) + \
         (_k)->rtail - (_h)) < (_min)
/* This function may be called by the network stack, or by
 * the taskqueue thread. */
static int
ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget,
                           bool may_resched)
{
        struct ptnet_softc *sc = pq->sc;
        bool have_vnet_hdr = sc->vnet_hdr_len;
        struct netmap_adapter *na = &sc->ptna->dr.up;
        if_t ifp = sc->ifp;
        unsigned int batch_count = 0;
        struct ptnet_ring *ptring;
        struct netmap_kring *kring;
        struct netmap_ring *ring;
        struct netmap_slot *slot;
        unsigned int count = 0;
        unsigned int minspace;
        unsigned int head;
        unsigned int lim;
        struct mbuf *mhead;
        struct mbuf *mf;
        int nmbuf_bytes;
        uint8_t *nmbuf;

        if (!PTNET_Q_TRYLOCK(pq)) {
                /* We failed to acquire the lock, schedule the taskqueue. */
                RD(1, "Deferring TX work");
                if (may_resched) {
                        taskqueue_enqueue(pq->taskq, &pq->task);
                }

                return 0;
        }

        if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
                PTNET_Q_UNLOCK(pq);
                RD(1, "Interface is down");
                return ENETDOWN;
        }

        ptring = pq->ptring;
        kring = na->tx_rings + pq->kring_id;
        ring = kring->ring;
        lim = kring->nkr_num_slots - 1;
        head = ring->head;
        minspace = sc->min_tx_space;

        while (count < budget) {
                if (PTNET_TX_NOSPACE(head, kring, minspace)) {
                        /* We ran out of slots, let's see if the host has
                         * freed up some, by reading hwcur and hwtail from
                         * the CSB. */
                        ptnet_sync_tail(ptring, kring);

                        if (PTNET_TX_NOSPACE(head, kring, minspace)) {
                                /* Still no slots available. Reactivate the
                                 * interrupts so that we can be notified
                                 * when some free slots are made available by
                                 * the host. */
                                ptring->guest_need_kick = 1;

                                /* Double-check. */
                                ptnet_sync_tail(ptring, kring);
                                if (likely(PTNET_TX_NOSPACE(head, kring,
                                                            minspace))) {
                                        break;
                                }

                                RD(1, "Found more slots by doublecheck");
                                /* More slots were freed before reactivating
                                 * the interrupts. */
                                ptring->guest_need_kick = 0;
                        }
                }

                mhead = drbr_peek(ifp, pq->bufring);
                if (mhead == NULL) {
                        break;
                }

                /* Initialize transmission state variables. */
                slot = ring->slot + head;
                nmbuf = NMB(na, slot);
                nmbuf_bytes = 0;

                /* If needed, prepare the virtio-net header at the beginning
                 * of the first slot. */
                if (have_vnet_hdr) {
                        struct virtio_net_hdr *vh =
                                        (struct virtio_net_hdr *)nmbuf;

                        /* For performance, we could replace this memset() with
                         * two 8-bytes-wide writes. */
                        memset(nmbuf, 0, PTNET_HDR_SIZE);
                        if (mhead->m_pkthdr.csum_flags & PTNET_ALL_OFFLOAD) {
                                mhead = ptnet_tx_offload(ifp, mhead, false,
                                                         vh);
                                if (unlikely(!mhead)) {
                                        /* Packet dropped because errors
                                         * occurred while preparing the vnet
                                         * header. Let's go ahead with the next
                                         * packet. */
                                        pq->stats.errors++;
                                        drbr_advance(ifp, pq->bufring);
                                        continue;
                                }
                        }
                        ND(1, "%s: [csum_flags %lX] vnet hdr: flags %x "
                              "csum_start %u csum_ofs %u hdr_len = %u "
                              "gso_size %u gso_type %x", __func__,
                              mhead->m_pkthdr.csum_flags, vh->flags,
                              vh->csum_start, vh->csum_offset, vh->hdr_len,
                              vh->gso_size, vh->gso_type);

                        nmbuf += PTNET_HDR_SIZE;
                        nmbuf_bytes += PTNET_HDR_SIZE;
                }

                for (mf = mhead; mf; mf = mf->m_next) {
                        uint8_t *mdata = mf->m_data;
                        int mlen = mf->m_len;

                        for (;;) {
                                int copy = NETMAP_BUF_SIZE(na) - nmbuf_bytes;

                                if (mlen < copy) {
                                        copy = mlen;
                                }
                                memcpy(nmbuf, mdata, copy);

                                mdata += copy;
                                mlen -= copy;
                                nmbuf += copy;
                                nmbuf_bytes += copy;

                                if (!mlen) {
                                        break;
                                }

                                slot->len = nmbuf_bytes;
                                slot->flags = NS_MOREFRAG;

                                head = nm_next(head, lim);
                                KASSERT(head != ring->tail,
                                        ("Unexpectedly ran out of TX space"));
                                slot = ring->slot + head;
                                nmbuf = NMB(na, slot);
                                nmbuf_bytes = 0;
                        }
                }

                /* Complete last slot and update head. */
                slot->len = nmbuf_bytes;
                slot->flags = 0;
                head = nm_next(head, lim);

                /* Consume the packet just processed. */
                drbr_advance(ifp, pq->bufring);

                /* Copy the packet to listeners. */
                ETHER_BPF_MTAP(ifp, mhead);

                pq->stats.packets++;
                pq->stats.bytes += mhead->m_pkthdr.len;
                if (mhead->m_flags & M_MCAST) {
                        pq->stats.mcasts++;
                }

                m_freem(mhead);

                count++;
                if (++batch_count == PTNET_TX_BATCH) {
                        ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM);
                        batch_count = 0;
                }
        }

        if (batch_count) {
                ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM);
        }

        if (count >= budget && may_resched) {
                DBG(RD(1, "out of budget: resched, %d mbufs pending\n",
                       drbr_inuse(ifp, pq->bufring)));
                taskqueue_enqueue(pq->taskq, &pq->task);
        }

        PTNET_Q_UNLOCK(pq);

        return count;
}
static int
ptnet_transmit(if_t ifp, struct mbuf *m)
{
        struct ptnet_softc *sc = if_getsoftc(ifp);
        struct ptnet_queue *pq;
        unsigned int queue_idx;
        int err;

        DBG(device_printf(sc->dev, "transmit %p\n", m));

        /* Insert 802.1Q header if needed. */
        if (m->m_flags & M_VLANTAG) {
                m = ether_vlanencap(m, m->m_pkthdr.ether_vtag);
                if (m == NULL) {
                        return ENOBUFS;
                }
                m->m_flags &= ~M_VLANTAG;
        }

        /* Get the flow-id if available. */
        queue_idx = (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) ?
                    m->m_pkthdr.flowid : curcpu;

        if (unlikely(queue_idx >= sc->num_tx_rings)) {
                queue_idx %= sc->num_tx_rings;
        }

        pq = sc->queues + queue_idx;

        err = drbr_enqueue(ifp, pq->bufring, m);
        if (err) {
                /* ENOBUFS when the bufring is full */
                RD(1, "%s: drbr_enqueue() failed %d\n",
                   __func__, err);
                pq->stats.errors++;
                return err;
        }

        if (ifp->if_capenable & IFCAP_POLLING) {
                /* If polling is on, the transmit queues will be
                 * drained by the poller. */
                return 0;
        }

        err = ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);

        return (err < 0) ? err : 0;
}
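
/* Skip the netmap slots of the packet that starts at slot head, following
 * its NS_MOREFRAG chain, and return the index of the first slot past it. */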
static unsigned
ptnet_rx_discard(struct netmap_kring *kring, unsigned int head)
{
        struct netmap_ring *ring = kring->ring;
        struct netmap_slot *slot = ring->slot + head;

        for (;;) {
                head = nm_next(head, kring->nkr_num_slots - 1);
                if (!(slot->flags & NS_MOREFRAG) || head == ring->tail) {
                        break;
                }
                slot = ring->slot + head;
        }

        return head;
}
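
/* Copy nmbuf_len bytes from a netmap buffer into the mbuf chain that ends
 * at mtail, allocating new clusters as needed; returns the new chain tail,
 * or NULL if a cluster allocation fails. */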
static inline struct mbuf *
ptnet_rx_slot(struct mbuf *mtail, uint8_t *nmbuf, unsigned int nmbuf_len)
{
        uint8_t *mdata = mtod(mtail, uint8_t *) + mtail->m_len;

        do {
                unsigned int copy;

                if (mtail->m_len == MCLBYTES) {
                        struct mbuf *mf;

                        mf = m_getcl(M_NOWAIT, MT_DATA, 0);
                        if (unlikely(!mf)) {
                                return NULL;
                        }

                        mtail->m_next = mf;
                        mtail = mf;
                        mdata = mtod(mtail, uint8_t *);
                        mtail->m_len = 0;
                }

                copy = MCLBYTES - mtail->m_len;
                if (nmbuf_len < copy) {
                        copy = nmbuf_len;
                }

                memcpy(mdata, nmbuf, copy);

                nmbuf += copy;
                nmbuf_len -= copy;
                mdata += copy;
                mtail->m_len += copy;
        } while (nmbuf_len);

        return mtail;
}
static int
ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched)
{
        struct ptnet_softc *sc = pq->sc;
        bool have_vnet_hdr = sc->vnet_hdr_len;
        struct ptnet_ring *ptring = pq->ptring;
        struct netmap_adapter *na = &sc->ptna->dr.up;
        struct netmap_kring *kring = na->rx_rings + pq->kring_id;
        struct netmap_ring *ring = kring->ring;
        unsigned int const lim = kring->nkr_num_slots - 1;
        unsigned int head = ring->head;
        unsigned int batch_count = 0;
        if_t ifp = sc->ifp;
        unsigned int count = 0;

        PTNET_Q_LOCK(pq);

        if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
                goto unlock;
        }

        kring->nr_kflags &= ~NKR_PENDINTR;

        while (count < budget) {
                unsigned int prev_head = head;
                struct mbuf *mhead, *mtail;
                struct virtio_net_hdr *vh;
                struct netmap_slot *slot;
                unsigned int nmbuf_len;
                uint8_t *nmbuf;
host_sync:
                if (head == ring->tail) {
                        /* We ran out of slots, let's see if the host has
                         * added some, by reading hwcur and hwtail from
                         * the CSB. */
                        ptnet_sync_tail(ptring, kring);

                        if (head == ring->tail) {
                                /* Still no slots available. Reactivate
                                 * interrupts as they were disabled by the
                                 * host thread right before issuing the
                                 * last interrupt. */
                                ptring->guest_need_kick = 1;

                                /* Double-check. */
                                ptnet_sync_tail(ptring, kring);
                                if (likely(head == ring->tail)) {
                                        break;
                                }
                                ptring->guest_need_kick = 0;
                        }
                }

                /* Initialize ring state variables, possibly grabbing the
                 * virtio-net header. */
                slot = ring->slot + head;
                nmbuf = NMB(na, slot);
                nmbuf_len = slot->len;

                vh = (struct virtio_net_hdr *)nmbuf;
                if (have_vnet_hdr) {
                        if (unlikely(nmbuf_len < PTNET_HDR_SIZE)) {
                                /* There is no good reason why the host should
                                 * put the header in multiple netmap slots.
                                 * If this is the case, discard. */
                                RD(1, "Fragmented vnet-hdr: dropping");
                                head = ptnet_rx_discard(kring, head);
                                pq->stats.iqdrops++;
                                goto skip;
                        }
                        ND(1, "%s: vnet hdr: flags %x csum_start %u "
                              "csum_ofs %u hdr_len = %u gso_size %u "
                              "gso_type %x", __func__, vh->flags,
                              vh->csum_start, vh->csum_offset, vh->hdr_len,
                              vh->gso_size, vh->gso_type);

                        nmbuf += PTNET_HDR_SIZE;
                        nmbuf_len -= PTNET_HDR_SIZE;
                }

                /* Allocate the head of a new mbuf chain.
                 * We use m_getcl() to allocate an mbuf with standard cluster
                 * size (MCLBYTES). In the future we could use m_getjcl()
                 * to choose different sizes. */
                mhead = mtail = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
                if (unlikely(mhead == NULL)) {
                        device_printf(sc->dev, "%s: failed to allocate mbuf "
                                      "head\n", __func__);
                        pq->stats.errors++;
                        break;
                }

                /* Initialize the mbuf state variables. */
                mhead->m_pkthdr.len = nmbuf_len;
                mtail->m_len = 0;

                /* Scan all the netmap slots containing the current packet. */
                for (;;) {
                        DBG(device_printf(sc->dev, "%s: h %u t %u rcv frag "
                                          "len %u, flags %u\n", __func__,
                                          head, ring->tail, slot->len,
                                          slot->flags));

                        mtail = ptnet_rx_slot(mtail, nmbuf, nmbuf_len);
                        if (unlikely(!mtail)) {
                                /* Ouch. We ran out of memory while processing
                                 * a packet. We have to restore the previous
                                 * head position, free the mbuf chain, and
                                 * schedule the taskqueue to give the packet
                                 * another chance. */
                                device_printf(sc->dev, "%s: failed to allocate"
                                              " mbuf frag, reset head %u --> %u\n",
                                              __func__, head, prev_head);
                                head = prev_head;
                                m_freem(mhead);
                                pq->stats.errors++;
                                if (may_resched) {
                                        taskqueue_enqueue(pq->taskq,
                                                          &pq->task);
                                }
                                goto escape;
                        }

                        /* We have to increment head irrespective of the
                         * NS_MOREFRAG being set or not. */
                        head = nm_next(head, lim);

                        if (!(slot->flags & NS_MOREFRAG)) {
                                break;
                        }

                        if (unlikely(head == ring->tail)) {
                                /* The very last slot prepared by the host has
                                 * the NS_MOREFRAG set. Drop it and continue
                                 * the outer cycle (to do the double-check). */
                                RD(1, "Incomplete packet: dropping");
                                m_freem(mhead);
                                pq->stats.iqdrops++;
                                goto host_sync;
                        }

                        slot = ring->slot + head;
                        nmbuf = NMB(na, slot);
                        nmbuf_len = slot->len;
                        mhead->m_pkthdr.len += nmbuf_len;
                }

                mhead->m_pkthdr.rcvif = ifp;
                mhead->m_pkthdr.csum_flags = 0;

                /* Store the queue idx in the packet header. */
                mhead->m_pkthdr.flowid = pq->kring_id;
                M_HASHTYPE_SET(mhead, M_HASHTYPE_OPAQUE);

                if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
                        struct ether_header *eh;

                        eh = mtod(mhead, struct ether_header *);
                        if (eh->ether_type == htons(ETHERTYPE_VLAN)) {
                                ptnet_vlan_tag_remove(mhead);
                                /*
                                 * With the 802.1Q header removed, update the
                                 * checksum starting location accordingly.
                                 */
                                if (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
                                        vh->csum_start -= ETHER_VLAN_ENCAP_LEN;
                        }
                }

                if (have_vnet_hdr && (vh->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM
                                        | VIRTIO_NET_HDR_F_DATA_VALID))) {
                        if (unlikely(ptnet_rx_csum(mhead, vh))) {
                                m_freem(mhead);
                                RD(1, "Csum offload error: dropping");
                                pq->stats.iqdrops++;
                                goto skip;
                        }
                }

                pq->stats.packets++;
                pq->stats.bytes += mhead->m_pkthdr.len;

                PTNET_Q_UNLOCK(pq);
                (*ifp->if_input)(ifp, mhead);
                PTNET_Q_LOCK(pq);

                if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) {
                        /* The interface has gone down while we didn't
                         * have the lock. Stop any processing and exit. */
                        goto unlock;
                }
skip:
                count++;
                if (++batch_count == PTNET_RX_BATCH) {
                        /* Some packets have been pushed to the network stack.
                         * We need to update the CSB to tell the host about
                         * the new ring->cur and ring->head (RX buffer
                         * refill). */
                        ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
                        batch_count = 0;
                }
        }
escape:
        if (batch_count) {
                ptnet_ring_update(pq, kring, head, NAF_FORCE_READ);
        }

        if (count >= budget && may_resched) {
                /* If we ran out of budget or the double-check found new
                 * slots to process, schedule the taskqueue. */
                DBG(RD(1, "out of budget: resched h %u t %u\n",
                       head, ring->tail));
                taskqueue_enqueue(pq->taskq, &pq->task);
        }
unlock:
        PTNET_Q_UNLOCK(pq);

        return count;
}
static void
ptnet_rx_task(void *context, int pending)
{
        struct ptnet_queue *pq = context;

        DBG(RD(1, "%s: pq #%u\n", __func__, pq->kring_id));
        ptnet_rx_eof(pq, PTNET_RX_BUDGET, true);
}

static void
ptnet_tx_task(void *context, int pending)
{
        struct ptnet_queue *pq = context;

        DBG(RD(1, "%s: pq #%u\n", __func__, pq->kring_id));
        ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true);
}
#ifdef DEVICE_POLLING
/* We don't need to handle POLL_AND_CHECK_STATUS and POLL_ONLY differently,
 * since we don't have an Interrupt Status Register. */
static int
ptnet_poll(if_t ifp, enum poll_cmd cmd, int budget)
{
        struct ptnet_softc *sc = if_getsoftc(ifp);
        unsigned int queue_budget;
        unsigned int count = 0;
        bool borrow = false;
        int i;

        KASSERT(sc->num_rings > 0, ("Found no queues while polling ptnet"));
        queue_budget = MAX(budget / sc->num_rings, 1);
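        /* Split the budget evenly across the queues, granting at least one
         * packet to each; on later passes the leftover budget is
         * redistributed ("borrowed") among the queues. */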
2232 RD(1, "Per-queue budget is %d", queue_budget);
2235 unsigned int rcnt = 0;
2237 for (i = 0; i < sc->num_rings; i++) {
2238 struct ptnet_queue *pq = sc->queues + i;
2241 queue_budget = MIN(queue_budget, budget);
2242 if (queue_budget == 0) {
2247 if (i < sc->num_tx_rings) {
2248 rcnt += ptnet_drain_transmit_queue(pq,
2249 queue_budget, false);
2251 rcnt += ptnet_rx_eof(pq, queue_budget,
2257 /* A scan of the queues gave no result, we can
2262 if (rcnt > budget) {
2263 /* This may happen when initial budget < sc->num_rings,
2264 * since one packet budget is given to each queue
2265 * anyway. Just pretend we didn't eat "so much". */
2276 #endif /* DEVICE_POLLING */