/*
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/module.h>
#include <sys/errno.h>
#include <sys/poll.h>		/* POLLIN, POLLOUT */
#include <sys/kernel.h>		/* types used in module initialization */
#include <sys/conf.h>		/* DEV_MODULE_ORDERED */
#include <sys/endian.h>
#include <sys/syscallsubr.h>	/* kern_ioctl() */

#include <sys/rwlock.h>

#include <vm/vm.h>		/* vtophys */
#include <vm/pmap.h>		/* vtophys */
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <sys/malloc.h>
#include <sys/socket.h>		/* sockaddrs */
#include <sys/selinfo.h>
#include <sys/kthread.h>	/* kthread_add() */
#include <sys/proc.h>		/* PROC_LOCK() */
#include <sys/unistd.h>		/* RFNOWAIT */
#include <sys/sched.h>		/* sched_bind() */
#include <sys/smp.h>		/* mp_maxid */
#include <sys/taskqueue.h>	/* taskqueue_enqueue(), taskqueue_create(), ... */

#include <net/if_var.h>
#include <net/if_types.h>	/* IFT_ETHER */
#include <net/ethernet.h>	/* ether_ifdetach */
#include <net/if_dl.h>		/* LLADDR */
#include <machine/bus.h>	/* bus_dmamap_* */
#include <netinet/in.h>		/* in6_cksum_pseudo() */
#include <machine/in_cksum.h>	/* in_pseudo(), in_cksum_hdr() */

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <net/netmap_virt.h>
#include <dev/netmap/netmap_mem2.h>

/* ======================== FREEBSD-SPECIFIC ROUTINES ================== */

static void
nm_kqueue_notify(void *opaque, int pending)
{
	struct nm_selinfo *si = opaque;

	/* We use a non-zero hint to distinguish this notification call
	 * from the call done in kqueue_scan(), which uses hint=0.
	 */
	KNOTE_UNLOCKED(&si->si.si_note, /*hint=*/0x100);
}

int nm_os_selinfo_init(NM_SELINFO_T *si, const char *name) {
	int err;

	TASK_INIT(&si->ntfytask, 0, nm_kqueue_notify, si);
	si->ntfytq = taskqueue_create(name, M_NOWAIT,
	    taskqueue_thread_enqueue, &si->ntfytq);
	if (si->ntfytq == NULL)
		return -ENOMEM;
	err = taskqueue_start_threads(&si->ntfytq, 1, PI_NET, "tq %s", name);
	if (err) {
		taskqueue_free(si->ntfytq);
		si->ntfytq = NULL;
		return err;
	}

	snprintf(si->mtxname, sizeof(si->mtxname), "nmkl%s", name);
	mtx_init(&si->m, si->mtxname, NULL, MTX_DEF);
	knlist_init_mtx(&si->si.si_note, &si->m);
	si->kqueue_users = 0;

	return (0);
}

void
nm_os_selinfo_uninit(NM_SELINFO_T *si)
{
	if (si->ntfytq == NULL) {
		return;	/* si was not initialized */
	}
	taskqueue_drain(si->ntfytq, &si->ntfytask);
	taskqueue_free(si->ntfytq);
	si->ntfytq = NULL;
	knlist_delete(&si->si.si_note, curthread, /*islocked=*/0);
	knlist_destroy(&si->si.si_note);
	/* now we don't need the mutex anymore */
	mtx_destroy(&si->m);
}
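
/*
 * Memory allocation wrappers. They use M_NOWAIT, so they are safe to
 * call in non-sleepable contexts, and M_ZERO, so callers always get
 * zeroed memory; the price is that they can fail, hence callers must
 * be prepared for a NULL return.
 */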
void *
nm_os_malloc(size_t size)
{
	return malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
}

void *
nm_os_realloc(void *addr, size_t new_size, size_t old_size __unused)
{
	return realloc(addr, new_size, M_DEVBUF, M_NOWAIT | M_ZERO);
}

void
nm_os_free(void *addr)
{
	free(addr, M_DEVBUF);
}

void
nm_os_ifnet_lock(void)
{
	IFNET_RLOCK();
}

void
nm_os_ifnet_unlock(void)
{
	IFNET_RUNLOCK();
}

static int netmap_use_count = 0;

void
nm_os_get_module(void)
{
	netmap_use_count++;
}

void
nm_os_put_module(void)
{
	netmap_use_count--;
}

static void
netmap_ifnet_arrival_handler(void *arg __unused, struct ifnet *ifp)
{
	netmap_undo_zombie(ifp);
}

static void
netmap_ifnet_departure_handler(void *arg __unused, struct ifnet *ifp)
{
	netmap_make_zombie(ifp);
}

static eventhandler_tag nm_ifnet_ah_tag;
static eventhandler_tag nm_ifnet_dh_tag;

int
nm_os_ifnet_init(void)
{
	nm_ifnet_ah_tag =
		EVENTHANDLER_REGISTER(ifnet_arrival_event,
				netmap_ifnet_arrival_handler,
				NULL, EVENTHANDLER_PRI_ANY);
	nm_ifnet_dh_tag =
		EVENTHANDLER_REGISTER(ifnet_departure_event,
				netmap_ifnet_departure_handler,
				NULL, EVENTHANDLER_PRI_ANY);
	return 0;
}

void
nm_os_ifnet_fini(void)
{
	EVENTHANDLER_DEREGISTER(ifnet_arrival_event,
			nm_ifnet_ah_tag);
	EVENTHANDLER_DEREGISTER(ifnet_departure_event,
			nm_ifnet_dh_tag);
}

unsigned
nm_os_ifnet_mtu(struct ifnet *ifp)
{
#if __FreeBSD_version < 1100030
	return ifp->if_data.ifi_mtu;
#else /* __FreeBSD_version >= 1100030 */
	return ifp->if_mtu;
#endif
}

rawsum_t
nm_os_csum_raw(uint8_t *data, size_t len, rawsum_t cur_sum)
{
	/* TODO XXX please use the FreeBSD implementation for this. */
	uint16_t *words = (uint16_t *)data;
	int nw = len / 2;
	int i;

	/* sum all the full 16-bit words, in host byte order */
	for (i = 0; i < nw; i++)
		cur_sum += be16toh(words[i]);

	/* an odd trailing byte is padded with zero in the low half */
	if (len & 1)
		cur_sum += (data[len-1] << 8);

	return cur_sum;
}

/* Fold a raw checksum: 'cur_sum' is in host byte order, while the
 * return value is in network byte order.
 */
uint16_t
nm_os_csum_fold(rawsum_t cur_sum)
{
	/* TODO XXX please use the FreeBSD implementation for this. */
	while (cur_sum >> 16)
		cur_sum = (cur_sum & 0xFFFF) + (cur_sum >> 16);

	return htobe16((~cur_sum) & 0xFFFF);
}

uint16_t nm_os_csum_ipv4(struct nm_iphdr *iph)
{
#if 0
	return in_cksum_hdr((void *)iph);
#else
	return nm_os_csum_fold(nm_os_csum_raw((uint8_t*)iph, sizeof(struct nm_iphdr), 0));
#endif
}
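
/*
 * Worked example for nm_os_csum_fold() (illustrative only): folding
 * cur_sum = 0x2FFFE first gives 0xFFFE + 0x2 = 0x10000, then
 * 0x0000 + 0x1 = 0x1; the one's complement is 0xFFFE, which is then
 * returned in network byte order. The two helpers combine as shown in
 * nm_os_csum_ipv4() above: fold(raw(buf, len, 0)).
 */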

void
nm_os_csum_tcpudp_ipv4(struct nm_iphdr *iph, void *data,
					size_t datalen, uint16_t *check)
{
#ifdef INET
	uint16_t pseudolen = datalen + iph->protocol;

	/* Compute and insert the pseudo-header checksum. */
	*check = in_pseudo(iph->saddr, iph->daddr,
				 htobe16(pseudolen));
	/* Compute the checksum on TCP/UDP header + payload
	 * (includes the pseudo-header).
	 */
	*check = nm_os_csum_fold(nm_os_csum_raw(data, datalen, 0));
#else
	static int notsupported = 0;
	if (!notsupported) {
		notsupported = 1;
		nm_prerr("inet4 segmentation not supported");
	}
#endif
}

void
nm_os_csum_tcpudp_ipv6(struct nm_ipv6hdr *ip6h, void *data,
					size_t datalen, uint16_t *check)
{
#ifdef INET6
	*check = in6_cksum_pseudo((void*)ip6h, datalen, ip6h->nexthdr, 0);
	*check = nm_os_csum_fold(nm_os_csum_raw(data, datalen, 0));
#else
	static int notsupported = 0;
	if (!notsupported) {
		notsupported = 1;
		nm_prerr("inet6 segmentation not supported");
	}
#endif
}

/* on FreeBSD we send up one packet at a time */
void *
nm_os_send_up(struct ifnet *ifp, struct mbuf *m, struct mbuf *prev)
{
	NA(ifp)->if_input(ifp, m);
	return NULL;
}

int
nm_os_mbuf_has_csum_offld(struct mbuf *m)
{
	return m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_SCTP |
					 CSUM_TCP_IPV6 | CSUM_UDP_IPV6 |
					 CSUM_SCTP_IPV6);
}

int
nm_os_mbuf_has_seg_offld(struct mbuf *m)
{
	return m->m_pkthdr.csum_flags & CSUM_TSO;
}

static void
freebsd_generic_rx_handler(struct ifnet *ifp, struct mbuf *m)
{
	int stolen;

	if (unlikely(!NM_NA_VALID(ifp))) {
		nm_prlim(1, "Warning: RX packet intercepted, but no"
				" emulated adapter");
		return;
	}

	stolen = generic_rx_handler(ifp, m);
	if (!stolen) {
		struct netmap_generic_adapter *gna =
				(struct netmap_generic_adapter *)NA(ifp);
		gna->save_if_input(ifp, m);
	}
}
/*
 * Intercept the rx routine in the standard device driver.
 * Second argument is non-zero to intercept, 0 to restore.
 */
int
nm_os_catch_rx(struct netmap_generic_adapter *gna, int intercept)
{
	struct netmap_adapter *na = &gna->up.up;
	struct ifnet *ifp = na->ifp;
	int ret = 0;

	nm_os_ifnet_lock();
	if (intercept) {
		if (gna->save_if_input) {
			nm_prerr("RX on %s already intercepted", na->name);
			ret = EBUSY; /* already set */
			goto out;
		}
		gna->save_if_input = ifp->if_input;
		ifp->if_input = freebsd_generic_rx_handler;
	} else {
		if (!gna->save_if_input) {
			nm_prerr("Failed to undo RX intercept on %s",
				na->name);
			ret = EINVAL; /* not saved */
			goto out;
		}
		ifp->if_input = gna->save_if_input;
		gna->save_if_input = NULL;
	}
out:
	nm_os_ifnet_unlock();

	return ret;
}

/*
 * Intercept the packet steering routine in the tx path,
 * so that we can decide which queue is used for an mbuf.
 * Second argument is non-zero to intercept, 0 to restore.
 * On FreeBSD we just intercept if_transmit.
 */
int
nm_os_catch_tx(struct netmap_generic_adapter *gna, int intercept)
{
	struct netmap_adapter *na = &gna->up.up;
	struct ifnet *ifp = netmap_generic_getifp(gna);

	nm_os_ifnet_lock();
	if (intercept) {
		na->if_transmit = ifp->if_transmit;
		ifp->if_transmit = netmap_transmit;
	} else {
		ifp->if_transmit = na->if_transmit;
	}
	nm_os_ifnet_unlock();

	return 0;
}
/*
 * Transmit routine used by generic_netmap_txsync(). Returns 0 on success
 * and non-zero on error (which may be packet drops or other errors).
 * addr and len identify the netmap buffer, m is the (preallocated)
 * mbuf to use for transmissions.
 *
 * We should add a reference to the mbuf so the m_freem() at the end
 * of the transmission does not consume resources.
 *
 * On FreeBSD, and on multiqueue cards, we can force the queue using
 *      if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
 *              i = m->m_pkthdr.flowid % adapter->num_queues;
 *      else
 *              i = curcpu % adapter->num_queues;
 */
int
nm_os_generic_xmit_frame(struct nm_os_gen_arg *a)
{
	int ret;
	u_int len = a->len;
	struct ifnet *ifp = a->ifp;
	struct mbuf *m = a->m;

#if __FreeBSD_version < 1100000
	/*
	 * Old FreeBSD versions. The mbuf has a cluster attached, so we
	 * need to copy from the netmap buffer into the cluster.
	 */
	if (MBUF_REFCNT(m) != 1) {
		nm_prerr("invalid refcnt %d for %p", MBUF_REFCNT(m), m);
		panic("in generic_xmit_frame");
	}
	if (m->m_ext.ext_size < len) {
		nm_prlim(2, "size %d < len %d", m->m_ext.ext_size, len);
		len = m->m_ext.ext_size;
	}
	bcopy(a->addr, m->m_data, len);
#else  /* __FreeBSD_version >= 1100000 */
	/* New FreeBSD versions. Link the external storage to
	 * the netmap buffer, so that no copy is necessary. */
	m->m_ext.ext_buf = m->m_data = a->addr;
	m->m_ext.ext_size = len;
#endif /* __FreeBSD_version >= 1100000 */

	m->m_len = m->m_pkthdr.len = len;

	/* mbuf refcnt is not contended, no need to use atomic
	 * (a memory barrier is enough). */
	SET_MBUF_REFCNT(m, 2);
	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
	m->m_pkthdr.flowid = a->ring_nr;
	m->m_pkthdr.rcvif = ifp; /* used for tx notification */
	ret = NA(ifp)->if_transmit(ifp, m);
	return ret ? -1 : 0;
}
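
/*
 * Note on the refcount of 2 set above: the m_freem() performed by the
 * driver at the end of transmission then only drops one reference, so
 * the preallocated mbuf stays alive and can be recycled by the generic
 * adapter; the rcvif field set above lets the mbuf free path identify
 * the owning interface for the tx completion notification.
 */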

#if __FreeBSD_version >= 1100005
struct netmap_adapter *
netmap_getna(if_t ifp)
{
	return (NA((struct ifnet *)ifp));
}
#endif /* __FreeBSD_version >= 1100005 */

/*
 * The following two functions are empty until we have a generic
 * way to extract the info from the ifp.
 */
int
nm_os_generic_find_num_desc(struct ifnet *ifp, unsigned int *tx, unsigned int *rx)
{
	return 0;
}

void
nm_os_generic_find_num_queues(struct ifnet *ifp, u_int *txq, u_int *rxq)
{
	unsigned num_rings = netmap_generic_rings ? netmap_generic_rings : 1;

	*txq = num_rings;
	*rxq = num_rings;
}

void
nm_os_generic_set_features(struct netmap_generic_adapter *gna)
{
	gna->rxsg = 1; /* Supported through m_copydata. */
	gna->txqdisc = 0; /* Not supported. */
}

/*
 * Interrupt mitigation is not implemented in this port: apart from
 * recording the ring index, the functions below are no-op stubs that
 * keep the generic adapter happy.
 */
void
nm_os_mitigation_init(struct nm_generic_mit *mit, int idx, struct netmap_adapter *na)
{
	mit->mit_pending = 0;
	mit->mit_ring_idx = idx;
	mit->mit_na = na;
}

void
nm_os_mitigation_start(struct nm_generic_mit *mit)
{
}

void
nm_os_mitigation_restart(struct nm_generic_mit *mit)
{
}

int
nm_os_mitigation_active(struct nm_generic_mit *mit)
{
	return 0;
}

void
nm_os_mitigation_cleanup(struct nm_generic_mit *mit)
{
}

static int
nm_vi_dummy(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	return EINVAL;
}

static void
nm_vi_start(struct ifnet *ifp)
{
	panic("nm_vi_start() must not be called");
}

/*
 * Index manager of persistent virtual interfaces.
 * It is used to decide the lowest byte of the MAC address.
 * We use the same algorithm as the bridge port index manager.
 */
#define NM_VI_MAX	255
static struct {
	uint8_t index[NM_VI_MAX]; /* XXX just for a reasonable number */
	uint8_t active;
	struct mtx lock;
} nm_vi_indices;

void
nm_os_vi_init_index(void)
{
	int i;

	for (i = 0; i < NM_VI_MAX; i++)
		nm_vi_indices.index[i] = i;
	nm_vi_indices.active = 0;
	mtx_init(&nm_vi_indices.lock, "nm_vi_indices_lock", NULL, MTX_DEF);
}

/* return -1 if no index available */
static int
nm_vi_get_index(void)
{
	int ret;

	mtx_lock(&nm_vi_indices.lock);
	ret = nm_vi_indices.active == NM_VI_MAX ? -1 :
		nm_vi_indices.index[nm_vi_indices.active++];
	mtx_unlock(&nm_vi_indices.lock);
	return ret;
}

static void
nm_vi_free_index(uint8_t val)
{
	int i, lim;

	mtx_lock(&nm_vi_indices.lock);
	lim = nm_vi_indices.active;
	for (i = 0; i < lim; i++) {
		if (nm_vi_indices.index[i] == val) {
			/* swap index[lim-1] and index[i] */
			int tmp = nm_vi_indices.index[lim-1];

			nm_vi_indices.index[lim-1] = val;
			nm_vi_indices.index[i] = tmp;
			nm_vi_indices.active--;
			break;
		}
	}
	if (lim == nm_vi_indices.active)
		nm_prerr("Index %u not found", val);
	mtx_unlock(&nm_vi_indices.lock);
}
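
/*
 * Example (illustrative): with active = 3 and index = {5, 2, 7, ...},
 * freeing value 2 swaps it with index[2] (= 7), giving {5, 7, 2, ...}
 * and active = 2, so 2 becomes the first index handed out by the next
 * nm_vi_get_index() call.
 */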

/*
 * Implementation of a netmap-capable virtual interface that is
 * registered to the system.
 * It is based on if_tap.c and ip_fw_log.c in FreeBSD 9.
 *
 * Note: Linux sets refcount to 0 on allocation of net_device,
 * then increments it on registration to the system.
 * FreeBSD sets refcount to 1 on if_alloc(), and does not
 * increment this refcount on if_attach().
 */
int
nm_os_vi_persist(const char *name, struct ifnet **ret)
{
	struct ifnet *ifp;
	u_short macaddr_hi;
	uint32_t macaddr_mid;
	u_char eaddr[6];
	int unit = nm_vi_get_index(); /* just to decide MAC address */

	if (unit < 0)
		return EBUSY;
	/*
	 * We use the same MAC address generation method as tap,
	 * except that the first two octets are 00:be instead of 00:bd.
	 */
	macaddr_hi = htons(0x00be); /* XXX tap + 1 */
	macaddr_mid = (uint32_t) ticks;
	bcopy(&macaddr_hi, eaddr, sizeof(short));
	bcopy(&macaddr_mid, &eaddr[2], sizeof(uint32_t));
	eaddr[5] = (uint8_t)unit;

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		nm_prerr("if_alloc failed");
		return ENOMEM;
	}
	if_initname(ifp, name, IF_DUNIT_NONE);
	ifp->if_flags = IFF_UP | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = (void *)nm_vi_dummy;
	ifp->if_ioctl = nm_vi_dummy;
	ifp->if_start = nm_vi_start;
	ifp->if_mtu = ETHERMTU;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_capabilities |= IFCAP_LINKSTATE;
	ifp->if_capenable |= IFCAP_LINKSTATE;

	ether_ifattach(ifp, eaddr);
	*ret = ifp;
	return 0;
}

/* unregister from the system and drop the final refcount */
void
nm_os_vi_detach(struct ifnet *ifp)
{
	nm_vi_free_index(((char *)IF_LLADDR(ifp))[5]);
	ether_ifdetach(ifp);
	if_free(ifp);
}
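
/*
 * These persistent interfaces are typically created and destroyed from
 * userspace, e.g. via the vale-ctl tool shipped with netmap (a usage
 * sketch, not a normative reference):
 *
 *	vale-ctl -n foo		# create persistent interface 'foo'
 *	vale-ctl -r foo		# destroy it
 */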

#ifdef WITH_EXTMEM
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
struct nm_os_extmem {
	vm_object_t obj;
	vm_offset_t kva;
	vm_offset_t size;
	uintptr_t scan;
};

void
nm_os_extmem_delete(struct nm_os_extmem *e)
{
	nm_prinf("freeing %zx bytes", (size_t)e->size);
	vm_map_remove(kernel_map, e->kva, e->kva + e->size);
	nm_os_free(e);
}

char *
nm_os_extmem_nextpage(struct nm_os_extmem *e)
{
	char *rv = NULL;

	if (e->scan < e->kva + e->size) {
		rv = (char *)e->scan;
		e->scan += PAGE_SIZE;
	}
	return rv;
}

int
nm_os_extmem_isequal(struct nm_os_extmem *e1, struct nm_os_extmem *e2)
{
	return (e1->obj == e2->obj);
}

int
nm_os_extmem_nr_pages(struct nm_os_extmem *e)
{
	return e->size >> PAGE_SHIFT;
}

struct nm_os_extmem *
nm_os_extmem_create(unsigned long p, struct nmreq_pools_info *pi, int *perror)
{
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t obj;
	vm_prot_t prot;
	vm_pindex_t index;
	boolean_t wired;
	struct nm_os_extmem *e = NULL;
	int rv, error = 0;

	e = nm_os_malloc(sizeof(*e));
	if (e == NULL) {
		error = ENOMEM;
		goto out;
	}

	map = &curthread->td_proc->p_vmspace->vm_map;
	rv = vm_map_lookup(&map, p, VM_PROT_RW, &entry,
			&obj, &index, &prot, &wired);
	if (rv != KERN_SUCCESS) {
		nm_prerr("address %lx not found", p);
		error = vm_mmap_to_errno(rv);
		goto out_free;
	}
	/* check that we are given the whole vm_object ? */
	vm_map_lookup_done(map, entry);

	// XXX can we really use obj after releasing the map lock?
	e->obj = obj;
	vm_object_reference(obj);
	/* wire the memory and add the vm_object to the kernel map,
	 * to make sure that it is not freed even if the processes that
	 * are mmap()ing it all exit
	 */
	e->kva = vm_map_min(kernel_map);
	e->size = obj->size << PAGE_SHIFT;
	rv = vm_map_find(kernel_map, obj, 0, &e->kva, e->size, 0,
			VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
			VM_PROT_READ | VM_PROT_WRITE, 0);
	if (rv != KERN_SUCCESS) {
		nm_prerr("vm_map_find(%zx) failed", (size_t)e->size);
		error = vm_mmap_to_errno(rv);
		goto out_rel;
	}
	rv = vm_map_wire(kernel_map, e->kva, e->kva + e->size,
			VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
	if (rv != KERN_SUCCESS) {
		nm_prerr("vm_map_wire failed");
		error = vm_mmap_to_errno(rv);
		goto out_rem;
	}

	e->scan = e->kva;

	return e;

out_rem:
	vm_map_remove(kernel_map, e->kva, e->kva + e->size);
out_rel:
	vm_object_deallocate(e->obj);
	e->obj = NULL;
out_free:
	nm_os_free(e);
out:
	if (perror)
		*perror = error;
	return NULL;
}
#endif /* WITH_EXTMEM */
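
/*
 * Illustrative userspace counterpart (a sketch, assuming the kernel was
 * built with WITH_EXTMEM): the application mmap()s a large buffer and
 * passes its address to the kernel, which eventually reaches
 * nm_os_extmem_create() above as 'p', where the pages are wired and
 * mapped into the kernel map:
 *
 *	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *	               MAP_ANONYMOUS | MAP_SHARED, -1, 0);
 */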

/* ================== PTNETMAP GUEST SUPPORT ==================== */

#ifdef WITH_PTNETMAP
#include <sys/rman.h>
#include <machine/bus.h>	/* bus_dmamap_* */
#include <machine/resource.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
/*
 * ptnetmap memory device (memdev) for freebsd guest,
 * used to expose host netmap memory to the guest through a PCI BAR.
 */

/*
 * ptnetmap memdev private data structure
 */
struct ptnetmap_memdev {
	device_t dev;
	struct resource *pci_io;
	struct resource *pci_mem;
	struct netmap_mem_d *nm_mem;
};

static int	ptn_memdev_probe(device_t);
static int	ptn_memdev_attach(device_t);
static int	ptn_memdev_detach(device_t);
static int	ptn_memdev_shutdown(device_t);

static device_method_t ptn_memdev_methods[] = {
	DEVMETHOD(device_probe, ptn_memdev_probe),
	DEVMETHOD(device_attach, ptn_memdev_attach),
	DEVMETHOD(device_detach, ptn_memdev_detach),
	DEVMETHOD(device_shutdown, ptn_memdev_shutdown),
	DEVMETHOD_END
};

static driver_t ptn_memdev_driver = {
	PTNETMAP_MEMDEV_NAME,
	ptn_memdev_methods,
	sizeof(struct ptnetmap_memdev),
};

/* We use (SI_ORDER_MIDDLE+1) here, see DEV_MODULE_ORDERED() invocation
 * below. */
static devclass_t ptnetmap_devclass;
DRIVER_MODULE_ORDERED(ptn_memdev, pci, ptn_memdev_driver, ptnetmap_devclass,
		      NULL, NULL, SI_ORDER_MIDDLE + 1);

/*
 * Map host netmap memory through PCI-BAR in the guest OS,
 * returning physical (nm_paddr) and virtual (nm_addr) addresses
 * of the netmap memory mapped in the guest.
 */
int
nm_os_pt_memdev_iomap(struct ptnetmap_memdev *ptn_dev, vm_paddr_t *nm_paddr,
		      void **nm_addr, uint64_t *mem_size)
{
	int rid;

	nm_prinf("ptn_memdev_driver iomap");

	rid = PCIR_BAR(PTNETMAP_MEM_PCI_BAR);
	*mem_size = bus_read_4(ptn_dev->pci_io, PTNET_MDEV_IO_MEMSIZE_HI);
	*mem_size = bus_read_4(ptn_dev->pci_io, PTNET_MDEV_IO_MEMSIZE_LO) |
			(*mem_size << 32);

	/* map memory allocator */
	ptn_dev->pci_mem = bus_alloc_resource(ptn_dev->dev, SYS_RES_MEMORY,
			&rid, 0, ~0, *mem_size, RF_ACTIVE);
	if (ptn_dev->pci_mem == NULL) {
		*nm_paddr = 0;
		*nm_addr = NULL;
		return ENOMEM;
	}

	*nm_paddr = rman_get_start(ptn_dev->pci_mem);
	*nm_addr = rman_get_virtual(ptn_dev->pci_mem);

	nm_prinf("=== BAR %d start %lx len %lx mem_size %lx ===",
			PTNETMAP_MEM_PCI_BAR,
			(unsigned long)(*nm_paddr),
			(unsigned long)rman_get_size(ptn_dev->pci_mem),
			(unsigned long)*mem_size);
	return (0);
}

uint32_t
nm_os_pt_memdev_ioread(struct ptnetmap_memdev *ptn_dev, unsigned int reg)
{
	return bus_read_4(ptn_dev->pci_io, reg);
}

/* Unmap host netmap memory. */
void
nm_os_pt_memdev_iounmap(struct ptnetmap_memdev *ptn_dev)
{
	nm_prinf("ptn_memdev_driver iounmap");

	if (ptn_dev->pci_mem) {
		bus_release_resource(ptn_dev->dev, SYS_RES_MEMORY,
			PCIR_BAR(PTNETMAP_MEM_PCI_BAR), ptn_dev->pci_mem);
		ptn_dev->pci_mem = NULL;
	}
}

/* Device identification routine, return BUS_PROBE_DEFAULT on success,
 * positive on failure */
static int
ptn_memdev_probe(device_t dev)
{
	char desc[256];

	if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID)
		return (ENXIO);
	if (pci_get_device(dev) != PTNETMAP_PCI_DEVICE_ID)
		return (ENXIO);

	snprintf(desc, sizeof(desc), "%s PCI adapter",
			PTNETMAP_MEMDEV_NAME);
	device_set_desc_copy(dev, desc);

	return (BUS_PROBE_DEFAULT);
}

/* Device initialization routine. */
static int
ptn_memdev_attach(device_t dev)
{
	struct ptnetmap_memdev *ptn_dev;
	int rid;
	uint16_t mem_id;

	ptn_dev = device_get_softc(dev);
	ptn_dev->dev = dev;

	pci_enable_busmaster(dev);

	rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR);
	ptn_dev->pci_io = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
						 RF_ACTIVE);
	if (ptn_dev->pci_io == NULL) {
		device_printf(dev, "cannot map I/O space\n");
		return (ENXIO);
	}

	mem_id = bus_read_4(ptn_dev->pci_io, PTNET_MDEV_IO_MEMID);

	/* create guest allocator */
	ptn_dev->nm_mem = netmap_mem_pt_guest_attach(ptn_dev, mem_id);
	if (ptn_dev->nm_mem == NULL) {
		ptn_memdev_detach(dev);
		return (ENOMEM);
	}
	netmap_mem_get(ptn_dev->nm_mem);

	nm_prinf("ptnetmap memdev attached, host memid: %u", mem_id);

	return (0);
}

/* Device removal routine. */
static int
ptn_memdev_detach(device_t dev)
{
	struct ptnetmap_memdev *ptn_dev;

	ptn_dev = device_get_softc(dev);

	if (ptn_dev->nm_mem) {
		nm_prinf("ptnetmap memdev detached, host memid %u",
			netmap_mem_get_id(ptn_dev->nm_mem));
		netmap_mem_put(ptn_dev->nm_mem);
		ptn_dev->nm_mem = NULL;
	}
	if (ptn_dev->pci_mem) {
		bus_release_resource(dev, SYS_RES_MEMORY,
			PCIR_BAR(PTNETMAP_MEM_PCI_BAR), ptn_dev->pci_mem);
		ptn_dev->pci_mem = NULL;
	}
	if (ptn_dev->pci_io) {
		bus_release_resource(dev, SYS_RES_IOPORT,
			PCIR_BAR(PTNETMAP_IO_PCI_BAR), ptn_dev->pci_io);
		ptn_dev->pci_io = NULL;
	}

	return (0);
}

static int
ptn_memdev_shutdown(device_t dev)
{
	return bus_generic_shutdown(dev);
}

#endif /* WITH_PTNETMAP */

/*
 * In order to track whether pages are still mapped, we hook into
 * the standard cdev_pager and intercept the constructor and
 * destructor.
 */
struct netmap_vm_handle_t {
	struct cdev		*dev;
	struct netmap_priv_d	*priv;
};

static int
netmap_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
		vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	struct netmap_vm_handle_t *vmh = handle;

	if (netmap_verbose)
		nm_prinf("handle %p size %jd prot %d foff %jd",
			handle, (intmax_t)size, prot, (intmax_t)foff);
	if (color)
		*color = 0;
	dev_ref(vmh->dev);
	return 0;
}

static void
netmap_dev_pager_dtor(void *handle)
{
	struct netmap_vm_handle_t *vmh = handle;
	struct cdev *dev = vmh->dev;
	struct netmap_priv_d *priv = vmh->priv;

	if (netmap_verbose)
		nm_prinf("handle %p", handle);
	netmap_dtor(priv);
	free(vmh, M_DEVBUF);
	dev_rel(dev);
}

static int
netmap_dev_pager_fault(vm_object_t object, vm_ooffset_t offset,
	int prot, vm_page_t *mres)
{
	struct netmap_vm_handle_t *vmh = object->handle;
	struct netmap_priv_d *priv = vmh->priv;
	struct netmap_adapter *na = priv->np_na;
	vm_paddr_t paddr;
	vm_page_t page;
	vm_memattr_t memattr;
	vm_pindex_t pidx;

	nm_prdis("object %p offset %jd prot %d mres %p",
			object, (intmax_t)offset, prot, mres);
	memattr = object->memattr;
	pidx = OFF_TO_IDX(offset);
	paddr = netmap_mem_ofstophys(na->nm_mem, offset);
	if (paddr == 0)
		return VM_PAGER_FAIL;

	if (((*mres)->flags & PG_FICTITIOUS) != 0) {
		/*
		 * If the passed in result page is a fake page, update it with
		 * the new physical address.
		 */
		page = *mres;
		vm_page_updatefake(page, paddr, memattr);
	} else {
		/*
		 * Replace the passed in reqpage page with our own fake page and
		 * free up all of the original pages.
		 */
#ifndef VM_OBJECT_WUNLOCK	/* FreeBSD < 10.x */
#define VM_OBJECT_WUNLOCK	VM_OBJECT_UNLOCK
#define VM_OBJECT_WLOCK		VM_OBJECT_LOCK
#endif /* VM_OBJECT_WUNLOCK */

		VM_OBJECT_WUNLOCK(object);
		page = vm_page_getfake(paddr, memattr);
		VM_OBJECT_WLOCK(object);
		vm_page_lock(*mres);
		vm_page_free(*mres);
		vm_page_unlock(*mres);
		*mres = page;
		vm_page_insert(page, object, pidx);
	}
	page->valid = VM_PAGE_BITS_ALL;
	return (VM_PAGER_OK);
}

static struct cdev_pager_ops netmap_cdev_pager_ops = {
	.cdev_pg_ctor = netmap_dev_pager_ctor,
	.cdev_pg_dtor = netmap_dev_pager_dtor,
	.cdev_pg_fault = netmap_dev_pager_fault,
};

static int
netmap_mmap_single(struct cdev *cdev, vm_ooffset_t *foff,
	vm_size_t objsize, vm_object_t *objp, int prot)
{
	int error;
	struct netmap_vm_handle_t *vmh;
	struct netmap_priv_d *priv;
	vm_object_t obj;

	if (netmap_verbose)
		nm_prinf("cdev %p foff %jd size %jd objp %p prot %d", cdev,
		    (intmax_t )*foff, (intmax_t )objsize, objp, prot);

	vmh = malloc(sizeof(struct netmap_vm_handle_t), M_DEVBUF,
			      M_NOWAIT | M_ZERO);
	if (vmh == NULL)
		return ENOMEM;
	vmh->dev = cdev;

	NMG_LOCK();
	error = devfs_get_cdevpriv((void**)&priv);
	if (error)
		goto err_unlock;
	if (priv->np_nifp == NULL) {
		error = EINVAL;
		goto err_unlock;
	}
	vmh->priv = priv;
	priv->np_refs++;
	NMG_UNLOCK();

	obj = cdev_pager_allocate(vmh, OBJT_DEVICE,
		&netmap_cdev_pager_ops, objsize, prot,
		*foff, NULL);
	if (obj == NULL) {
		nm_prerr("cdev_pager_allocate failed");
		error = EINVAL;
		goto err_deref;
	}

	*objp = obj;
	return 0;

err_deref:
	NMG_LOCK();
	priv->np_refs--;
err_unlock:
	NMG_UNLOCK();
	free(vmh, M_DEVBUF);
	return error;
}

/*
 * On FreeBSD the close routine is only called on the last close on
 * the device (/dev/netmap) so we cannot do anything useful.
 * To track close() on individual file descriptors we pass netmap_dtor() to
 * devfs_set_cdevpriv() on open(). The FreeBSD kernel will call the destructor
 * when the last fd pointing to the device is closed.
 *
 * Note that FreeBSD does not even munmap() on close() so we also have
 * to track mmap() ourselves, and postpone the call to
 * netmap_dtor() until the process has no open fds and no active
 * memory maps on /dev/netmap, as in linux.
 */
static int
netmap_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	if (netmap_verbose)
		nm_prinf("dev %p fflag 0x%x devtype %d td %p",
			dev, fflag, devtype, td);
	return 0;
}

static int
netmap_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct netmap_priv_d *priv;
	int error;

	NMG_LOCK();
	priv = netmap_priv_new();
	if (priv == NULL) {
		error = ENOMEM;
		goto out;
	}
	error = devfs_set_cdevpriv(priv, netmap_dtor);
	if (error) {
		netmap_priv_delete(priv);
	}
out:
	NMG_UNLOCK();
	return error;
}

/******************** kthread wrapper ****************/
#include <sys/sysproto.h>
u_int
nm_os_ncpus(void)
{
	return mp_maxid + 1;
}

struct nm_kctx_ctx {
	/* Userspace thread (kthread creator). */
	struct thread *user_td;

	/* worker function and parameter */
	nm_kctx_worker_fn_t worker_fn;
	void *worker_private;

	struct nm_kctx *nmk;

	/* integer to manage multiple worker contexts (e.g., RX or TX on ptnetmap) */
	long type;
};

struct nm_kctx {
	struct thread *worker;
	struct mtx worker_lock;
	struct nm_kctx_ctx worker_ctx;
	int run;		/* used to stop kthread */
	int attach_user;	/* kthread attached to user_process */
	int affinity;
};

static void
nm_kctx_worker(void *data)
{
	struct nm_kctx *nmk = data;
	struct nm_kctx_ctx *ctx = &nmk->worker_ctx;

	if (nmk->affinity >= 0) {
		thread_lock(curthread);
		sched_bind(curthread, nmk->affinity);
		thread_unlock(curthread);
	}

	while (nmk->run) {
		/*
		 * check if the parent process dies
		 * (when kthread is attached to user process)
		 */
		if (ctx->user_td) {
			PROC_LOCK(curproc);
			thread_suspend_check(0);
			PROC_UNLOCK(curproc);
		} else {
			kthread_suspend_check();
		}

		/* Continuously execute worker process. */
		ctx->worker_fn(ctx->worker_private); /* worker body */
	}

	kthread_exit();
}

void
nm_os_kctx_worker_setaff(struct nm_kctx *nmk, int affinity)
{
	nmk->affinity = affinity;
}

struct nm_kctx *
nm_os_kctx_create(struct nm_kctx_cfg *cfg, void *opaque)
{
	struct nm_kctx *nmk = NULL;

	nmk = malloc(sizeof(*nmk), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!nmk)
		return NULL;

	mtx_init(&nmk->worker_lock, "nm_kthread lock", NULL, MTX_DEF);
	nmk->worker_ctx.worker_fn = cfg->worker_fn;
	nmk->worker_ctx.worker_private = cfg->worker_private;
	nmk->worker_ctx.type = cfg->type;
	nmk->affinity = -1;

	/* attach kthread to user process (ptnetmap) */
	nmk->attach_user = cfg->attach_user;

	return nmk;
}

int
nm_os_kctx_worker_start(struct nm_kctx *nmk)
{
	struct proc *p = NULL;
	int error = 0;

	/* Temporarily disable this function as it is currently broken
	 * and causes kernel crashes. The failure can be triggered by
	 * the "vale_polling_enable_disable" test in ctrl-api-test.c. */
	return EOPNOTSUPP;

	if (nmk->worker)
		return EBUSY;

	/* check if we want to attach kthread to user process */
	if (nmk->attach_user) {
		nmk->worker_ctx.user_td = curthread;
		p = curthread->td_proc;
	}

	/* enable kthread main loop */
	nmk->run = 1;
	/* create kthread */
	if ((error = kthread_add(nm_kctx_worker, nmk, p,
			&nmk->worker, RFNOWAIT /* to be checked */, 0, "nm-kthread-%ld",
			nmk->worker_ctx.type))) {
		goto err;
	}

	nm_prinf("nm_kthread started td %p", nmk->worker);

	return 0;
err:
	nm_prerr("nm_kthread start failed err %d", error);
	nmk->worker = NULL;
	return error;
}

void
nm_os_kctx_worker_stop(struct nm_kctx *nmk)
{
	if (!nmk->worker)
		return;

	/* tell the kthread to exit from its main loop */
	nmk->run = 0;

	/* wake up kthread if it sleeps */
	kthread_resume(nmk->worker);

	nmk->worker = NULL;
}

void
nm_os_kctx_destroy(struct nm_kctx *nmk)
{
	if (!nmk)
		return;

	if (nmk->worker)
		nm_os_kctx_worker_stop(nmk);

	free(nmk, M_DEVBUF);
}

/******************** kqueue support ****************/

/*
 * In addition to calling selwakeuppri(), nm_os_selwakeup() also
 * needs to call knote() to wake up kqueue listeners.
 * This operation is deferred to a taskqueue in order to avoid possible
 * lock order reversals; these may happen because knote() grabs a
 * private lock associated to the 'si' (see struct selinfo,
 * struct nm_selinfo, and nm_os_selinfo_init), and nm_os_selwakeup()
 * can be called while holding the lock associated to a different
 * 'si'.
 * When calling knote() we use a non-zero 'hint' argument to inform
 * the netmap_knrw() function that it is being called from
 * 'nm_os_selwakeup'; this is necessary because when netmap_knrw() is
 * called by the kevent subsystem (i.e. kevent_scan()) we also need to
 * call netmap_poll().
 *
 * The netmap_kqfilter() function registers one or another f_event
 * depending on read or write mode. A pointer to the struct
 * 'netmap_priv_d' is stored into kn->kn_hook, so that it can later
 * be passed to netmap_poll(). We pass NULL as a third argument to
 * netmap_poll(), so that the latter only runs the txsync/rxsync
 * (if necessary), and skips the nm_os_selrecord() calls.
 */
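
/*
 * Illustrative userspace counterpart (a sketch, assuming 'fd' is an open
 * and registered /dev/netmap file descriptor):
 *
 *	struct kevent ev, res;
 *	int kq = kqueue();
 *	EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);   // registers via netmap_kqfilter()
 *	kevent(kq, NULL, 0, &res, 1, NULL);  // waits; f_event -> netmap_knrw()
 */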

void
nm_os_selwakeup(struct nm_selinfo *si)
{
	selwakeuppri(&si->si, PI_NET);
	if (si->kqueue_users > 0) {
		taskqueue_enqueue(si->ntfytq, &si->ntfytask);
	}
}

void
nm_os_selrecord(struct thread *td, struct nm_selinfo *si)
{
	selrecord(td, &si->si);
}

static void
netmap_knrdetach(struct knote *kn)
{
	struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook;
	struct nm_selinfo *si = priv->np_si[NR_RX];

	knlist_remove(&si->si.si_note, kn, /*islocked=*/0);
	NMG_LOCK();
	KASSERT(si->kqueue_users > 0, ("kqueue_user underflow on %s",
	    si->mtxname));
	si->kqueue_users--;
	nm_prinf("kqueue users for %s: %d", si->mtxname, si->kqueue_users);
	NMG_UNLOCK();
}

static void
netmap_knwdetach(struct knote *kn)
{
	struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook;
	struct nm_selinfo *si = priv->np_si[NR_TX];

	knlist_remove(&si->si.si_note, kn, /*islocked=*/0);
	NMG_LOCK();
	si->kqueue_users--;
	nm_prinf("kqueue users for %s: %d", si->mtxname, si->kqueue_users);
	NMG_UNLOCK();
}

/*
 * Callback triggered by netmap notifications (see netmap_notify()),
 * and by the application calling kevent(). In the former case we
 * just return 1 (events ready), since we are not able to do better.
 * In the latter case we use netmap_poll() to see which events are
 * ready.
 */
static int
netmap_knrw(struct knote *kn, long hint, int events)
{
	struct netmap_priv_d *priv;
	int revents;

	if (hint != 0) {
		/* Called from netmap_notify(), typically from a
		 * thread different from the one issuing kevent().
		 * Assume we are ready. */
		return 1;
	}

	/* Called from kevent(). */
	priv = kn->kn_hook;
	revents = netmap_poll(priv, events, /*thread=*/NULL);

	return (events & revents) ? 1 : 0;
}

static int
netmap_knread(struct knote *kn, long hint)
{
	return netmap_knrw(kn, hint, POLLIN);
}

static int
netmap_knwrite(struct knote *kn, long hint)
{
	return netmap_knrw(kn, hint, POLLOUT);
}

static struct filterops netmap_rfiltops = {
	.f_isfd = 1,
	.f_detach = netmap_knrdetach,
	.f_event = netmap_knread,
};

static struct filterops netmap_wfiltops = {
	.f_isfd = 1,
	.f_detach = netmap_knwdetach,
	.f_event = netmap_knwrite,
};

/*
 * This is called when a thread invokes kevent() to record
 * a change in the configuration of the kqueue().
 * The 'priv' is the one associated to the open netmap device.
 */
static int
netmap_kqfilter(struct cdev *dev, struct knote *kn)
{
	struct netmap_priv_d *priv;
	int error;
	struct netmap_adapter *na;
	struct nm_selinfo *si;
	int ev = kn->kn_filter;

	if (ev != EVFILT_READ && ev != EVFILT_WRITE) {
		nm_prerr("bad filter request %d", ev);
		return 1;
	}
	error = devfs_get_cdevpriv((void**)&priv);
	if (error) {
		nm_prerr("device not yet setup");
		return 1;
	}
	na = priv->np_na;
	if (na == NULL) {
		nm_prerr("no netmap adapter for this file descriptor");
		return 1;
	}
	/* the si is indicated in the priv */
	si = priv->np_si[(ev == EVFILT_WRITE) ? NR_TX : NR_RX];
	kn->kn_fop = (ev == EVFILT_WRITE) ?
		&netmap_wfiltops : &netmap_rfiltops;
	kn->kn_hook = priv;
	NMG_LOCK();
	si->kqueue_users++;
	nm_prinf("kqueue users for %s: %d", si->mtxname, si->kqueue_users);
	NMG_UNLOCK();
	knlist_add(&si->si.si_note, kn, /*islocked=*/0);

	return 0;
}

static int
freebsd_netmap_poll(struct cdev *cdevi __unused, int events, struct thread *td)
{
	struct netmap_priv_d *priv;

	if (devfs_get_cdevpriv((void **)&priv)) {
		return POLLERR;
	}
	return netmap_poll(priv, events, td);
}

static int
freebsd_netmap_ioctl(struct cdev *dev __unused, u_long cmd, caddr_t data,
		int ffla __unused, struct thread *td)
{
	int error;
	struct netmap_priv_d *priv;

	CURVNET_SET(TD_TO_VNET(td));
	error = devfs_get_cdevpriv((void **)&priv);
	if (error) {
		/* XXX ENOENT should be impossible, since the priv
		 * is now created in the open */
		if (error == ENOENT)
			error = ENXIO;
		goto out;
	}
	error = netmap_ioctl(priv, cmd, data, td, /*nr_body_is_user=*/1);
out:
	CURVNET_RESTORE();

	return error;
}

void
nm_os_onattach(struct ifnet *ifp)
{
	ifp->if_capabilities |= IFCAP_NETMAP;
}

void
nm_os_onenter(struct ifnet *ifp)
{
	struct netmap_adapter *na = NA(ifp);

	na->if_transmit = ifp->if_transmit;
	ifp->if_transmit = netmap_transmit;
	ifp->if_capenable |= IFCAP_NETMAP;
}

void
nm_os_onexit(struct ifnet *ifp)
{
	struct netmap_adapter *na = NA(ifp);

	ifp->if_transmit = na->if_transmit;
	ifp->if_capenable &= ~IFCAP_NETMAP;
}

extern struct cdevsw netmap_cdevsw; /* XXX used in netmap.c, should go elsewhere */
struct cdevsw netmap_cdevsw = {
	.d_version = D_VERSION,
	.d_name = "netmap",
	.d_open = netmap_open,
	.d_mmap_single = netmap_mmap_single,
	.d_ioctl = freebsd_netmap_ioctl,
	.d_poll = freebsd_netmap_poll,
	.d_kqfilter = netmap_kqfilter,
	.d_close = netmap_close,
};
/*--- end of kqueue support ----*/

/*
 * Kernel entry point.
 *
 * Initialize/finalize the module and return.
 *
 * Return 0 on success, errno on failure.
 */
static int
netmap_loader(__unused struct module *module, int event, __unused void *arg)
{
	int error = 0;

	switch (event) {
	case MOD_LOAD:
		error = netmap_init();
		break;

	case MOD_UNLOAD:
		/*
		 * If someone is still using netmap,
		 * then the module cannot be unloaded.
		 */
		if (netmap_use_count) {
			nm_prerr("netmap module can not be unloaded - netmap_use_count: %d",
					netmap_use_count);
			error = EBUSY;
			break;
		}
		netmap_fini();
		break;

	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}

#ifdef DEV_MODULE_ORDERED
/*
 * The netmap module contains three drivers: (i) the netmap character device
 * driver; (ii) the ptnetmap memdev PCI device driver; (iii) the ptnet PCI
 * device driver. The attach() routines of both (ii) and (iii) need the
 * lock of the global allocator, and such lock is initialized in netmap_init(),
 * which is part of (i).
 * Therefore, we make sure that (i) is loaded before (ii) and (iii), using
 * the 'order' parameter of driver declaration macros. For (i), we specify
 * SI_ORDER_MIDDLE, while higher orders are used with the DRIVER_MODULE_ORDERED
 * macros for (ii) and (iii).
 */
DEV_MODULE_ORDERED(netmap, netmap_loader, NULL, SI_ORDER_MIDDLE);
#else /* !DEV_MODULE_ORDERED */
DEV_MODULE(netmap, netmap_loader, NULL);
#endif /* DEV_MODULE_ORDERED */
MODULE_DEPEND(netmap, pci, 1, 1, 1);
MODULE_VERSION(netmap, 1);