/*
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "opt_inet6.h"

#include <sys/param.h>
#include <sys/module.h>
#include <sys/errno.h>
#include <sys/eventhandler.h>
#include <sys/poll.h>		/* POLLIN, POLLOUT */
#include <sys/kernel.h>		/* types used in module initialization */
#include <sys/conf.h>		/* DEV_MODULE_ORDERED */
#include <sys/endian.h>
#include <sys/syscallsubr.h>	/* kern_ioctl() */
#include <sys/rwlock.h>

#include <vm/vm.h>		/* vtophys */
#include <vm/pmap.h>		/* vtophys */
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <sys/malloc.h>
#include <sys/socket.h>		/* sockaddrs */
#include <sys/selinfo.h>
#include <sys/kthread.h>	/* kthread_add() */
#include <sys/proc.h>		/* PROC_LOCK() */
#include <sys/unistd.h>		/* RFNOWAIT */
#include <sys/sched.h>		/* sched_bind() */
#include <sys/smp.h>		/* mp_maxid */
#include <sys/taskqueue.h>	/* taskqueue_enqueue(), taskqueue_create(), ... */

#include <net/if_var.h>
#include <net/if_types.h>	/* IFT_ETHER */
#include <net/ethernet.h>	/* ether_ifdetach */
#include <net/if_dl.h>		/* LLADDR */
#include <machine/bus.h>	/* bus_dmamap_* */
#include <netinet/in.h>		/* in6_cksum_pseudo() */
#include <machine/in_cksum.h>	/* in_pseudo(), in_cksum_hdr() */

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <net/netmap_virt.h>
#include <dev/netmap/netmap_mem2.h>
/* ======================== FREEBSD-SPECIFIC ROUTINES ================== */

static void
nm_kqueue_notify(void *opaque, int pending)
{
	struct nm_selinfo *si = opaque;

	/* We use a non-zero hint to distinguish this notification call
	 * from the call done in kqueue_scan(), which uses hint=0.
	 */
	KNOTE_UNLOCKED(&si->si.si_note, /*hint=*/0x100);
}

int nm_os_selinfo_init(NM_SELINFO_T *si, const char *name) {
	int err;

	TASK_INIT(&si->ntfytask, 0, nm_kqueue_notify, si);
	si->ntfytq = taskqueue_create(name, M_NOWAIT,
	    taskqueue_thread_enqueue, &si->ntfytq);
	if (si->ntfytq == NULL)
		return -ENOMEM;
	err = taskqueue_start_threads(&si->ntfytq, 1, PI_NET, "tq %s", name);
	if (err) {
		taskqueue_free(si->ntfytq);
		si->ntfytq = NULL;
		return err;
	}

	snprintf(si->mtxname, sizeof(si->mtxname), "nmkl%s", name);
	mtx_init(&si->m, si->mtxname, NULL, MTX_DEF);
	knlist_init_mtx(&si->si.si_note, &si->m);
	si->kqueue_users = 0;

	return (0);
}
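/*
 * Illustrative sketch (not part of the original driver): the expected
 * pairing of nm_os_selinfo_init() and nm_os_selinfo_uninit().  The ring
 * name "rx0" is a hypothetical example.
 */
#if 0	/* example only, not compiled */
	struct nm_selinfo si;

	if (nm_os_selinfo_init(&si, "rx0"))	/* nonzero means failure */
		return ENOMEM;
	/* ... deliver wakeups with nm_os_selwakeup(&si) ... */
	nm_os_selinfo_uninit(&si);	/* safe even if only partially set up */
#endif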
void
nm_os_selinfo_uninit(NM_SELINFO_T *si)
{
	if (si->ntfytq == NULL) {
		return;	/* si was not initialized */
	}
	taskqueue_drain(si->ntfytq, &si->ntfytask);
	taskqueue_free(si->ntfytq);
	si->ntfytq = NULL;
	knlist_delete(&si->si.si_note, curthread, /*islocked=*/0);
	knlist_destroy(&si->si.si_note);
	/* now we don't need the mutex anymore */
	mtx_destroy(&si->m);
}
void *
nm_os_malloc(size_t size)
{
	return malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
}

void *
nm_os_realloc(void *addr, size_t new_size, size_t old_size __unused)
{
	return realloc(addr, new_size, M_DEVBUF, M_NOWAIT | M_ZERO);
}

void
nm_os_free(void *addr)
{
	free(addr, M_DEVBUF);
}
void
nm_os_ifnet_lock(void)
{
	IFNET_RLOCK();
}

void
nm_os_ifnet_unlock(void)
{
	IFNET_RUNLOCK();
}

static int netmap_use_count = 0;

void
nm_os_get_module(void)
{
	netmap_use_count++;
}

void
nm_os_put_module(void)
{
	netmap_use_count--;
}
static void
netmap_ifnet_arrival_handler(void *arg __unused, struct ifnet *ifp)
{
	netmap_undo_zombie(ifp);
}

static void
netmap_ifnet_departure_handler(void *arg __unused, struct ifnet *ifp)
{
	netmap_make_zombie(ifp);
}

static eventhandler_tag nm_ifnet_ah_tag;
static eventhandler_tag nm_ifnet_dh_tag;

int
nm_os_ifnet_init(void)
{
	nm_ifnet_ah_tag =
		EVENTHANDLER_REGISTER(ifnet_arrival_event,
				netmap_ifnet_arrival_handler,
				NULL, EVENTHANDLER_PRI_ANY);
	nm_ifnet_dh_tag =
		EVENTHANDLER_REGISTER(ifnet_departure_event,
				netmap_ifnet_departure_handler,
				NULL, EVENTHANDLER_PRI_ANY);
	return 0;
}
void
nm_os_ifnet_fini(void)
{
	EVENTHANDLER_DEREGISTER(ifnet_arrival_event,
			nm_ifnet_ah_tag);
	EVENTHANDLER_DEREGISTER(ifnet_departure_event,
			nm_ifnet_dh_tag);
}

unsigned
nm_os_ifnet_mtu(struct ifnet *ifp)
{
#if __FreeBSD_version < 1100030
	return ifp->if_data.ifi_mtu;
#else /* __FreeBSD_version >= 1100030 */
	return ifp->if_mtu;
#endif
}
rawsum_t
nm_os_csum_raw(uint8_t *data, size_t len, rawsum_t cur_sum)
{
	/* TODO XXX please use the FreeBSD implementation for this. */
	uint16_t *words = (uint16_t *)data;
	int nw = len / 2;
	int i;

	for (i = 0; i < nw; i++)
		cur_sum += be16toh(words[i]);

	if (len & 1)
		cur_sum += (data[len-1] << 8);

	return cur_sum;
}
/* Fold a raw checksum: 'cur_sum' is in host byte order, while the
 * return value is in network byte order.
 */
uint16_t
nm_os_csum_fold(rawsum_t cur_sum)
{
	/* TODO XXX please use the FreeBSD implementation for this. */
	while (cur_sum >> 16)
		cur_sum = (cur_sum & 0xFFFF) + (cur_sum >> 16);

	return htobe16((~cur_sum) & 0xFFFF);
}
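/*
 * Illustrative sketch (not part of the original driver): computing a
 * 16-bit Internet checksum over a buffer with the two helpers above.
 * 'buf' and its length are hypothetical.
 */
#if 0	/* example only, not compiled */
	uint8_t buf[64];
	rawsum_t sum;
	uint16_t csum;

	sum = nm_os_csum_raw(buf, sizeof(buf), 0);	/* host byte order */
	csum = nm_os_csum_fold(sum);			/* network byte order */
	/* 'csum' can now be stored directly into a packet header field */
#endif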
uint16_t nm_os_csum_ipv4(struct nm_iphdr *iph)
{
#if 0
	return in_cksum_hdr((void *)iph);
#else
	return nm_os_csum_fold(nm_os_csum_raw((uint8_t*)iph, sizeof(struct nm_iphdr), 0));
#endif
}
void
nm_os_csum_tcpudp_ipv4(struct nm_iphdr *iph, void *data,
		       size_t datalen, uint16_t *check)
{
#ifdef INET
	uint16_t pseudolen = datalen + iph->protocol;

	/* Compute and insert the pseudo-header checksum. */
	*check = in_pseudo(iph->saddr, iph->daddr,
			   htobe16(pseudolen));
	/* Compute the checksum on TCP/UDP header + payload
	 * (includes the pseudo-header).
	 */
	*check = nm_os_csum_fold(nm_os_csum_raw(data, datalen, 0));
#else
	static int notsupported = 0;
	if (!notsupported) {
		notsupported = 1;
		nm_prerr("inet4 segmentation not supported");
	}
#endif
}
void
nm_os_csum_tcpudp_ipv6(struct nm_ipv6hdr *ip6h, void *data,
		       size_t datalen, uint16_t *check)
{
#ifdef INET6
	*check = in6_cksum_pseudo((void*)ip6h, datalen, ip6h->nexthdr, 0);
	*check = nm_os_csum_fold(nm_os_csum_raw(data, datalen, 0));
#else
	static int notsupported = 0;
	if (!notsupported) {
		notsupported = 1;
		nm_prerr("inet6 segmentation not supported");
	}
#endif
}
/* on FreeBSD we send up one packet at a time */
void *
nm_os_send_up(struct ifnet *ifp, struct mbuf *m, struct mbuf *prev)
{
	NA(ifp)->if_input(ifp, m);
	return NULL;
}
int
nm_os_mbuf_has_csum_offld(struct mbuf *m)
{
	return m->m_pkthdr.csum_flags & (CSUM_TCP | CSUM_UDP | CSUM_SCTP |
					 CSUM_TCP_IPV6 | CSUM_UDP_IPV6 |
					 CSUM_SCTP_IPV6);
}

int
nm_os_mbuf_has_seg_offld(struct mbuf *m)
{
	return m->m_pkthdr.csum_flags & CSUM_TSO;
}
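/*
 * Illustrative sketch (not part of the original driver): how the
 * emulated adapter might consult the two helpers above per-mbuf on the
 * host TX path before forwarding into a netmap ring.
 */
#if 0	/* example only, not compiled */
	if (nm_os_mbuf_has_seg_offld(m)) {
		/* TSO was requested: segment the mbuf in software */
	} else if (nm_os_mbuf_has_csum_offld(m)) {
		/* compute the L4 checksum in software before forwarding */
	}
#endif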
static void
freebsd_generic_rx_handler(struct ifnet *ifp, struct mbuf *m)
{
	int stolen;

	if (unlikely(!NM_NA_VALID(ifp))) {
		nm_prlim(1, "Warning: RX packet intercepted, but no"
				" emulated adapter");
		return;
	}

	stolen = generic_rx_handler(ifp, m);
	if (!stolen) {
		struct netmap_generic_adapter *gna =
				(struct netmap_generic_adapter *)NA(ifp);
		gna->save_if_input(ifp, m);
	}
}
/*
 * Intercept the rx routine in the standard device driver.
 * Second argument is non-zero to intercept, 0 to restore.
 */
int
nm_os_catch_rx(struct netmap_generic_adapter *gna, int intercept)
{
	struct netmap_adapter *na = &gna->up.up;
	struct ifnet *ifp = na->ifp;
	int ret = 0;

	nm_os_ifnet_lock();
	if (intercept) {
		if (gna->save_if_input) {
			nm_prerr("RX on %s already intercepted", na->name);
			ret = EBUSY; /* already set */
			goto out;
		}
		gna->save_if_input = ifp->if_input;
		ifp->if_input = freebsd_generic_rx_handler;
	} else {
		if (!gna->save_if_input) {
			nm_prerr("Failed to undo RX intercept on %s",
				na->name);
			ret = EINVAL; /* not saved */
			goto out;
		}
		ifp->if_input = gna->save_if_input;
		gna->save_if_input = NULL;
	}
out:
	nm_os_ifnet_unlock();

	return ret;
}
/*
 * Intercept the packet steering routine in the tx path,
 * so that we can decide which queue is used for an mbuf.
 * Second argument is non-zero to intercept, 0 to restore.
 * On FreeBSD we just intercept if_transmit.
 */
int
nm_os_catch_tx(struct netmap_generic_adapter *gna, int intercept)
{
	struct netmap_adapter *na = &gna->up.up;
	struct ifnet *ifp = netmap_generic_getifp(gna);

	nm_os_ifnet_lock();
	if (intercept) {
		na->if_transmit = ifp->if_transmit;
		ifp->if_transmit = netmap_transmit;
	} else {
		ifp->if_transmit = na->if_transmit;
	}
	nm_os_ifnet_unlock();

	return 0;
}
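/*
 * Illustrative sketch (not part of the original driver): a generic
 * adapter would bracket register/unregister with both intercepts.
 * The exact ordering here is an assumption; error handling is omitted.
 */
#if 0	/* example only, not compiled */
	/* entering netmap mode on an emulated adapter */
	nm_os_catch_tx(gna, 1);	/* if_transmit -> netmap_transmit */
	nm_os_catch_rx(gna, 1);	/* if_input -> freebsd_generic_rx_handler */
	/* ... later, leaving netmap mode ... */
	nm_os_catch_rx(gna, 0);
	nm_os_catch_tx(gna, 0);
#endif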
/*
 * Transmit routine used by generic_netmap_txsync(). Returns 0 on success
 * and non-zero on error (which may be packet drops or other errors).
 * addr and len identify the netmap buffer, m is the (preallocated)
 * mbuf to use for transmissions.
 *
 * We should add a reference to the mbuf so the m_freem() at the end
 * of the transmission does not consume resources.
 *
 * On FreeBSD, and on multiqueue cards, we can force the queue using
 *	if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
 *		i = m->m_pkthdr.flowid % adapter->num_queues;
 *	else
 *		i = curcpu % adapter->num_queues;
 */
int
nm_os_generic_xmit_frame(struct nm_os_gen_arg *a)
{
	int ret;
	u_int len = a->len;
	struct ifnet *ifp = a->ifp;
	struct mbuf *m = a->m;

#if __FreeBSD_version < 1100000
	/*
	 * Old FreeBSD versions. The mbuf has a cluster attached,
	 * we need to copy from the cluster to the netmap buffer.
	 */
	if (MBUF_REFCNT(m) != 1) {
		nm_prerr("invalid refcnt %d for %p", MBUF_REFCNT(m), m);
		panic("in generic_xmit_frame");
	}
	if (m->m_ext.ext_size < len) {
		nm_prlim(2, "size %d < len %d", m->m_ext.ext_size, len);
		len = m->m_ext.ext_size;
	}
	bcopy(a->addr, m->m_data, len);
#else  /* __FreeBSD_version >= 1100000 */
	/* New FreeBSD versions. Link the external storage to
	 * the netmap buffer, so that no copy is necessary. */
	m->m_ext.ext_buf = m->m_data = a->addr;
	m->m_ext.ext_size = len;
#endif /* __FreeBSD_version >= 1100000 */

	m->m_flags |= M_PKTHDR;
	m->m_len = m->m_pkthdr.len = len;

	/* mbuf refcnt is not contended, no need to use atomic
	 * (a memory barrier is enough). */
	SET_MBUF_REFCNT(m, 2);
	M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
	m->m_pkthdr.flowid = a->ring_nr;
	m->m_pkthdr.rcvif = ifp; /* used for tx notification */
	CURVNET_SET(ifp->if_vnet);
	ret = NA(ifp)->if_transmit(ifp, m);
	CURVNET_RESTORE();
	return ret ? -1 : 0;
}
#if __FreeBSD_version >= 1100005
struct netmap_adapter *
netmap_getna(if_t ifp)
{
	return (NA((struct ifnet *)ifp));
}
#endif /* __FreeBSD_version >= 1100005 */
/*
 * The following two functions are empty until we have a generic
 * way to extract the info from the ifp
 */
int
nm_os_generic_find_num_desc(struct ifnet *ifp, unsigned int *tx, unsigned int *rx)
{
	return 0;
}

void
nm_os_generic_find_num_queues(struct ifnet *ifp, u_int *txq, u_int *rxq)
{
	unsigned num_rings = netmap_generic_rings ? netmap_generic_rings : 1;

	*txq = num_rings;
	*rxq = num_rings;
}

void
nm_os_generic_set_features(struct netmap_generic_adapter *gna)
{
	gna->rxsg = 1; /* Supported through m_copydata. */
	gna->txqdisc = 0; /* Not supported. */
}
void
nm_os_mitigation_init(struct nm_generic_mit *mit, int idx, struct netmap_adapter *na)
{
	mit->mit_pending = 0;
	mit->mit_ring_idx = idx;
	mit->mit_na = na;
}

void
nm_os_mitigation_start(struct nm_generic_mit *mit)
{
}

void
nm_os_mitigation_restart(struct nm_generic_mit *mit)
{
}

int
nm_os_mitigation_active(struct nm_generic_mit *mit)
{
	return 0;
}

void
nm_os_mitigation_cleanup(struct nm_generic_mit *mit)
{
}
static int
nm_vi_dummy(struct ifnet *ifp, u_long cmd, caddr_t addr)
{
	return EINVAL;
}

static void
nm_vi_start(struct ifnet *ifp)
{
	panic("nm_vi_start() must not be called");
}
/*
 * Index manager of persistent virtual interfaces.
 * It is used to decide the lowest byte of the MAC address.
 * We use the same algorithm as for managing bridge port indexes.
 */
#define NM_VI_MAX	255
static struct nm_vi_indices {
	uint8_t index[NM_VI_MAX]; /* XXX just for a reasonable number */
	uint8_t active;
	struct mtx lock;
} nm_vi_indices;

void
nm_os_vi_init_index(void)
{
	int i;

	for (i = 0; i < NM_VI_MAX; i++)
		nm_vi_indices.index[i] = i;
	nm_vi_indices.active = 0;
	mtx_init(&nm_vi_indices.lock, "nm_vi_indices_lock", NULL, MTX_DEF);
}
/* return -1 if no index available */
static int
nm_vi_get_index(void)
{
	int ret;

	mtx_lock(&nm_vi_indices.lock);
	ret = nm_vi_indices.active == NM_VI_MAX ? -1 :
		nm_vi_indices.index[nm_vi_indices.active++];
	mtx_unlock(&nm_vi_indices.lock);
	return ret;
}
static void
nm_vi_free_index(uint8_t val)
{
	int i, lim;

	mtx_lock(&nm_vi_indices.lock);
	lim = nm_vi_indices.active;
	for (i = 0; i < lim; i++) {
		if (nm_vi_indices.index[i] == val) {
			/* swap index[lim-1] and index[i] */
			int tmp = nm_vi_indices.index[lim-1];
			nm_vi_indices.index[lim-1] = val;
			nm_vi_indices.index[i] = tmp;
			nm_vi_indices.active--;
			break;
		}
	}
	if (lim == nm_vi_indices.active)
		nm_prerr("Index %u not found", val);
	mtx_unlock(&nm_vi_indices.lock);
}
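/*
 * Illustrative sketch (not part of the original driver): how the index
 * manager above brackets the lifetime of a persistent VI MAC address.
 */
#if 0	/* example only, not compiled */
	int unit = nm_vi_get_index();

	if (unit < 0)
		return EBUSY;	/* all NM_VI_MAX indices in use */
	/* ... use 'unit' as the lowest byte of the MAC address ... */
	nm_vi_free_index((uint8_t)unit);
#endif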
/*
 * Implementation of a netmap-capable virtual interface that is
 * registered with the system.
 * It is based on if_tap.c and ip_fw_log.c in FreeBSD 9.
 *
 * Note: Linux sets refcount to 0 on allocation of net_device,
 * then increments it on registration to the system.
 * FreeBSD sets refcount to 1 on if_alloc(), and does not
 * increment this refcount on if_attach().
 */
int
nm_os_vi_persist(const char *name, struct ifnet **ret)
{
	struct ifnet *ifp;
	u_short macaddr_hi;
	uint32_t macaddr_mid;
	u_char eaddr[6];
	int unit = nm_vi_get_index(); /* just to decide MAC address */

	if (unit < 0)
		return EBUSY;
	/*
	 * We use the same MAC address generation method as tap,
	 * except that the highest octet pair is 00:be instead of 00:bd.
	 */
	macaddr_hi = htons(0x00be); /* XXX tap + 1 */
	macaddr_mid = (uint32_t) ticks;
	bcopy(&macaddr_hi, eaddr, sizeof(short));
	bcopy(&macaddr_mid, &eaddr[2], sizeof(uint32_t));
	eaddr[5] = (uint8_t)unit;

	ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		nm_prerr("if_alloc failed");
		return ENOMEM;
	}
	if_initname(ifp, name, IF_DUNIT_NONE);
	ifp->if_flags = IFF_UP | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_init = (void *)nm_vi_dummy;
	ifp->if_ioctl = nm_vi_dummy;
	ifp->if_start = nm_vi_start;
	ifp->if_mtu = ETHERMTU;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
	ifp->if_capabilities |= IFCAP_LINKSTATE;
	ifp->if_capenable |= IFCAP_LINKSTATE;

	ether_ifattach(ifp, eaddr);
	*ret = ifp;

	return 0;
}
/* unregister from the system and drop the final refcount */
void
nm_os_vi_detach(struct ifnet *ifp)
{
	nm_vi_free_index(((char *)IF_LLADDR(ifp))[5]);
	ether_ifdetach(ifp);
	if_free(ifp);
}
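/*
 * Illustrative sketch (not part of the original driver): creating and
 * destroying a persistent virtual interface.  The interface name
 * "vpnm0" is hypothetical; error handling is abbreviated.
 */
#if 0	/* example only, not compiled */
	struct ifnet *ifp;

	if (nm_os_vi_persist("vpnm0", &ifp) == 0) {
		/* ... interface now visible to the system ... */
		nm_os_vi_detach(ifp);	/* also releases the MAC index */
	}
#endif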
#ifdef WITH_EXTMEM
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
struct nm_os_extmem {
	vm_object_t obj;
	vm_offset_t kva;
	vm_offset_t size;
	uintptr_t scan;
};

void
nm_os_extmem_delete(struct nm_os_extmem *e)
{
	nm_prinf("freeing %zx bytes", (size_t)e->size);
	vm_map_remove(kernel_map, e->kva, e->kva + e->size);
	nm_os_free(e);
}

char *
nm_os_extmem_nextpage(struct nm_os_extmem *e)
{
	char *rv = NULL;

	if (e->scan < e->kva + e->size) {
		rv = (char *)e->scan;
		e->scan += PAGE_SIZE;
	}
	return rv;
}

int
nm_os_extmem_isequal(struct nm_os_extmem *e1, struct nm_os_extmem *e2)
{
	return (e1->obj == e2->obj);
}

int
nm_os_extmem_nr_pages(struct nm_os_extmem *e)
{
	return e->size >> PAGE_SHIFT;
}
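/*
 * Illustrative sketch (not part of the original driver): walking the
 * pages of a region 'e' obtained from nm_os_extmem_create() with the
 * accessors above.
 */
#if 0	/* example only, not compiled */
	char *va;

	while ((va = nm_os_extmem_nextpage(e)) != NULL) {
		/* 'va' is the kernel virtual address of the next
		 * PAGE_SIZE chunk; nm_os_extmem_nr_pages(e) bounds
		 * the number of iterations. */
	}
#endif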
struct nm_os_extmem *
nm_os_extmem_create(unsigned long p, struct nmreq_pools_info *pi, int *perror)
{
	vm_map_t map;
	vm_map_entry_t entry;
	vm_object_t obj;
	vm_prot_t prot;
	vm_pindex_t index;
	boolean_t wired;
	struct nm_os_extmem *e = NULL;
	int rv, error = 0;

	e = nm_os_malloc(sizeof(*e));
	if (e == NULL) {
		error = ENOMEM;
		goto out;
	}
	map = &curthread->td_proc->p_vmspace->vm_map;
	rv = vm_map_lookup(&map, p, VM_PROT_RW, &entry,
			&obj, &index, &prot, &wired);
	if (rv != KERN_SUCCESS) {
		nm_prerr("address %lx not found", p);
		error = vm_mmap_to_errno(rv);
		goto out_free;
	}
	vm_object_reference(obj);

	/* check that we are given the whole vm_object ? */
	vm_map_lookup_done(map, entry);

	e->obj = obj;
	/* Wire the memory and add the vm_object to the kernel map,
	 * to make sure that it is not freed even if all the processes
	 * that are mmap()ing should munmap() it.
	 */
	e->kva = vm_map_min(kernel_map);
	e->size = obj->size << PAGE_SHIFT;
	rv = vm_map_find(kernel_map, obj, 0, &e->kva, e->size, 0,
			VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
			VM_PROT_READ | VM_PROT_WRITE, 0);
	if (rv != KERN_SUCCESS) {
		nm_prerr("vm_map_find(%zx) failed", (size_t)e->size);
		error = vm_mmap_to_errno(rv);
		goto out_rel;
	}
	rv = vm_map_wire(kernel_map, e->kva, e->kva + e->size,
		VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
	if (rv != KERN_SUCCESS) {
		nm_prerr("vm_map_wire failed");
		error = vm_mmap_to_errno(rv);
		goto out_rem;
	}
	e->scan = e->kva;

	return e;

out_rem:
	vm_map_remove(kernel_map, e->kva, e->kva + e->size);
out_rel:
	vm_object_deallocate(e->obj);
	e->obj = NULL;
out_free:
	nm_os_free(e);
out:
	if (perror)
		*perror = error;
	return NULL;
}
#endif /* WITH_EXTMEM */
/* ================== PTNETMAP GUEST SUPPORT ==================== */

#ifdef WITH_PTNETMAP
#include <sys/rman.h>
#include <machine/bus.h>	/* bus_dmamap_* */
#include <machine/resource.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
/*
 * ptnetmap memory device (memdev) for FreeBSD guest,
 * used to expose host netmap memory to the guest through a PCI BAR.
 */

/*
 * ptnetmap memdev private data structure
 */
struct ptnetmap_memdev {
	device_t dev;
	struct resource *pci_io;
	struct resource *pci_mem;
	struct netmap_mem_d *nm_mem;
};
static int	ptn_memdev_probe(device_t);
static int	ptn_memdev_attach(device_t);
static int	ptn_memdev_detach(device_t);
static int	ptn_memdev_shutdown(device_t);

static device_method_t ptn_memdev_methods[] = {
	DEVMETHOD(device_probe, ptn_memdev_probe),
	DEVMETHOD(device_attach, ptn_memdev_attach),
	DEVMETHOD(device_detach, ptn_memdev_detach),
	DEVMETHOD(device_shutdown, ptn_memdev_shutdown),
	DEVMETHOD_END
};

static driver_t ptn_memdev_driver = {
	PTNETMAP_MEMDEV_NAME,
	ptn_memdev_methods,
	sizeof(struct ptnetmap_memdev),
};

/* We use (SI_ORDER_MIDDLE+1) here, see DEV_MODULE_ORDERED() invocation
 * below. */
static devclass_t ptnetmap_devclass;
DRIVER_MODULE_ORDERED(ptn_memdev, pci, ptn_memdev_driver, ptnetmap_devclass,
		      NULL, NULL, SI_ORDER_MIDDLE + 1);
/*
 * Map host netmap memory through PCI-BAR in the guest OS,
 * returning physical (nm_paddr) and virtual (nm_addr) addresses
 * of the netmap memory mapped in the guest.
 */
int
nm_os_pt_memdev_iomap(struct ptnetmap_memdev *ptn_dev, vm_paddr_t *nm_paddr,
		      void **nm_addr, uint64_t *mem_size)
{
	int rid;

	nm_prinf("ptn_memdev_driver iomap");

	rid = PCIR_BAR(PTNETMAP_MEM_PCI_BAR);
	*mem_size = bus_read_4(ptn_dev->pci_io, PTNET_MDEV_IO_MEMSIZE_HI);
	*mem_size = bus_read_4(ptn_dev->pci_io, PTNET_MDEV_IO_MEMSIZE_LO) |
			(*mem_size << 32);

	/* map memory allocator */
	ptn_dev->pci_mem = bus_alloc_resource(ptn_dev->dev, SYS_RES_MEMORY,
			&rid, 0, ~0, *mem_size, RF_ACTIVE);
	if (ptn_dev->pci_mem == NULL) {
		*nm_paddr = 0;
		*nm_addr = NULL;
		return ENOMEM;
	}

	*nm_paddr = rman_get_start(ptn_dev->pci_mem);
	*nm_addr = rman_get_virtual(ptn_dev->pci_mem);

	nm_prinf("=== BAR %d start %lx len %lx mem_size %lx ===",
			PTNETMAP_MEM_PCI_BAR,
			(unsigned long)(*nm_paddr),
			(unsigned long)rman_get_size(ptn_dev->pci_mem),
			(unsigned long)*mem_size);
	return (0);
}
uint32_t
nm_os_pt_memdev_ioread(struct ptnetmap_memdev *ptn_dev, unsigned int reg)
{
	return bus_read_4(ptn_dev->pci_io, reg);
}

/* Unmap host netmap memory. */
void
nm_os_pt_memdev_iounmap(struct ptnetmap_memdev *ptn_dev)
{
	nm_prinf("ptn_memdev_driver iounmap");

	if (ptn_dev->pci_mem) {
		bus_release_resource(ptn_dev->dev, SYS_RES_MEMORY,
			PCIR_BAR(PTNETMAP_MEM_PCI_BAR), ptn_dev->pci_mem);
		ptn_dev->pci_mem = NULL;
	}
}
/* Device identification routine, return BUS_PROBE_DEFAULT on success,
 * positive on failure */
static int
ptn_memdev_probe(device_t dev)
{
	char desc[256];

	if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID)
		return (ENXIO);
	if (pci_get_device(dev) != PTNETMAP_PCI_DEVICE_ID)
		return (ENXIO);

	snprintf(desc, sizeof(desc), "%s PCI adapter",
			PTNETMAP_MEMDEV_NAME);
	device_set_desc_copy(dev, desc);

	return (BUS_PROBE_DEFAULT);
}
/* Device initialization routine. */
static int
ptn_memdev_attach(device_t dev)
{
	struct ptnetmap_memdev *ptn_dev;
	int rid;
	uint16_t mem_id;

	ptn_dev = device_get_softc(dev);
	ptn_dev->dev = dev;

	pci_enable_busmaster(dev);

	rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR);
	ptn_dev->pci_io = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid,
						 RF_ACTIVE);
	if (ptn_dev->pci_io == NULL) {
		device_printf(dev, "cannot map I/O space\n");
		return (ENXIO);
	}

	mem_id = bus_read_4(ptn_dev->pci_io, PTNET_MDEV_IO_MEMID);

	/* create guest allocator */
	ptn_dev->nm_mem = netmap_mem_pt_guest_attach(ptn_dev, mem_id);
	if (ptn_dev->nm_mem == NULL) {
		ptn_memdev_detach(dev);
		return (ENOMEM);
	}
	netmap_mem_get(ptn_dev->nm_mem);

	nm_prinf("ptnetmap memdev attached, host memid: %u", mem_id);

	return (0);
}
/* Device removal routine. */
static int
ptn_memdev_detach(device_t dev)
{
	struct ptnetmap_memdev *ptn_dev;

	ptn_dev = device_get_softc(dev);

	if (ptn_dev->nm_mem) {
		nm_prinf("ptnetmap memdev detached, host memid %u",
			netmap_mem_get_id(ptn_dev->nm_mem));
		netmap_mem_put(ptn_dev->nm_mem);
		ptn_dev->nm_mem = NULL;
	}
	if (ptn_dev->pci_mem) {
		bus_release_resource(dev, SYS_RES_MEMORY,
			PCIR_BAR(PTNETMAP_MEM_PCI_BAR), ptn_dev->pci_mem);
		ptn_dev->pci_mem = NULL;
	}
	if (ptn_dev->pci_io) {
		bus_release_resource(dev, SYS_RES_IOPORT,
			PCIR_BAR(PTNETMAP_IO_PCI_BAR), ptn_dev->pci_io);
		ptn_dev->pci_io = NULL;
	}

	return (0);
}
static int
ptn_memdev_shutdown(device_t dev)
{
	return bus_generic_shutdown(dev);
}

#endif /* WITH_PTNETMAP */
/*
 * In order to track whether pages are still mapped, we hook into
 * the standard cdev_pager and intercept the constructor and
 * destructor.
 */

struct netmap_vm_handle_t {
	struct cdev		*dev;
	struct netmap_priv_d	*priv;
};

static int
netmap_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
{
	struct netmap_vm_handle_t *vmh = handle;

	if (netmap_verbose)
		nm_prinf("handle %p size %jd prot %d foff %jd",
			handle, (intmax_t)size, prot, (intmax_t)foff);
	if (color)
		*color = 0;
	dev_ref(vmh->dev);
	return 0;
}
static void
netmap_dev_pager_dtor(void *handle)
{
	struct netmap_vm_handle_t *vmh = handle;
	struct cdev *dev = vmh->dev;
	struct netmap_priv_d *priv = vmh->priv;

	if (netmap_verbose)
		nm_prinf("handle %p", handle);
	netmap_dtor(priv);
	free(vmh, M_DEVBUF);
	dev_rel(dev);
}
static int
netmap_dev_pager_fault(vm_object_t object, vm_ooffset_t offset,
	int prot, vm_page_t *mres)
{
	struct netmap_vm_handle_t *vmh = object->handle;
	struct netmap_priv_d *priv = vmh->priv;
	struct netmap_adapter *na = priv->np_na;
	vm_paddr_t paddr;
	vm_page_t page;
	vm_memattr_t memattr;

	nm_prdis("object %p offset %jd prot %d mres %p",
			object, (intmax_t)offset, prot, mres);
	memattr = object->memattr;
	paddr = netmap_mem_ofstophys(na->nm_mem, offset);
	if (paddr == 0)
		return VM_PAGER_FAIL;

	if (((*mres)->flags & PG_FICTITIOUS) != 0) {
		/*
		 * If the passed in result page is a fake page, update it with
		 * the new physical address.
		 */
		page = *mres;
		vm_page_updatefake(page, paddr, memattr);
	} else {
		/*
		 * Replace the passed in reqpage page with our own fake page and
		 * free up all of the original pages.
		 */
#ifndef VM_OBJECT_WUNLOCK	/* FreeBSD < 10.x */
#define VM_OBJECT_WUNLOCK VM_OBJECT_UNLOCK
#define VM_OBJECT_WLOCK	VM_OBJECT_LOCK
#endif /* VM_OBJECT_WUNLOCK */

		VM_OBJECT_WUNLOCK(object);
		page = vm_page_getfake(paddr, memattr);
		VM_OBJECT_WLOCK(object);
		vm_page_replace(page, object, (*mres)->pindex, *mres);
		*mres = page;
	}
	page->valid = VM_PAGE_BITS_ALL;
	return (VM_PAGER_OK);
}
static struct cdev_pager_ops netmap_cdev_pager_ops = {
	.cdev_pg_ctor = netmap_dev_pager_ctor,
	.cdev_pg_dtor = netmap_dev_pager_dtor,
	.cdev_pg_fault = netmap_dev_pager_fault,
};
static int
netmap_mmap_single(struct cdev *cdev, vm_ooffset_t *foff,
	vm_size_t objsize, vm_object_t *objp, int prot)
{
	int error;
	struct netmap_vm_handle_t *vmh;
	struct netmap_priv_d *priv;
	vm_object_t obj;

	if (netmap_verbose)
		nm_prinf("cdev %p foff %jd size %jd objp %p prot %d", cdev,
		    (intmax_t )*foff, (intmax_t )objsize, objp, prot);

	vmh = malloc(sizeof(struct netmap_vm_handle_t), M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (vmh == NULL)
		return ENOMEM;
	vmh->dev = cdev;

	NMG_LOCK();
	error = devfs_get_cdevpriv((void**)&priv);
	if (error)
		goto err_unlock;
	if (priv->np_nifp == NULL) {
		error = EINVAL;
		goto err_unlock;
	}
	vmh->priv = priv;
	priv->np_refs++;
	NMG_UNLOCK();

	obj = cdev_pager_allocate(vmh, OBJT_DEVICE,
		&netmap_cdev_pager_ops, objsize, prot,
		*foff, NULL);
	if (obj == NULL) {
		nm_prerr("cdev_pager_allocate failed");
		error = EINVAL;
		goto err_deref;
	}

	*objp = obj;
	return 0;

err_deref:
	NMG_LOCK();
	priv->np_refs--;
err_unlock:
	NMG_UNLOCK();
	free(vmh, M_DEVBUF);
	return error;
}
/*
 * On FreeBSD the close routine is only called on the last close on
 * the device (/dev/netmap) so we cannot do anything useful.
 * To track close() on individual file descriptors we pass netmap_dtor() to
 * devfs_set_cdevpriv() on open(). The FreeBSD kernel will call the destructor
 * when the last fd pointing to the device is closed.
 *
 * Note that FreeBSD does not even munmap() on close() so we also have
 * to track mmap() ourselves, and postpone the call to
 * netmap_dtor() until the process has no open fds and no active
 * memory maps on /dev/netmap, as in Linux.
 */
static int
netmap_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
{
	if (netmap_verbose)
		nm_prinf("dev %p fflag 0x%x devtype %d td %p",
			dev, fflag, devtype, td);
	return 0;
}
static int
netmap_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
{
	struct netmap_priv_d *priv;
	int error;

	NMG_LOCK();
	priv = netmap_priv_new();
	if (priv == NULL) {
		error = ENOMEM;
		goto out;
	}
	error = devfs_set_cdevpriv(priv, netmap_dtor);
	if (error) {
		netmap_priv_delete(priv);
	}
out:
	NMG_UNLOCK();
	return error;
}
/******************** kthread wrapper ****************/
#include <sys/sysproto.h>
u_int
nm_os_ncpus(void)
{
	return mp_maxid + 1;
}
struct nm_kctx_ctx {
	/* Userspace thread (kthread creator). */
	struct thread *user_td;

	/* worker function and parameter */
	nm_kctx_worker_fn_t worker_fn;
	void *worker_private;

	struct nm_kctx *nmk;

	/* integer to manage multiple worker contexts (e.g., RX or TX on ptnetmap) */
	long type;
};

struct nm_kctx {
	struct thread *worker;
	struct mtx worker_lock;
	struct nm_kctx_ctx worker_ctx;
	int run;		/* used to stop kthread */
	int attach_user;	/* kthread attached to user_process */
	int affinity;
};
static void
nm_kctx_worker(void *data)
{
	struct nm_kctx *nmk = data;
	struct nm_kctx_ctx *ctx = &nmk->worker_ctx;

	if (nmk->affinity >= 0) {
		thread_lock(curthread);
		sched_bind(curthread, nmk->affinity);
		thread_unlock(curthread);
	}

	while (nmk->run) {
		/*
		 * check if the parent process dies
		 * (when the kthread is attached to a user process)
		 */
		if (ctx->user_td) {
			PROC_LOCK(curproc);
			thread_suspend_check(0);
			PROC_UNLOCK(curproc);
		} else {
			kthread_suspend_check();
		}

		/* Continuously execute worker process. */
		ctx->worker_fn(ctx->worker_private); /* worker body */
	}

	kthread_exit();
}
void
nm_os_kctx_worker_setaff(struct nm_kctx *nmk, int affinity)
{
	nmk->affinity = affinity;
}
struct nm_kctx *
nm_os_kctx_create(struct nm_kctx_cfg *cfg, void *opaque)
{
	struct nm_kctx *nmk = NULL;

	nmk = malloc(sizeof(*nmk), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (nmk == NULL)
		return NULL;

	mtx_init(&nmk->worker_lock, "nm_kthread lock", NULL, MTX_DEF);
	nmk->worker_ctx.worker_fn = cfg->worker_fn;
	nmk->worker_ctx.worker_private = cfg->worker_private;
	nmk->worker_ctx.type = cfg->type;
	nmk->affinity = -1;

	/* attach kthread to user process (ptnetmap) */
	nmk->attach_user = cfg->attach_user;

	return nmk;
}
int
nm_os_kctx_worker_start(struct nm_kctx *nmk)
{
	struct proc *p = NULL;
	int error = 0;

	/* Temporarily disable this function as it is currently broken
	 * and causes kernel crashes. The failure can be triggered by
	 * the "vale_polling_enable_disable" test in ctrl-api-test.c. */
	return EOPNOTSUPP;

	if (nmk->worker)
		return EBUSY;

	/* check if we want to attach kthread to user process */
	if (nmk->attach_user) {
		nmk->worker_ctx.user_td = curthread;
		p = curthread->td_proc;
	}

	/* enable kthread main loop */
	nmk->run = 1;
	/* create kthread */
	if ((error = kthread_add(nm_kctx_worker, nmk, p,
			&nmk->worker, RFNOWAIT /* to be checked */, 0, "nm-kthread-%ld",
			nmk->worker_ctx.type))) {
		goto err;
	}

	nm_prinf("nm_kthread started td %p", nmk->worker);

	return 0;

err:
	nm_prerr("nm_kthread start failed err %d", error);
	nmk->worker = NULL;
	return error;
}
void
nm_os_kctx_worker_stop(struct nm_kctx *nmk)
{
	if (!nmk->worker)
		return;

	/* tell the kthread to exit from the main loop */
	nmk->run = 0;

	/* wake up the kthread if it sleeps */
	kthread_resume(nmk->worker);

	nmk->worker = NULL;
}
void
nm_os_kctx_destroy(struct nm_kctx *nmk)
{
	if (!nmk)
		return;

	if (nmk->worker)
		nm_os_kctx_worker_stop(nmk);

	free(nmk, M_DEVBUF);
}
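/*
 * Illustrative sketch (not part of the original driver): the expected
 * nm_kctx life cycle.  The worker function 'my_worker' and its argument
 * 'my_arg' are hypothetical.
 */
#if 0	/* example only, not compiled */
	struct nm_kctx_cfg cfg = {
		.worker_fn = my_worker,
		.worker_private = my_arg,
		.type = 0,
		.attach_user = 0,	/* standalone kernel thread */
	};
	struct nm_kctx *nmk = nm_os_kctx_create(&cfg, NULL);

	if (nmk != NULL) {
		nm_os_kctx_worker_start(nmk);	/* currently returns EOPNOTSUPP */
		/* ... */
		nm_os_kctx_worker_stop(nmk);
		nm_os_kctx_destroy(nmk);
	}
#endif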
/******************** kqueue support ****************/

/*
 * In addition to calling selwakeuppri(), nm_os_selwakeup() also
 * needs to call knote() to wake up kqueue listeners.
 * This operation is deferred to a taskqueue in order to avoid possible
 * lock order reversals; these may happen because knote() grabs a
 * private lock associated to the 'si' (see struct selinfo,
 * struct nm_selinfo, and nm_os_selinfo_init), and nm_os_selwakeup()
 * can be called while holding the lock associated to a different
 * 'si'.
 * When calling knote() we use a non-zero 'hint' argument to inform
 * the netmap_knrw() function that it is being called from
 * 'nm_os_selwakeup'; this is necessary because when netmap_knrw() is
 * called by the kevent subsystem (i.e. kevent_scan()) we also need to
 * call netmap_poll().
 *
 * The netmap_kqfilter() function registers one or another f_event
 * depending on read or write mode. A pointer to the struct
 * 'netmap_priv_d' is stored into kn->kn_hook, so that it can later
 * be passed to netmap_poll(). We pass NULL as a third argument to
 * netmap_poll(), so that the latter only runs the txsync/rxsync
 * (if necessary), and skips the nm_os_selrecord() calls.
 */
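/*
 * Illustrative sketch (not part of the original driver): what a
 * userspace consumer of this kqueue support might look like.  'nmfd'
 * is a hypothetical open file descriptor on /dev/netmap.
 */
#if 0	/* example only, not compiled */
	struct kevent ev, res;
	int kq = kqueue();

	EV_SET(&ev, nmfd, EVFILT_READ, EV_ADD, 0, 0, NULL);
	kevent(kq, &ev, 1, NULL, 0, NULL);	/* ends up in netmap_kqfilter() */
	kevent(kq, NULL, 0, &res, 1, NULL);	/* woken up via nm_kqueue_notify() */
#endif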
void
nm_os_selwakeup(struct nm_selinfo *si)
{
	selwakeuppri(&si->si, PI_NET);
	if (si->kqueue_users > 0) {
		taskqueue_enqueue(si->ntfytq, &si->ntfytask);
	}
}

void
nm_os_selrecord(struct thread *td, struct nm_selinfo *si)
{
	selrecord(td, &si->si);
}
static void
netmap_knrdetach(struct knote *kn)
{
	struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook;
	struct nm_selinfo *si = priv->np_si[NR_RX];

	knlist_remove(&si->si.si_note, kn, /*islocked=*/0);
	NMG_LOCK();
	KASSERT(si->kqueue_users > 0, ("kqueue_user underflow on %s",
	    si->mtxname));
	si->kqueue_users--;
	nm_prinf("kqueue users for %s: %d", si->mtxname, si->kqueue_users);
	NMG_UNLOCK();
}

static void
netmap_knwdetach(struct knote *kn)
{
	struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook;
	struct nm_selinfo *si = priv->np_si[NR_TX];

	knlist_remove(&si->si.si_note, kn, /*islocked=*/0);
	NMG_LOCK();
	si->kqueue_users--;
	nm_prinf("kqueue users for %s: %d", si->mtxname, si->kqueue_users);
	NMG_UNLOCK();
}
/*
 * Callback triggered by netmap notifications (see netmap_notify()),
 * and by the application calling kevent(). In the former case we
 * just return 1 (events ready), since we are not able to do better.
 * In the latter case we use netmap_poll() to see which events are
 * ready.
 */
static int
netmap_knrw(struct knote *kn, long hint, int events)
{
	struct netmap_priv_d *priv;
	int revents;

	if (hint != 0) {
		/* Called from netmap_notify(), typically from a
		 * thread different from the one issuing kevent().
		 * Assume we are ready. */
		return 1;
	}

	/* Called from kevent(). */
	priv = kn->kn_hook;
	revents = netmap_poll(priv, events, /*thread=*/NULL);

	return (events & revents) ? 1 : 0;
}

static int
netmap_knread(struct knote *kn, long hint)
{
	return netmap_knrw(kn, hint, POLLIN);
}

static int
netmap_knwrite(struct knote *kn, long hint)
{
	return netmap_knrw(kn, hint, POLLOUT);
}
static struct filterops netmap_rfiltops = {
	.f_isfd = 1,
	.f_detach = netmap_knrdetach,
	.f_event = netmap_knread,
};

static struct filterops netmap_wfiltops = {
	.f_isfd = 1,
	.f_detach = netmap_knwdetach,
	.f_event = netmap_knwrite,
};
/*
 * This is called when a thread invokes kevent() to record
 * a change in the configuration of the kqueue().
 * The 'priv' is the one associated to the open netmap device.
 */
static int
netmap_kqfilter(struct cdev *dev, struct knote *kn)
{
	struct netmap_priv_d *priv;
	int error;
	struct netmap_adapter *na;
	struct nm_selinfo *si;
	int ev = kn->kn_filter;

	if (ev != EVFILT_READ && ev != EVFILT_WRITE) {
		nm_prerr("bad filter request %d", ev);
		return 1;
	}
	error = devfs_get_cdevpriv((void**)&priv);
	if (error) {
		nm_prerr("device not yet setup");
		return 1;
	}
	na = priv->np_na;
	if (na == NULL) {
		nm_prerr("no netmap adapter for this file descriptor");
		return 1;
	}
	/* the si is indicated in the priv */
	si = priv->np_si[(ev == EVFILT_WRITE) ? NR_TX : NR_RX];
	kn->kn_fop = (ev == EVFILT_WRITE) ?
		&netmap_wfiltops : &netmap_rfiltops;
	kn->kn_hook = priv;
	NMG_LOCK();
	si->kqueue_users++;
	nm_prinf("kqueue users for %s: %d", si->mtxname, si->kqueue_users);
	NMG_UNLOCK();
	knlist_add(&si->si.si_note, kn, /*islocked=*/0);

	return 0;
}
static int
freebsd_netmap_poll(struct cdev *cdevi __unused, int events, struct thread *td)
{
	struct netmap_priv_d *priv;

	if (devfs_get_cdevpriv((void **)&priv)) {
		return POLLERR;
	}
	return netmap_poll(priv, events, td);
}
static int
freebsd_netmap_ioctl(struct cdev *dev __unused, u_long cmd, caddr_t data,
		int ffla __unused, struct thread *td)
{
	int error;
	struct netmap_priv_d *priv;

	CURVNET_SET(TD_TO_VNET(td));
	error = devfs_get_cdevpriv((void **)&priv);
	if (error) {
		/* XXX ENOENT should be impossible, since the priv
		 * is now created in the open */
		if (error == ENOENT)
			error = ENXIO;
		goto out;
	}
	error = netmap_ioctl(priv, cmd, data, td, /*nr_body_is_user=*/1);
out:
	CURVNET_RESTORE();

	return error;
}
void
nm_os_onattach(struct ifnet *ifp)
{
	ifp->if_capabilities |= IFCAP_NETMAP;
}

void
nm_os_onenter(struct ifnet *ifp)
{
	struct netmap_adapter *na = NA(ifp);

	na->if_transmit = ifp->if_transmit;
	ifp->if_transmit = netmap_transmit;
	ifp->if_capenable |= IFCAP_NETMAP;
}

void
nm_os_onexit(struct ifnet *ifp)
{
	struct netmap_adapter *na = NA(ifp);

	ifp->if_transmit = na->if_transmit;
	ifp->if_capenable &= ~IFCAP_NETMAP;
}
extern struct cdevsw netmap_cdevsw; /* XXX used in netmap.c, should go elsewhere */
struct cdevsw netmap_cdevsw = {
	.d_version = D_VERSION,
	.d_name = "netmap",
	.d_open = netmap_open,
	.d_mmap_single = netmap_mmap_single,
	.d_ioctl = freebsd_netmap_ioctl,
	.d_poll = freebsd_netmap_poll,
	.d_kqfilter = netmap_kqfilter,
	.d_close = netmap_close,
};
/*--- end of kqueue support ----*/
/*
 * Kernel entry point.
 *
 * Initialize/finalize the module and return.
 *
 * Return 0 on success, errno on failure.
 */
static int
netmap_loader(__unused struct module *module, int event, __unused void *arg)
{
	int error = 0;

	switch (event) {
	case MOD_LOAD:
		error = netmap_init();
		break;

	case MOD_UNLOAD:
		/*
		 * if someone is still using netmap,
		 * then the module cannot be unloaded.
		 */
		if (netmap_use_count) {
			nm_prerr("netmap module cannot be unloaded - netmap_use_count: %d",
					netmap_use_count);
			error = EBUSY;
			break;
		}
		netmap_fini();
		break;

	default:
		error = EOPNOTSUPP;
		break;
	}

	return (error);
}
#ifdef DEV_MODULE_ORDERED
/*
 * The netmap module contains three drivers: (i) the netmap character device
 * driver; (ii) the ptnetmap memdev PCI device driver, (iii) the ptnet PCI
 * device driver. The attach() routines of both (ii) and (iii) need the
 * lock of the global allocator, and such lock is initialized in netmap_init(),
 * which is part of (i).
 * Therefore, we make sure that (i) is loaded before (ii) and (iii), using
 * the 'order' parameter of driver declaration macros. For (i), we specify
 * SI_ORDER_MIDDLE, while higher orders are used with the DRIVER_MODULE_ORDERED
 * macros for (ii) and (iii).
 */
DEV_MODULE_ORDERED(netmap, netmap_loader, NULL, SI_ORDER_MIDDLE);
#else /* !DEV_MODULE_ORDERED */
DEV_MODULE(netmap, netmap_loader, NULL);
#endif /* DEV_MODULE_ORDERED */
MODULE_DEPEND(netmap, pci, 1, 1, 1);
MODULE_VERSION(netmap, 1);