/*
 * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include "opt_inet6.h"

#include <sys/types.h>
#include <sys/module.h>
#include <sys/errno.h>
#include <sys/param.h> /* defines used in kernel.h */
#include <sys/poll.h> /* POLLIN, POLLOUT */
#include <sys/kernel.h> /* types used in module initialization */
#include <sys/conf.h> /* DEV_MODULE */
#include <sys/endian.h>

#include <sys/rwlock.h>

#include <vm/vm.h> /* vtophys */
#include <vm/pmap.h> /* vtophys */
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <sys/malloc.h>
#include <sys/socket.h> /* sockaddrs */
#include <sys/selinfo.h>

#include <net/if_var.h>
#include <net/if_types.h> /* IFT_ETHER */
#include <net/ethernet.h> /* ether_ifdetach */
#include <net/if_dl.h> /* LLADDR */
#include <machine/bus.h> /* bus_dmamap_* */
#include <netinet/in.h> /* in6_cksum_pseudo() */
#include <machine/in_cksum.h> /* in_pseudo(), in_cksum_hdr() */

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>
/* ======================== FREEBSD-SPECIFIC ROUTINES ================== */

nm_csum_raw(uint8_t *data, size_t len, rawsum_t cur_sum)
    /* TODO XXX please use the FreeBSD implementation for this. */
    uint16_t *words = (uint16_t *)data;

    for (i = 0; i < nw; i++)
        cur_sum += be16toh(words[i]);

    cur_sum += (data[len-1] << 8);

/* Fold a raw checksum: 'cur_sum' is in host byte order, while the
 * return value is in network byte order.
 */
nm_csum_fold(rawsum_t cur_sum)
    /* TODO XXX please use the FreeBSD implementation for this. */
    cur_sum = (cur_sum & 0xFFFF) + (cur_sum >> 16);

    return htobe16((~cur_sum) & 0xFFFF);

uint16_t nm_csum_ipv4(struct nm_iphdr *iph)
    return in_cksum_hdr((void *)iph);

    return nm_csum_fold(nm_csum_raw((uint8_t*)iph, sizeof(struct nm_iphdr), 0));
nm_csum_tcpudp_ipv4(struct nm_iphdr *iph, void *data,
    size_t datalen, uint16_t *check)
    uint16_t pseudolen = datalen + iph->protocol;

    /* Compute and insert the pseudo-header checksum. */
    *check = in_pseudo(iph->saddr, iph->daddr,

    /* Compute the checksum on TCP/UDP header + payload
     * (includes the pseudo-header).
     */
    *check = nm_csum_fold(nm_csum_raw(data, datalen, 0));

    static int notsupported = 0;
    D("inet4 segmentation not supported");

nm_csum_tcpudp_ipv6(struct nm_ipv6hdr *ip6h, void *data,
    size_t datalen, uint16_t *check)
    *check = in6_cksum_pseudo((void*)ip6h, datalen, ip6h->nexthdr, 0);
    *check = nm_csum_fold(nm_csum_raw(data, datalen, 0));

    static int notsupported = 0;
    D("inet6 segmentation not supported");
/*
 * Intercept the rx routine in the standard device driver.
 * Second argument is non-zero to intercept, 0 to restore.
 */
netmap_catch_rx(struct netmap_generic_adapter *gna, int intercept)
    struct netmap_adapter *na = &gna->up.up;
    struct ifnet *ifp = na->ifp;

    if (gna->save_if_input) {
        D("cannot intercept again");
        return EINVAL; /* already set */
    gna->save_if_input = ifp->if_input;
    ifp->if_input = generic_rx_handler;

    if (!gna->save_if_input) {
        return EINVAL; /* not saved */
    ifp->if_input = gna->save_if_input;
    gna->save_if_input = NULL;
/*
 * Intercept the packet steering routine in the tx path,
 * so that we can decide which queue is used for an mbuf.
 * Second argument is non-zero to intercept, 0 to restore.
 * On FreeBSD we just intercept if_transmit.
 */
netmap_catch_tx(struct netmap_generic_adapter *gna, int enable)
    struct netmap_adapter *na = &gna->up.up;
    struct ifnet *ifp = netmap_generic_getifp(gna);

    na->if_transmit = ifp->if_transmit;
    ifp->if_transmit = netmap_transmit;

    ifp->if_transmit = na->if_transmit;
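/*
 * Illustrative sketch (not from this file): how the two catch routines
 * above are typically paired when the generic adapter is switched in
 * and out of netmap mode. The surrounding register logic is only
 * assumed here; the real code lives in the generic adapter elsewhere.
 */
#if 0
    if (onoff) {
        netmap_catch_rx(gna, 1);    /* divert if_input to netmap */
        netmap_catch_tx(gna, 1);    /* divert if_transmit to netmap */
    } else {
        netmap_catch_tx(gna, 0);    /* restore the saved if_transmit */
        netmap_catch_rx(gna, 0);    /* restore the saved if_input */
    }
#endif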
/*
 * Transmit routine used by generic_netmap_txsync(). Returns 0 on success
 * and non-zero on error (which may be packet drops or other errors).
 * addr and len identify the netmap buffer, m is the (preallocated)
 * mbuf to use for transmissions.
 *
 * We should add a reference to the mbuf so the m_freem() at the end
 * of the transmission does not consume resources.
 *
 * On FreeBSD, and on multiqueue cards, we can force the queue using
 *      if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
 *              i = m->m_pkthdr.flowid % adapter->num_queues;
 *      else
 *              i = curcpu % adapter->num_queues;
 */
generic_xmit_frame(struct ifnet *ifp, struct mbuf *m,
    void *addr, u_int len, u_int ring_nr)

    /* Link the external storage to the netmap buffer, so that
     * no copy is necessary. */
    m->m_ext.ext_buf = m->m_data = addr;
    m->m_ext.ext_size = len;

    m->m_len = m->m_pkthdr.len = len;

    /* mbuf refcnt is not contended, no need to use atomic
     * (a memory barrier is enough). */
    SET_MBUF_REFCNT(m, 2);
    M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
    m->m_pkthdr.flowid = ring_nr;
    m->m_pkthdr.rcvif = ifp; /* used for tx notification */
    ret = NA(ifp)->if_transmit(ifp, m);
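/*
 * Hedged usage sketch (not the real generic_netmap_txsync()): for each
 * slot to transmit, the generic adapter pairs a preallocated mbuf from
 * its tx pool with the netmap buffer and calls generic_xmit_frame().
 * The cursor names (nm_i, head, lim) follow the usual kring conventions,
 * and the example_* helpers are invented here purely for illustration.
 */
#if 0
    for (; nm_i != head; nm_i = (nm_i + 1 == lim ? 0 : nm_i + 1)) {
        void *addr = example_slot_buf(kring, nm_i);       /* netmap buffer */
        u_int len = kring->ring->slot[nm_i].len;
        struct mbuf *m = example_slot_mbuf(kring, nm_i);  /* preallocated mbuf */

        if (generic_xmit_frame(ifp, m, addr, len, ring_nr))
            break;  /* queue full or transmit error: stop here */
    }
#endif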
#if __FreeBSD_version >= 1100005
struct netmap_adapter *
netmap_getna(if_t ifp)
    return (NA((struct ifnet *)ifp));
#endif /* __FreeBSD_version >= 1100005 */
/*
 * The following two functions are empty until we have a generic
 * way to extract the info from the ifp.
 */
generic_find_num_desc(struct ifnet *ifp, unsigned int *tx, unsigned int *rx)
    D("called, in tx %d rx %d", *tx, *rx);

generic_find_num_queues(struct ifnet *ifp, u_int *txq, u_int *rxq)
    D("called, in txq %d rxq %d", *txq, *rxq);
    *txq = netmap_generic_rings;
    *rxq = netmap_generic_rings;
netmap_mitigation_init(struct nm_generic_mit *mit, int idx, struct netmap_adapter *na)
    mit->mit_pending = 0;
    mit->mit_ring_idx = idx;

netmap_mitigation_start(struct nm_generic_mit *mit)

netmap_mitigation_restart(struct nm_generic_mit *mit)

netmap_mitigation_active(struct nm_generic_mit *mit)

netmap_mitigation_cleanup(struct nm_generic_mit *mit)
nm_vi_dummy(struct ifnet *ifp, u_long cmd, caddr_t addr)

nm_vi_start(struct ifnet *ifp)
    panic("nm_vi_start() must not be called");
/*
 * Index manager of persistent virtual interfaces.
 * It is used to decide the lowest byte of the MAC address.
 * We use the same algorithm as for the management of bridge port indexes.
 */
#define NM_VI_MAX 255
    uint8_t index[NM_VI_MAX]; /* XXX just for a reasonable number */

nm_vi_init_index(void)
    for (i = 0; i < NM_VI_MAX; i++)
        nm_vi_indices.index[i] = i;
    nm_vi_indices.active = 0;
    mtx_init(&nm_vi_indices.lock, "nm_vi_indices_lock", NULL, MTX_DEF);
/* return -1 if no index available */
nm_vi_get_index(void)
    mtx_lock(&nm_vi_indices.lock);
    ret = nm_vi_indices.active == NM_VI_MAX ? -1 :
        nm_vi_indices.index[nm_vi_indices.active++];
    mtx_unlock(&nm_vi_indices.lock);
nm_vi_free_index(uint8_t val)
    mtx_lock(&nm_vi_indices.lock);
    lim = nm_vi_indices.active;
    for (i = 0; i < lim; i++) {
        if (nm_vi_indices.index[i] == val) {
            /* swap index[lim-1] and index[i] */
            int tmp = nm_vi_indices.index[lim-1];
            nm_vi_indices.index[lim-1] = val;
            nm_vi_indices.index[i] = tmp;
            nm_vi_indices.active--;

    if (lim == nm_vi_indices.active)
        D("funny, index %u was not found", val);
    mtx_unlock(&nm_vi_indices.lock);
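/*
 * Hedged usage sketch (not part of this file): a persistent VALE port
 * grabs an index before building its MAC address and returns it when
 * the interface is destroyed; nm_vi_persist() and nm_vi_detach() below
 * do exactly this with the lowest address octet.
 */
#if 0
    int unit = nm_vi_get_index();   /* reserve the lowest MAC octet */

    if (unit < 0)
        return ENOMEM;              /* all NM_VI_MAX indices in use */
    eaddr[5] = (uint8_t)unit;       /* use it in the MAC address */
    /* later, on destroy: */
    nm_vi_free_index(eaddr[5]);
#endif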
/*
 * Implementation of a netmap-capable virtual interface that is
 * registered with the system.
 * It is based on if_tap.c and ip_fw_log.c in FreeBSD 9.
 *
 * Note: Linux sets refcount to 0 on allocation of net_device,
 * then increments it on registration to the system.
 * FreeBSD sets refcount to 1 on if_alloc(), and does not
 * increment this refcount on if_attach().
 */
nm_vi_persist(const char *name, struct ifnet **ret)
    uint32_t macaddr_mid;

    int unit = nm_vi_get_index(); /* just to decide MAC address */

    /*
     * We use the same MAC address generation method as tap,
     * except that the leading octets are 00:be instead of 00:bd.
     */
    macaddr_hi = htons(0x00be); /* XXX tap + 1 */
    macaddr_mid = (uint32_t) ticks;
    bcopy(&macaddr_hi, eaddr, sizeof(short));
    bcopy(&macaddr_mid, &eaddr[2], sizeof(uint32_t));
    eaddr[5] = (uint8_t)unit;
    ifp = if_alloc(IFT_ETHER);

    D("if_alloc failed");

    if_initname(ifp, name, IF_DUNIT_NONE);

    ifp->if_flags = IFF_UP | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_init = (void *)nm_vi_dummy;
    ifp->if_ioctl = nm_vi_dummy;
    ifp->if_start = nm_vi_start;
    ifp->if_mtu = ETHERMTU;
    IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
    ifp->if_capabilities |= IFCAP_LINKSTATE;
    ifp->if_capenable |= IFCAP_LINKSTATE;

    ether_ifattach(ifp, eaddr);
/* unregister from the system and drop the final refcount */
nm_vi_detach(struct ifnet *ifp)
    nm_vi_free_index(((char *)IF_LLADDR(ifp))[5]);
/*
 * In order to track whether pages are still mapped, we hook into
 * the standard cdev_pager and intercept the constructor and
 * the destructor.
 */
struct netmap_vm_handle_t {
    struct netmap_priv_d *priv;

netmap_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred, u_short *color)
    struct netmap_vm_handle_t *vmh = handle;

    D("handle %p size %jd prot %d foff %jd",
        handle, (intmax_t)size, prot, (intmax_t)foff);
netmap_dev_pager_dtor(void *handle)
    struct netmap_vm_handle_t *vmh = handle;
    struct cdev *dev = vmh->dev;
    struct netmap_priv_d *priv = vmh->priv;

    D("handle %p", handle);
netmap_dev_pager_fault(vm_object_t object, vm_ooffset_t offset,
    int prot, vm_page_t *mres)
    struct netmap_vm_handle_t *vmh = object->handle;
    struct netmap_priv_d *priv = vmh->priv;
    struct netmap_adapter *na = priv->np_na;

    vm_memattr_t memattr;

    ND("object %p offset %jd prot %d mres %p",
        object, (intmax_t)offset, prot, mres);
    memattr = object->memattr;
    pidx = OFF_TO_IDX(offset);
    paddr = netmap_mem_ofstophys(na->nm_mem, offset);

    return VM_PAGER_FAIL;

    if (((*mres)->flags & PG_FICTITIOUS) != 0) {
        /*
         * If the passed in result page is a fake page, update it with
         * the new physical address.
         */
        vm_page_updatefake(page, paddr, memattr);

    /*
     * Replace the passed in reqpage page with our own fake page and
     * free up all of the original pages.
     */
#ifndef VM_OBJECT_WUNLOCK /* FreeBSD < 10.x */
#define VM_OBJECT_WUNLOCK VM_OBJECT_UNLOCK
#define VM_OBJECT_WLOCK VM_OBJECT_LOCK
#endif /* VM_OBJECT_WUNLOCK */

    VM_OBJECT_WUNLOCK(object);
    page = vm_page_getfake(paddr, memattr);
    VM_OBJECT_WLOCK(object);

    vm_page_unlock(*mres);

    vm_page_insert(page, object, pidx);

    page->valid = VM_PAGE_BITS_ALL;
    return (VM_PAGER_OK);
static struct cdev_pager_ops netmap_cdev_pager_ops = {
    .cdev_pg_ctor = netmap_dev_pager_ctor,
    .cdev_pg_dtor = netmap_dev_pager_dtor,
    .cdev_pg_fault = netmap_dev_pager_fault,
};
netmap_mmap_single(struct cdev *cdev, vm_ooffset_t *foff,
    vm_size_t objsize, vm_object_t *objp, int prot)
    struct netmap_vm_handle_t *vmh;
    struct netmap_priv_d *priv;

    D("cdev %p foff %jd size %jd objp %p prot %d", cdev,
        (intmax_t)*foff, (intmax_t)objsize, objp, prot);

    vmh = malloc(sizeof(struct netmap_vm_handle_t), M_DEVBUF,

    error = devfs_get_cdevpriv((void**)&priv);

    if (priv->np_nifp == NULL) {

    obj = cdev_pager_allocate(vmh, OBJT_DEVICE,
        &netmap_cdev_pager_ops, objsize, prot,

    D("cdev_pager_allocate failed");
/*
 * On FreeBSD the close routine is only called on the last close on
 * the device (/dev/netmap) so we cannot do anything useful.
 * To track close() on individual file descriptors we pass netmap_dtor() to
 * devfs_set_cdevpriv() on open(). The FreeBSD kernel will call the destructor
 * when the last fd pointing to the device is closed.
 *
 * Note that FreeBSD does not even munmap() on close() so we also have
 * to track mmap() ourselves, and postpone the call to netmap_dtor()
 * until the process has no open fds and no active memory maps on
 * /dev/netmap, as in Linux.
 */
netmap_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
    D("dev %p fflag 0x%x devtype %d td %p",
        dev, fflag, devtype, td);
netmap_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
    struct netmap_priv_d *priv;

    priv = malloc(sizeof(struct netmap_priv_d), M_DEVBUF,

    error = devfs_set_cdevpriv(priv, netmap_dtor);

    free(priv, M_DEVBUF);
/******************** kqueue support ****************/

/*
 * The OS_selwakeup also needs to issue a KNOTE_UNLOCKED.
 * We use a non-zero argument to distinguish the call from the one
 * in kevent_scan(), which instead also needs to run netmap_poll().
 * The knote uses a global mutex for the time being. We might
 * try to reuse the one in the si, but it is not allocated
 * permanently so it might be a bit tricky.
 *
 * The *kqfilter function registers one or another f_event
 * depending on read or write mode.
 * In the call to f_event() td_fpop is NULL so any child function
 * calling devfs_get_cdevpriv() would fail - and we need it in
 * netmap_poll(). As a workaround we store priv into kn->kn_hook
 * and pass it as first argument to netmap_poll(), which then
 * uses the failure to tell that we are called from f_event()
 * and do not need the selrecord().
 */
freebsd_selwakeup(struct nm_selinfo *si, int pri)
    D("on knote %p", &si->si.si_note);

    selwakeuppri(&si->si, pri);
    /* use a non-zero hint to distinguish this notification from the
     * call done in kqueue_scan(), which uses 0
     */
    KNOTE_UNLOCKED(&si->si.si_note, 0x100 /* notification */);
netmap_knrdetach(struct knote *kn)
    struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook;
    struct selinfo *si = &priv->np_si[NR_RX]->si;

    D("remove selinfo %p", si);
    knlist_remove(&si->si_note, kn, 0);

netmap_knwdetach(struct knote *kn)
    struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook;
    struct selinfo *si = &priv->np_si[NR_TX]->si;

    D("remove selinfo %p", si);
    knlist_remove(&si->si_note, kn, 0);
/*
 * Callback invoked both by externally generated notifications and by our
 * own calls to kevent(). In the former case we just return 1 (ready),
 * since we do not know better.
 * In the latter case we call netmap_poll() and return 0/1 accordingly.
 */
netmap_knrw(struct knote *kn, long hint, int events)
    struct netmap_priv_d *priv;

    ND(5, "call from notify");
    return 1; /* assume we are ready */

    /* The notification may come from an external thread,
     * in which case we do not want to run netmap_poll().
     * This should be filtered above, but check just in case.
     */
    if (curthread != priv->np_td) { /* should not happen */
        RD(5, "curthread changed %p %p", curthread, priv->np_td);

    revents = netmap_poll((void *)priv, events, curthread);
    return (events & revents) ? 1 : 0;

netmap_knread(struct knote *kn, long hint)
    return netmap_knrw(kn, hint, POLLIN);

netmap_knwrite(struct knote *kn, long hint)
    return netmap_knrw(kn, hint, POLLOUT);
static struct filterops netmap_rfiltops = {
    .f_detach = netmap_knrdetach,
    .f_event = netmap_knread,
};

static struct filterops netmap_wfiltops = {
    .f_detach = netmap_knwdetach,
    .f_event = netmap_knwrite,
};
/*
 * This is called when a thread invokes kevent() to record
 * a change in the configuration of the kqueue.
 * The 'priv' should be the same as in the netmap device.
 */
netmap_kqfilter(struct cdev *dev, struct knote *kn)
    struct netmap_priv_d *priv;

    struct netmap_adapter *na;
    struct nm_selinfo *si;
    int ev = kn->kn_filter;

    if (ev != EVFILT_READ && ev != EVFILT_WRITE) {
        D("bad filter request %d", ev);

    error = devfs_get_cdevpriv((void**)&priv);

    D("device not yet setup");

    D("no netmap adapter for this file descriptor");

    /* the si is indicated in the priv */
    si = priv->np_si[(ev == EVFILT_WRITE) ? NR_TX : NR_RX];

    kn->kn_fop = (ev == EVFILT_WRITE) ?
        &netmap_wfiltops : &netmap_rfiltops;

    knlist_add(&si->si.si_note, kn, 1);

    ND("register %p %s td %p priv %p kn %p np_nifp %p kn_fp/fpop %s",
        na, na->ifp->if_xname, curthread, priv, kn,
        kn->kn_fp == curthread->td_fpop ? "match" : "MISMATCH");
struct cdevsw netmap_cdevsw = {
    .d_version = D_VERSION,
    .d_open = netmap_open,
    .d_mmap_single = netmap_mmap_single,
    .d_ioctl = netmap_ioctl,
    .d_poll = netmap_poll,
    .d_kqfilter = netmap_kqfilter,
    .d_close = netmap_close,
};

/*--- end of kqueue support ----*/
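/*
 * For reference, a hedged user-space sketch (illustration only, not
 * compiled into this module) of how the kqueue hooks above are
 * exercised: a process opens /dev/netmap and then waits for RX slots
 * with EVFILT_READ. The netmap_fd variable is assumed to be an open
 * descriptor on /dev/netmap.
 */
#if 0
    struct kevent kev, ev;
    int kq = kqueue();

    EV_SET(&kev, netmap_fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);
    kevent(kq, &kev, 1, NULL, 0, NULL);   /* register the filter */
    kevent(kq, NULL, 0, &ev, 1, NULL);    /* sleep until rx slots are ready */
#endif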
/*
 * Kernel entry point.
 *
 * Initialize/finalize the module and return.
 *
 * Return 0 on success, errno on failure.
 */
netmap_loader(__unused struct module *module, int event, __unused void *arg)
    error = netmap_init();

    /*
     * If someone is still using netmap,
     * then the module cannot be unloaded.
     */
    if (netmap_use_count) {
        D("netmap module cannot be unloaded - netmap_use_count: %d",

DEV_MODULE(netmap, netmap_loader, NULL);
MODULE_VERSION(netmap, 1);