/*
 * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/types.h>
#include <sys/module.h>
#include <sys/errno.h>
#include <sys/param.h>	/* defines used in kernel.h */
#include <sys/poll.h>	/* POLLIN, POLLOUT */
#include <sys/kernel.h>	/* types used in module initialization */
#include <sys/conf.h>	/* DEV_MODULE */
#include <sys/endian.h>
#include <sys/rwlock.h>

#include <vm/vm.h>	/* vtophys */
#include <vm/pmap.h>	/* vtophys */
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>

#include <sys/malloc.h>
#include <sys/socket.h>	/* sockaddrs */
#include <sys/selinfo.h>

#include <net/if_var.h>
#include <machine/bus.h>	/* bus_dmamap_* */
#include <netinet/in.h>		/* in6_cksum_pseudo() */
#include <machine/in_cksum.h>	/* in_pseudo(), in_cksum_hdr() */

#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <dev/netmap/netmap_mem2.h>
/* ======================== FREEBSD-SPECIFIC ROUTINES ================== */

rawsum_t nm_csum_raw(uint8_t *data, size_t len, rawsum_t cur_sum)
{
	/* TODO XXX please use the FreeBSD implementation for this. */
	uint16_t *words = (uint16_t *)data;
	int i, nw = len / 2;

	for (i = 0; i < nw; i++)
		cur_sum += be16toh(words[i]);
	if (len & 1)	/* fold in the trailing odd byte, if any */
		cur_sum += (data[len-1] << 8);
	return cur_sum;
}
/* Fold a raw checksum: 'cur_sum' is in host byte order, while the
 * return value is in network byte order.
 */
uint16_t nm_csum_fold(rawsum_t cur_sum)
{
	/* TODO XXX please use the FreeBSD implementation for this. */
	while (cur_sum >> 16)
		cur_sum = (cur_sum & 0xFFFF) + (cur_sum >> 16);

	return htobe16((~cur_sum) & 0xFFFF);
}
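/*
 * Worked example (illustrative only, not from the original source): a raw
 * sum of 0x1A0F3 folds to 0xA0F3 + 0x1 = 0xA0F4; its one's complement is
 * 0x5F0B, which htobe16() then converts to network byte order on return.
 */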
uint16_t nm_csum_ipv4(struct nm_iphdr *iph)
{
#if 0
	return in_cksum_hdr((void *)iph);
#else
	return nm_csum_fold(nm_csum_raw((uint8_t*)iph, sizeof(struct nm_iphdr), 0));
#endif
}
void nm_csum_tcpudp_ipv4(struct nm_iphdr *iph, void *data,
					size_t datalen, uint16_t *check)
{
	uint16_t pseudolen = datalen + iph->protocol;

	/* Compute and insert the pseudo-header checksum. */
	*check = in_pseudo(iph->saddr, iph->daddr,
				htobe16(pseudolen));
	/* Compute the checksum on TCP/UDP header + payload
	 * (this includes the pseudo-header sum just stored in the
	 * checksum field).
	 */
	*check = nm_csum_fold(nm_csum_raw(data, datalen, 0));
}
void nm_csum_tcpudp_ipv6(struct nm_ipv6hdr *ip6h, void *data,
					size_t datalen, uint16_t *check)
{
#ifdef INET6
	*check = in6_cksum_pseudo((void*)ip6h, datalen, ip6h->nexthdr, 0);
	*check = nm_csum_fold(nm_csum_raw(data, datalen, 0));
#else
	static int notsupported = 0;

	if (!notsupported) {
		notsupported = 1;
		D("inet6 segmentation not supported");
	}
#endif
}
/*
 * Intercept the rx routine in the standard device driver.
 * Second argument is non-zero to intercept, 0 to restore.
 */
int
netmap_catch_rx(struct netmap_adapter *na, int intercept)
{
	struct netmap_generic_adapter *gna = (struct netmap_generic_adapter *)na;
	struct ifnet *ifp = na->ifp;

	if (intercept) {
		if (gna->save_if_input) {
			D("cannot intercept again");
			return EINVAL; /* already set */
		}
		gna->save_if_input = ifp->if_input;
		ifp->if_input = generic_rx_handler;
	} else {
		if (!gna->save_if_input) {
			return EINVAL; /* not saved */
		}
		ifp->if_input = gna->save_if_input;
		gna->save_if_input = NULL;
	}

	return 0;
}
/*
 * Intercept the packet steering routine in the tx path,
 * so that we can decide which queue is used for an mbuf.
 * Second argument is non-zero to intercept, 0 to restore.
 * On FreeBSD we just intercept if_transmit.
 */
void
netmap_catch_tx(struct netmap_generic_adapter *gna, int enable)
{
	struct netmap_adapter *na = &gna->up.up;
	struct ifnet *ifp = na->ifp;

	if (enable) {
		na->if_transmit = ifp->if_transmit;
		ifp->if_transmit = netmap_transmit;
	} else {
		ifp->if_transmit = na->if_transmit;
	}
}
/*
 * Transmit routine used by generic_netmap_txsync(). Returns 0 on success
 * and non-zero on error (which may be packet drops or other errors).
 * addr and len identify the netmap buffer, m is the (preallocated)
 * mbuf to use for transmissions.
 *
 * We should add a reference to the mbuf so the m_freem() at the end
 * of the transmission does not consume resources.
 *
 * On FreeBSD, and on multiqueue cards, we can force the queue using
 *	if ((m->m_flags & M_FLOWID) != 0)
 *		i = m->m_pkthdr.flowid % adapter->num_queues;
 *	else
 *		i = curcpu % adapter->num_queues;
 */
int
generic_xmit_frame(struct ifnet *ifp, struct mbuf *m,
	void *addr, u_int len, u_int ring_nr)
{
	int ret;

	m->m_len = m->m_pkthdr.len = 0;

	// copy data to the mbuf
	m_copyback(m, 0, len, addr);
	// inc refcount. We are alone, so we can skip the atomic
	atomic_fetchadd_int(m->m_ext.ref_cnt, 1);
	m->m_flags |= M_FLOWID;
	m->m_pkthdr.flowid = ring_nr;
	m->m_pkthdr.rcvif = ifp; /* used for tx notification */
	ret = NA(ifp)->if_transmit(ifp, m);
	return ret;
}
/*
 * The following two functions are empty until we have a generic
 * way to extract the info from the ifp.
 */
int generic_find_num_desc(struct ifnet *ifp, unsigned int *tx, unsigned int *rx)
{
	return 0;
}

void generic_find_num_queues(struct ifnet *ifp, u_int *txq, u_int *rxq)
{
	*txq = netmap_generic_rings;
	*rxq = netmap_generic_rings;
}
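/*
 * Both stubs fall back to netmap_generic_rings, a default defined elsewhere
 * in the netmap code, instead of querying the NIC for its real geometry.
 */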
void netmap_mitigation_init(struct nm_generic_mit *mit, struct netmap_adapter *na)
	mit->mit_pending = 0;

void netmap_mitigation_start(struct nm_generic_mit *mit)

void netmap_mitigation_restart(struct nm_generic_mit *mit)

int netmap_mitigation_active(struct nm_generic_mit *mit)

void netmap_mitigation_cleanup(struct nm_generic_mit *mit)
/*
 * In order to track whether pages are still mapped, we hook into
 * the standard cdev_pager and intercept the constructor and
 * destructor.
 */
struct netmap_vm_handle_t {
	struct cdev		*dev;
	struct netmap_priv_d	*priv;
};
netmap_dev_pager_ctor(void *handle, vm_ooffset_t size, vm_prot_t prot,
	vm_ooffset_t foff, struct ucred *cred, u_short *color)
	struct netmap_vm_handle_t *vmh = handle;

	D("handle %p size %jd prot %d foff %jd",
		handle, (intmax_t)size, prot, (intmax_t)foff);

netmap_dev_pager_dtor(void *handle)
	struct netmap_vm_handle_t *vmh = handle;
	struct cdev *dev = vmh->dev;
	struct netmap_priv_d *priv = vmh->priv;

	D("handle %p", handle);
netmap_dev_pager_fault(vm_object_t object, vm_ooffset_t offset,
	int prot, vm_page_t *mres)
	struct netmap_vm_handle_t *vmh = object->handle;
	struct netmap_priv_d *priv = vmh->priv;
	vm_memattr_t memattr;

	ND("object %p offset %jd prot %d mres %p",
		object, (intmax_t)offset, prot, mres);
	memattr = object->memattr;
	pidx = OFF_TO_IDX(offset);
	paddr = netmap_mem_ofstophys(priv->np_mref, offset);
	if (paddr == 0)
		return VM_PAGER_FAIL;

	if (((*mres)->flags & PG_FICTITIOUS) != 0) {
		/*
		 * If the passed in result page is a fake page, update it with
		 * the new physical address.
		 */
		vm_page_updatefake(page, paddr, memattr);
	} else {
		/*
		 * Replace the passed in reqpage page with our own fake page and
		 * free up all of the original pages.
		 */
#ifndef VM_OBJECT_WUNLOCK	/* FreeBSD < 10.x */
#define VM_OBJECT_WUNLOCK VM_OBJECT_UNLOCK
#define VM_OBJECT_WLOCK VM_OBJECT_LOCK
#endif /* VM_OBJECT_WUNLOCK */
		VM_OBJECT_WUNLOCK(object);
		page = vm_page_getfake(paddr, memattr);
		VM_OBJECT_WLOCK(object);
		vm_page_unlock(*mres);
		vm_page_insert(page, object, pidx);
	}
	page->valid = VM_PAGE_BITS_ALL;
	return (VM_PAGER_OK);
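/*
 * The ops table below is what netmap_mmap_single() hands to
 * cdev_pager_allocate(), so the VM system invokes these hooks whenever the
 * pager object backing a netmap mmap() is created, destroyed or faulted on.
 */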
static struct cdev_pager_ops netmap_cdev_pager_ops = {
	.cdev_pg_ctor = netmap_dev_pager_ctor,
	.cdev_pg_dtor = netmap_dev_pager_dtor,
	.cdev_pg_fault = netmap_dev_pager_fault,
};
netmap_mmap_single(struct cdev *cdev, vm_ooffset_t *foff,
	vm_size_t objsize, vm_object_t *objp, int prot)
	struct netmap_vm_handle_t *vmh;
	struct netmap_priv_d *priv;

	D("cdev %p foff %jd size %jd objp %p prot %d", cdev,
		(intmax_t )*foff, (intmax_t )objsize, objp, prot);

	vmh = malloc(sizeof(struct netmap_vm_handle_t), M_DEVBUF,
	error = devfs_get_cdevpriv((void**)&priv);
	error = netmap_get_memory(priv);
	obj = cdev_pager_allocate(vmh, OBJT_DEVICE,
		&netmap_cdev_pager_ops, objsize, prot,
		D("cdev_pager_allocate failed");
// XXX can we remove this ?
netmap_close(struct cdev *dev, int fflag, int devtype, struct thread *td)
	D("dev %p fflag 0x%x devtype %d td %p",
		dev, fflag, devtype, td);
netmap_open(struct cdev *dev, int oflags, int devtype, struct thread *td)
	struct netmap_priv_d *priv;

	// XXX wait or nowait ?
	priv = malloc(sizeof(struct netmap_priv_d), M_DEVBUF,
	error = devfs_set_cdevpriv(priv, netmap_dtor);
	priv->np_refcount = 1;
/******************** kqueue support ****************/

/*
 * The OS_selwakeup also needs to issue a KNOTE_UNLOCKED.
 * We use a non-zero argument to distinguish the call from the one
 * in kevent_scan(), which instead also needs to run netmap_poll().
 * The knote uses a global mutex for the time being. We might
 * try to reuse the one in the si, but it is not allocated
 * permanently so it might be a bit tricky.
 *
 * The *kqfilter function registers one or the other f_event
 * depending on read or write mode.
 * In the call to f_event() td_fpop is NULL, so any child function
 * calling devfs_get_cdevpriv() would fail - and we need it in
 * netmap_poll(). As a workaround we store priv into kn->kn_hook
 * and pass it as the first argument to netmap_poll(), which then
 * uses that failure to tell that it was called from f_event()
 * and does not need the selrecord().
 */
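/*
 * Illustrative userspace sketch (not part of this file): a process that
 * already has a netmap file descriptor 'fd' open and bound can wait for rx
 * readiness through kqueue instead of poll(). Names here are generic; only
 * the standard kqueue()/kevent() API itself is assumed.
 *
 *	struct kevent ev;
 *	int kq = kqueue();
 *
 *	EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);	// register the read filter
 *	kevent(kq, NULL, 0, &ev, 1, NULL);	// block until rx rings are ready
 */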
void freebsd_selwakeup(struct selinfo *si, int pri);

void
freebsd_selwakeup(struct selinfo *si, int pri)
{
	D("on knote %p", &si->si_note);
	selwakeuppri(si, pri);
	/* use a non-zero hint to distinguish this notification from the
	 * call done in kqueue_scan(), which uses hint 0.
	 */
	KNOTE_UNLOCKED(&si->si_note, 0x100 /* notification */);
}
static void
netmap_knrdetach(struct knote *kn)
{
	struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook;
	struct selinfo *si = priv->np_rxsi;

	D("remove selinfo %p", si);
	knlist_remove(&si->si_note, kn, 0);
}

static void
netmap_knwdetach(struct knote *kn)
{
	struct netmap_priv_d *priv = (struct netmap_priv_d *)kn->kn_hook;
	struct selinfo *si = priv->np_txsi;

	D("remove selinfo %p", si);
	knlist_remove(&si->si_note, kn, 0);
}
/*
 * Callback from notifications (generated externally) and from our own
 * calls to kevent(). For the former we just return 1 (ready), since we
 * do not know better; for the latter we call netmap_poll() and return
 * 0/1 accordingly.
 */
static int
netmap_knrw(struct knote *kn, long hint, int events)
{
	struct netmap_priv_d *priv;
	int revents;

	if (hint != 0) {
		ND(5, "call from notify");
		return 1; /* assume we are ready */
	}
	priv = kn->kn_hook;
	/* the notification may come from an external thread,
	 * in which case we do not want to run netmap_poll().
	 * This should be filtered above, but check just in case.
	 */
	if (curthread != priv->np_td) { /* should not happen */
		RD(5, "curthread changed %p %p", curthread, priv->np_td);
		return 1;
	}
	revents = netmap_poll((void *)priv, events, curthread);
	return (events & revents) ? 1 : 0;
}
netmap_knread(struct knote *kn, long hint)
	return netmap_knrw(kn, hint, POLLIN);

netmap_knwrite(struct knote *kn, long hint)
	return netmap_knrw(kn, hint, POLLOUT);
static struct filterops netmap_rfiltops = {
	.f_detach = netmap_knrdetach,
	.f_event = netmap_knread,
};

static struct filterops netmap_wfiltops = {
	.f_detach = netmap_knwdetach,
	.f_event = netmap_knwrite,
};
/*
 * This is called when a thread invokes kevent() to record
 * a change in the configuration of the kqueue.
 * The 'priv' should be the same as in the netmap device.
 */
netmap_kqfilter(struct cdev *dev, struct knote *kn)
	struct netmap_priv_d *priv;
	struct netmap_adapter *na;
	int ev = kn->kn_filter;

	if (ev != EVFILT_READ && ev != EVFILT_WRITE) {
		D("bad filter request %d", ev);
	error = devfs_get_cdevpriv((void**)&priv);
		D("device not yet setup");
		D("no netmap adapter for this file descriptor");
	/* the si is indicated in the priv */
	si = (ev == EVFILT_WRITE) ? priv->np_txsi : priv->np_rxsi;
	kn->kn_fop = (ev == EVFILT_WRITE) ?
		&netmap_wfiltops : &netmap_rfiltops;
	knlist_add(&si->si_note, kn, 1);

	ND("register %p %s td %p priv %p kn %p np_nifp %p kn_fp/fpop %s",
		na, na->ifp->if_xname, curthread, priv, kn,
		kn->kn_fp == curthread->td_fpop ? "match" : "MISMATCH");
struct cdevsw netmap_cdevsw = {
	.d_version = D_VERSION,
	.d_open = netmap_open,
	.d_mmap_single = netmap_mmap_single,
	.d_ioctl = netmap_ioctl,
	.d_poll = netmap_poll,
	.d_kqfilter = netmap_kqfilter,
	.d_close = netmap_close,
};

/*--- end of kqueue support ----*/
/*
 * Kernel entry point.
 *
 * Initialize/finalize the module and return.
 *
 * Return 0 on success, errno on failure.
 */
netmap_loader(__unused struct module *module, int event, __unused void *arg)
	error = netmap_init();

DEV_MODULE(netmap, netmap_loader, NULL);