 * Copyright (C) 2015 Stefano Garzarella
 * Copyright (C) 2016 Vincenzo Maffione
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
#if defined(__FreeBSD__)
#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/selinfo.h>
#include <sys/socket.h>
#include <net/if_var.h>
#include <machine/bus.h>

//#define usleep_range(_1, _2)
#define usleep_range(_1, _2) \
	pause_sbt("ptnetmap-sleep", SBT_1US * _1, SBT_1US * 1, C_ABSOLUTE)
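/* On FreeBSD the Linux-style usleep_range() used by the ptnetmap kthreads
 * is emulated with pause_sbt(), sleeping on the "ptnetmap-sleep" wait
 * channel; the second argument (_2), the upper bound of the Linux API, is
 * simply ignored. */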
#include <net/netmap.h>
#include <dev/netmap/netmap_kern.h>
#include <net/netmap_virt.h>
#include <dev/netmap/netmap_mem2.h>

#ifdef WITH_PTNETMAP_HOST

/* Max number of consecutive RX cycles without receiving any packet. */
#define PTN_RX_DRY_CYCLES_MAX	10
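/* After this many consecutive RX syncs that bring in no new packets, the
 * RX kthread stops and waits for a notification from the backend
 * (netmap_rx_irq); see ptnetmap_rx_handler() below. */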
/* Limit Batch TX to half ring.
 * Currently disabled, since it does not manage NS_MOREFRAG, which
 * results in random drops in the VALE txsync. */
//#define PTN_TX_BATCH_LIM(_n)	((_n >> 1))

#define NETMAP_PT_DEBUG  /* Enables communication debugging. */
#ifdef NETMAP_PT_DEBUG

//#define RATE  /* Enables communication statistics. */
struct rate_batch_stats {
	unsigned long sync;
	unsigned long sync_dry;
	unsigned long pkt;
};

struct rate_stats {
	unsigned long gtxk;	/* Guest --> Host Tx kicks. */
	unsigned long grxk;	/* Guest --> Host Rx kicks. */
	unsigned long htxk;	/* Host --> Guest Tx kicks. */
	unsigned long hrxk;	/* Host --> Guest Rx kicks. */
	unsigned long btxwu;	/* Backend Tx wake-up. */
	unsigned long brxwu;	/* Backend Rx wake-up. */
	struct rate_batch_stats txbs;
	struct rate_batch_stats rxbs;
};

struct rate_context {
	struct timer_list timer;
	struct rate_stats new;
	struct rate_stats old;
};
#define RATE_PERIOD  2

static void
rate_callback(unsigned long arg)
{
	struct rate_context *ctx = (struct rate_context *)arg;
	struct rate_stats cur = ctx->new;
	struct rate_batch_stats *txbs = &cur.txbs;
	struct rate_batch_stats *rxbs = &cur.rxbs;
	struct rate_batch_stats *txbs_old = &ctx->old.txbs;
	struct rate_batch_stats *rxbs_old = &ctx->old.rxbs;
	uint64_t tx_batch, rx_batch;
	unsigned long txpkts, rxpkts;
	unsigned long gtxk, grxk;
	int r;

	txpkts = txbs->pkt - txbs_old->pkt;
	rxpkts = rxbs->pkt - rxbs_old->pkt;

	tx_batch = ((txbs->sync - txbs_old->sync) > 0) ?
		   txpkts / (txbs->sync - txbs_old->sync) : 0;
	rx_batch = ((rxbs->sync - rxbs_old->sync) > 0) ?
		   rxpkts / (rxbs->sync - rxbs_old->sync) : 0;

	/* Fix-up gtxk and grxk estimates. */
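	/* The TX/RX handlers increment gtxk/grxk at every wake-up, even when
	 * they were woken up by the backend rather than by a guest kick, so
	 * the backend wake-ups are subtracted here to estimate the kicks
	 * that actually came from the guest. */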
	gtxk = (cur.gtxk - ctx->old.gtxk) - (cur.btxwu - ctx->old.btxwu);
	grxk = (cur.grxk - ctx->old.grxk) - (cur.brxwu - ctx->old.brxwu);

	printk("txpkts = %lu Hz\n", txpkts/RATE_PERIOD);
	printk("gtxk = %lu Hz\n", gtxk/RATE_PERIOD);
	printk("htxk = %lu Hz\n", (cur.htxk - ctx->old.htxk)/RATE_PERIOD);
	printk("btxw = %lu Hz\n", (cur.btxwu - ctx->old.btxwu)/RATE_PERIOD);
	printk("rxpkts = %lu Hz\n", rxpkts/RATE_PERIOD);
	printk("grxk = %lu Hz\n", grxk/RATE_PERIOD);
	printk("hrxk = %lu Hz\n", (cur.hrxk - ctx->old.hrxk)/RATE_PERIOD);
	printk("brxw = %lu Hz\n", (cur.brxwu - ctx->old.brxwu)/RATE_PERIOD);
	printk("txbatch = %llu avg\n", tx_batch);
	printk("rxbatch = %llu avg\n", rx_batch);

	ctx->old = cur;
	r = mod_timer(&ctx->timer, jiffies +
		      msecs_to_jiffies(RATE_PERIOD * 1000));
	if (r)
		D("[ptnetmap] Error: mod_timer()\n");
static void
rate_batch_stats_update(struct rate_batch_stats *bf, uint32_t pre_tail,
			uint32_t act_tail, uint32_t num_slots)
{
	int n = (int)act_tail - pre_tail;
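	/* n is the number of slots processed by this sync, i.e. how far the
	 * ring tail moved; if the tail wrapped past the end of the ring the
	 * difference is negative and is interpreted modulo num_slots. */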
struct ptnetmap_state {
	struct nm_kthread **kthreads;

	/* Shared memory with the guest (TX/RX) */
	struct ptnet_ring __user *ptrings;

	bool stopped;

	/* Netmap adapter wrapping the backend. */
	struct netmap_pt_host_adapter *pth_na;

	IFRATE(struct rate_context rate_ctx;)
};
static void
ptnetmap_kring_dump(const char *title, const struct netmap_kring *kring)
{
	RD(1, "%s - name: %s hwcur: %d hwtail: %d rhead: %d rcur: %d \
	      rtail: %d head: %d cur: %d tail: %d",
	      title, kring->name, kring->nr_hwcur,
	      kring->nr_hwtail, kring->rhead, kring->rcur, kring->rtail,
	      kring->ring->head, kring->ring->cur, kring->ring->tail);
}
/*
 * Functions to set/get the CSB flags and to handle host/guest kicks.
 */
/* Enable or disable guest --> host kicks. */
static inline void
ptring_kick_enable(struct ptnet_ring __user *ptring, uint32_t val)
{
	CSB_WRITE(ptring, host_need_kick, val);
}
/* Are guest interrupts enabled or disabled? */
static inline uint32_t
ptring_intr_enabled(struct ptnet_ring __user *ptring)
{
	uint32_t v;
	CSB_READ(ptring, guest_need_kick, v);
	return v;
}
/* Enable or disable guest interrupts. */
static inline void
ptring_intr_enable(struct ptnet_ring __user *ptring, uint32_t val)
{
	CSB_WRITE(ptring, guest_need_kick, val);
}
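
/* In the CSB, host_need_kick tells the guest whether the host wants to be
 * kicked when new work is published, while guest_need_kick tells the host
 * whether the guest wants an interrupt when new completions show up. The
 * CSB_READ()/CSB_WRITE() accessors (defined in netmap_kern.h) read and
 * write these fields in the guest-shared (user) memory. */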
/* Handle TX events: from the guest or from the backend */
static void
ptnetmap_tx_handler(void *data)
{
	struct netmap_kring *kring = data;
	struct netmap_pt_host_adapter *pth_na =
		(struct netmap_pt_host_adapter *)kring->na->na_private;
	struct ptnetmap_state *ptns = pth_na->ptns;
	struct ptnet_ring __user *ptring;
	struct netmap_ring shadow_ring; /* shadow copy of the netmap_ring */
	bool more_txspace = false;
	struct nm_kthread *kth;
	uint32_t num_slots;
	int batch;
	IFRATE(uint32_t pre_tail);

	if (unlikely(!ptns)) {
		D("ERROR ptnetmap state is NULL");
		return;
	}

	if (unlikely(ptns->stopped)) {
		RD(1, "backend netmap is being stopped");
		return;
	}

	if (unlikely(nm_kr_tryget(kring, 1, NULL))) {
		D("ERROR nm_kr_tryget()");
		return;
	}

	/* This is a guess, to be fixed in the rate callback. */
	IFRATE(ptns->rate_ctx.new.gtxk++);

	/* Get TX ptring pointer from the CSB. */
	ptring = ptns->ptrings + kring->ring_id;
	kth = ptns->kthreads[kring->ring_id];

	num_slots = kring->nkr_num_slots;
	shadow_ring.head = kring->rhead;
	shadow_ring.cur = kring->rcur;

	/* Disable guest --> host notifications. */
	ptring_kick_enable(ptring, 0);
	/* Copy the guest kring pointers from the CSB. */
	ptnetmap_host_read_kring_csb(ptring, &shadow_ring, num_slots);

	/* If the guest moves ahead too fast, let's cut the move so
	 * that we don't exceed our batch limit. */
	batch = shadow_ring.head - kring->nr_hwcur;

#ifdef PTN_TX_BATCH_LIM
	if (batch > PTN_TX_BATCH_LIM(num_slots)) {
		uint32_t head_lim = kring->nr_hwcur + PTN_TX_BATCH_LIM(num_slots);

		if (head_lim >= num_slots)
			head_lim -= num_slots;
		ND(1, "batch: %d head: %d head_lim: %d", batch, shadow_ring.head,
		   head_lim);
		shadow_ring.head = head_lim;
		batch = PTN_TX_BATCH_LIM(num_slots);
	}
#endif /* PTN_TX_BATCH_LIM */
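	/* Not much free space left in the parent ring: ask the txsync below
	 * to also reclaim buffers of already completed transmissions
	 * (NAF_FORCE_RECLAIM), so that TX slots are given back to the guest. */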
	if (nm_kr_txspace(kring) <= (num_slots >> 1)) {
		shadow_ring.flags |= NAF_FORCE_RECLAIM;
	}

	/* Netmap prologue */
	shadow_ring.tail = kring->rtail;
	if (unlikely(nm_txsync_prologue(kring, &shadow_ring) >= num_slots)) {
		/* Reinit ring and enable notifications. */
		netmap_ring_reinit(kring);
		ptring_kick_enable(ptring, 1);

	if (unlikely(netmap_verbose & NM_VERB_TXSYNC)) {
		ptnetmap_kring_dump("pre txsync", kring);
	}

	IFRATE(pre_tail = kring->rtail);
	if (unlikely(kring->nm_sync(kring, shadow_ring.flags))) {
		/* Reenable notifications. */
		ptring_kick_enable(ptring, 1);

	/*
	 * Copy host hwcur and hwtail into the CSB for the guest sync(), and
	 * do the nm_sync_finalize.
	 */
	ptnetmap_host_write_kring_csb(ptring, kring->nr_hwcur,
				      kring->nr_hwtail);
	if (kring->rtail != kring->nr_hwtail) {
		/* Some more room available in the parent adapter. */
		kring->rtail = kring->nr_hwtail;
		more_txspace = true;
	}

	IFRATE(rate_batch_stats_update(&ptns->rate_ctx.new.txbs, pre_tail,
				       kring->rtail, num_slots));

	if (unlikely(netmap_verbose & NM_VERB_TXSYNC)) {
		ptnetmap_kring_dump("post txsync", kring);
	}

	/* Interrupt the guest if needed. */
	if (more_txspace && ptring_intr_enabled(ptring)) {
		/* Disable guest kick to avoid sending unnecessary kicks */
		ptring_intr_enable(ptring, 0);
		nm_os_kthread_send_irq(kth);
		IFRATE(ptns->rate_ctx.new.htxk++);
		more_txspace = false;
	}

	/* Read CSB to see if there is more work to do. */
	ptnetmap_host_read_kring_csb(ptring, &shadow_ring, num_slots);

	if (shadow_ring.head == kring->rhead) {
		/*
		 * No more packets to transmit. We enable notifications and
		 * go to sleep, waiting for a kick from the guest when new
		 * slots are ready for transmission.
		 */
		/* Reenable notifications. */
		ptring_kick_enable(ptring, 1);
		ptnetmap_host_read_kring_csb(ptring, &shadow_ring, num_slots);
		if (shadow_ring.head != kring->rhead) {
			/* We won the race condition, there are more packets to
			 * transmit. Disable notifications and do another cycle */
			ptring_kick_enable(ptring, 0);

	if (nm_kr_txempty(kring)) {
		/* No more available TX slots. We stop and wait for a
		 * notification from the backend (netmap_tx_irq). */

	if (unlikely(ptns->stopped)) {
		D("backend netmap is being stopped");

	if (more_txspace && ptring_intr_enabled(ptring)) {
		ptring_intr_enable(ptring, 0);
		nm_os_kthread_send_irq(kth);
		IFRATE(ptns->rate_ctx.new.htxk++);
	}

/*
 * We need RX kicks from the guest when (tail == head-1), where we wait
 * for the guest to refill.
 */
static inline bool
ptnetmap_norxslots(struct netmap_kring *kring, uint32_t g_head)
{
	return (NM_ACCESS_ONCE(kring->nr_hwtail) == nm_prev(g_head,
				kring->nkr_num_slots - 1));
}
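/* Example: with a 1024-slot ring and g_head == 0, the host has no slots
 * left to receive into when hwtail == 1023, i.e. one slot before the head
 * published by the guest. */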
#endif /* !BUSY_WAIT */

/* Handle RX events: from the guest or from the backend */
static void
ptnetmap_rx_handler(void *data)
{
	struct netmap_kring *kring = data;
	struct netmap_pt_host_adapter *pth_na =
		(struct netmap_pt_host_adapter *)kring->na->na_private;
	struct ptnetmap_state *ptns = pth_na->ptns;
	struct ptnet_ring __user *ptring;
	struct netmap_ring shadow_ring; /* shadow copy of the netmap_ring */
	struct nm_kthread *kth;
	uint32_t num_slots, hwtail;
	int dry_cycles = 0;
	bool some_recvd = false;
	IFRATE(uint32_t pre_tail);

	if (unlikely(!ptns || !ptns->pth_na)) {
		D("ERROR ptnetmap state %p, ptnetmap host adapter %p", ptns,
		  ptns ? ptns->pth_na : NULL);
		return;
	}

	if (unlikely(ptns->stopped)) {
		RD(1, "backend netmap is being stopped");
		return;
	}

	if (unlikely(nm_kr_tryget(kring, 1, NULL))) {
		D("ERROR nm_kr_tryget()");
		return;
	}

	/* This is a guess, to be fixed in the rate callback. */
	IFRATE(ptns->rate_ctx.new.grxk++);

	/* Get RX ptring pointer from the CSB. */
	ptring = ptns->ptrings + (pth_na->up.num_tx_rings + kring->ring_id);
	kth = ptns->kthreads[pth_na->up.num_tx_rings + kring->ring_id];
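	/* TX rings come first in the CSB ring array and in the kthread array,
	 * which is why RX ring i is found at index num_tx_rings + i. */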
	num_slots = kring->nkr_num_slots;
	shadow_ring.head = kring->rhead;
	shadow_ring.cur = kring->rcur;

	/* Disable notifications. */
	ptring_kick_enable(ptring, 0);
	/* Copy the guest kring pointers from the CSB. */
	ptnetmap_host_read_kring_csb(ptring, &shadow_ring, num_slots);

	/* Netmap prologue */
	shadow_ring.tail = kring->rtail;
	if (unlikely(nm_rxsync_prologue(kring, &shadow_ring) >= num_slots)) {
		/* Reinit ring and enable notifications. */
		netmap_ring_reinit(kring);
		ptring_kick_enable(ptring, 1);

	if (unlikely(netmap_verbose & NM_VERB_RXSYNC)) {
		ptnetmap_kring_dump("pre rxsync", kring);
	}

	IFRATE(pre_tail = kring->rtail);
	if (unlikely(kring->nm_sync(kring, shadow_ring.flags))) {
		/* Reenable notifications. */
		ptring_kick_enable(ptring, 1);

	/*
	 * Copy host hwcur and hwtail into the CSB for the guest sync()
	 */
	hwtail = NM_ACCESS_ONCE(kring->nr_hwtail);
	ptnetmap_host_write_kring_csb(ptring, kring->nr_hwcur, hwtail);
	if (kring->rtail != hwtail) {
		kring->rtail = hwtail;
		some_recvd = true;
		dry_cycles = 0;
	} else {
		dry_cycles++;
	}

	IFRATE(rate_batch_stats_update(&ptns->rate_ctx.new.rxbs, pre_tail,
				       kring->rtail, num_slots));

	if (unlikely(netmap_verbose & NM_VERB_RXSYNC)) {
		ptnetmap_kring_dump("post rxsync", kring);
	}

	/* Interrupt the guest if needed. */
	if (some_recvd && ptring_intr_enabled(ptring)) {
		/* Disable guest kick to avoid sending unnecessary kicks */
		ptring_intr_enable(ptring, 0);
		nm_os_kthread_send_irq(kth);
		IFRATE(ptns->rate_ctx.new.hrxk++);
	}

	/* Read CSB to see if there is more work to do. */
	ptnetmap_host_read_kring_csb(ptring, &shadow_ring, num_slots);

	if (ptnetmap_norxslots(kring, shadow_ring.head)) {
		/*
		 * No more slots available for reception. We enable notifications
		 * and go to sleep, waiting for a kick from the guest when new
		 * receive slots are available.
		 */
		/* Reenable notifications. */
		ptring_kick_enable(ptring, 1);
		ptnetmap_host_read_kring_csb(ptring, &shadow_ring, num_slots);
		if (!ptnetmap_norxslots(kring, shadow_ring.head)) {
			/* We won the race condition, more slots are available. Disable
			 * notifications and do another cycle. */
			ptring_kick_enable(ptring, 0);

	hwtail = NM_ACCESS_ONCE(kring->nr_hwtail);
	if (unlikely(hwtail == kring->rhead ||
		     dry_cycles >= PTN_RX_DRY_CYCLES_MAX)) {
		/* No more packets to be read from the backend. We stop and
		 * wait for a notification from the backend (netmap_rx_irq). */
		ND(1, "nr_hwtail: %d rhead: %d dry_cycles: %d",
		   hwtail, kring->rhead, dry_cycles);

	if (unlikely(ptns->stopped)) {
		D("backend netmap is being stopped");

	/* Interrupt the guest if needed. */
	if (some_recvd && ptring_intr_enabled(ptring)) {
		ptring_intr_enable(ptring, 0);
		nm_os_kthread_send_irq(kth);
		IFRATE(ptns->rate_ctx.new.hrxk++);
	}
#ifdef NETMAP_PT_DEBUG
static void
ptnetmap_print_configuration(struct ptnetmap_cfg *cfg)
{
	int k;

	D("ptnetmap configuration:");
	D(" CSB ptrings @%p, num_rings=%u, cfgtype %08x", cfg->ptrings,
	  cfg->num_rings, cfg->cfgtype);
	for (k = 0; k < cfg->num_rings; k++) {
		switch (cfg->cfgtype) {
		case PTNETMAP_CFGTYPE_QEMU: {
			struct ptnetmap_cfgentry_qemu *e =
				(struct ptnetmap_cfgentry_qemu *)(cfg+1) + k;
			D(" ring #%d: ioeventfd=%lu, irqfd=%lu", k,
			  (unsigned long)e->ioeventfd,
			  (unsigned long)e->irqfd);
			break;
		}

		case PTNETMAP_CFGTYPE_BHYVE:
		{
			struct ptnetmap_cfgentry_bhyve *e =
				(struct ptnetmap_cfgentry_bhyve *)(cfg+1) + k;
			D(" ring #%d: wchan=%lu, ioctl_fd=%lu, "
			  "ioctl_cmd=%lu, msix_msg_data=%lu, msix_addr=%lu",
			  k, (unsigned long)e->wchan,
			  (unsigned long)e->ioctl_fd,
			  (unsigned long)e->ioctl_cmd,
			  (unsigned long)e->ioctl_data.msg_data,
			  (unsigned long)e->ioctl_data.addr);
			break;
		}
		}
	}
}
#endif /* NETMAP_PT_DEBUG */

/* Copy actual state of the host ring into the CSB for the guest init */
static int
ptnetmap_kring_snapshot(struct netmap_kring *kring, struct ptnet_ring __user *ptring)
{
	if (CSB_WRITE(ptring, head, kring->rhead))
		goto err;
	if (CSB_WRITE(ptring, cur, kring->rcur))
		goto err;
	if (CSB_WRITE(ptring, hwcur, kring->nr_hwcur))
		goto err;
	if (CSB_WRITE(ptring, hwtail, NM_ACCESS_ONCE(kring->nr_hwtail)))
		goto err;

	DBG(ptnetmap_kring_dump("ptnetmap_kring_snapshot", kring);)

	return 0;
err:
	return EFAULT;
}
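
/* Map a global ring index onto a kring: indices in [0, num_tx_rings) select
 * TX krings, the remaining ones select RX krings. */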
static struct netmap_kring *
ptnetmap_kring(struct netmap_pt_host_adapter *pth_na, int k)
{
	if (k < pth_na->up.num_tx_rings) {
		return pth_na->up.tx_rings + k;
	}
	return pth_na->up.rx_rings + k - pth_na->up.num_tx_rings;
}

static int
ptnetmap_krings_snapshot(struct netmap_pt_host_adapter *pth_na)
{
	struct ptnetmap_state *ptns = pth_na->ptns;
	struct netmap_kring *kring;
	unsigned int num_rings;
	int err = 0, k;

	num_rings = pth_na->up.num_tx_rings +
		    pth_na->up.num_rx_rings;

	for (k = 0; k < num_rings; k++) {
		kring = ptnetmap_kring(pth_na, k);
		err |= ptnetmap_kring_snapshot(kring, ptns->ptrings + k);
	}

	return err;
}

/*
 * Functions to create, start and stop the kthreads
 */
static int
ptnetmap_create_kthreads(struct netmap_pt_host_adapter *pth_na,
			 struct ptnetmap_cfg *cfg)
{
	struct ptnetmap_state *ptns = pth_na->ptns;
	struct nm_kthread_cfg nmk_cfg;
	unsigned int num_rings;
	uint8_t *cfg_entries = (uint8_t *)(cfg + 1);
	int k;
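	/* The hypervisor-specific per-ring entries are laid out contiguously
	 * right after the fixed-size struct ptnetmap_cfg header, each one
	 * cfg->entry_size bytes long. */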
	num_rings = pth_na->up.num_tx_rings +
		    pth_na->up.num_rx_rings;
	for (k = 0; k < num_rings; k++) {
		nmk_cfg.attach_user = 1; /* attach kthread to user process */
		nmk_cfg.worker_private = ptnetmap_kring(pth_na, k);
		if (k < pth_na->up.num_tx_rings) {
			nmk_cfg.worker_fn = ptnetmap_tx_handler;
		} else {
			nmk_cfg.worker_fn = ptnetmap_rx_handler;
		}
		ptns->kthreads[k] = nm_os_kthread_create(&nmk_cfg,
			cfg->cfgtype, cfg_entries + k * cfg->entry_size);
		if (ptns->kthreads[k] == NULL) {
			goto err;
		}
	}

	return 0;
err:
	for (k = 0; k < num_rings; k++) {
		if (ptns->kthreads[k]) {
			nm_os_kthread_delete(ptns->kthreads[k]);
			ptns->kthreads[k] = NULL;
		}
	}

static int
ptnetmap_start_kthreads(struct netmap_pt_host_adapter *pth_na)
{
	struct ptnetmap_state *ptns = pth_na->ptns;
	unsigned int num_rings;
	int error, k;

	if (!ptns) {
		D("BUG ptns is NULL");
	}

	ptns->stopped = false;
	num_rings = ptns->pth_na->up.num_tx_rings +
		    ptns->pth_na->up.num_rx_rings;
	for (k = 0; k < num_rings; k++) {
		//nm_os_kthread_set_affinity(ptns->kthreads[k], xxx);
		error = nm_os_kthread_start(ptns->kthreads[k]);
	}

static void
ptnetmap_stop_kthreads(struct netmap_pt_host_adapter *pth_na)
{
	struct ptnetmap_state *ptns = pth_na->ptns;
	unsigned int num_rings;
	int k;

	ptns->stopped = true;
	num_rings = ptns->pth_na->up.num_tx_rings +
		    ptns->pth_na->up.num_rx_rings;
	for (k = 0; k < num_rings; k++) {
		nm_os_kthread_stop(ptns->kthreads[k]);
	}
}

static struct ptnetmap_cfg *
ptnetmap_read_cfg(struct nmreq *nmr)
{
	uintptr_t *nmr_ptncfg = (uintptr_t *)&nmr->nr_arg1;
	struct ptnetmap_cfg *cfg;
	struct ptnetmap_cfg tmp;
	size_t cfglen;

	if (copyin((const void *)*nmr_ptncfg, &tmp, sizeof(tmp))) {
		D("Partial copyin() failed");
		return NULL;
	}

	cfglen = sizeof(tmp) + tmp.num_rings * tmp.entry_size;
	cfg = malloc(cfglen, M_DEVBUF, M_NOWAIT | M_ZERO);
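	/* The full configuration length (header plus num_rings entries of
	 * entry_size bytes each) can only be computed after the header has
	 * been copied in; now copy in the whole configuration. */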
	if (copyin((const void *)*nmr_ptncfg, cfg, cfglen)) {
		D("Full copyin() failed");
		free(cfg, M_DEVBUF);
		return NULL;
	}

	return cfg;
}

static int nm_unused_notify(struct netmap_kring *, int);
static int nm_pt_host_notify(struct netmap_kring *, int);

/* Create ptnetmap state and switch parent adapter to ptnetmap mode. */
static int
ptnetmap_create(struct netmap_pt_host_adapter *pth_na,
		struct ptnetmap_cfg *cfg)
{
	struct ptnetmap_state *ptns;
	unsigned int num_rings;
	int ret, i;

	/* Check if ptnetmap state is already there. */
	if (pth_na->ptns) {
		D("ERROR adapter %p already in ptnetmap mode", pth_na->parent);
		return EBUSY;
	}

	num_rings = pth_na->up.num_tx_rings + pth_na->up.num_rx_rings;
	if (num_rings != cfg->num_rings) {
		D("ERROR configuration mismatch, expected %u rings, found %u",
		  num_rings, cfg->num_rings);
		return EINVAL;
	}

	ptns = malloc(sizeof(*ptns) + num_rings * sizeof(*ptns->kthreads),
		      M_DEVBUF, M_NOWAIT | M_ZERO);
	if (!ptns) {
		return ENOMEM;
	}

	ptns->kthreads = (struct nm_kthread **)(ptns + 1);
	ptns->stopped = true;

	/* Cross-link data structures. */
	pth_na->ptns = ptns;
	ptns->pth_na = pth_na;

	/* Store the CSB address provided by the hypervisor. */
	ptns->ptrings = cfg->ptrings;

	DBG(ptnetmap_print_configuration(cfg));

	/* Create kthreads */
	if ((ret = ptnetmap_create_kthreads(pth_na, cfg))) {
		D("ERROR ptnetmap_create_kthreads()");
		goto err;
	}
	/* Copy krings state into the CSB for the guest initialization */
	if ((ret = ptnetmap_krings_snapshot(pth_na))) {
		D("ERROR ptnetmap_krings_snapshot()");
		goto err;
	}

	/* Overwrite parent nm_notify krings callback. */
	pth_na->parent->na_private = pth_na;
	pth_na->parent_nm_notify = pth_na->parent->nm_notify;
	pth_na->parent->nm_notify = nm_unused_notify;
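	/* From now on every backend completion on a parent kring must wake up
	 * the corresponding ptnetmap kthread, so the per-kring nm_notify
	 * callbacks are redirected to nm_pt_host_notify() below, while the
	 * original ones are saved to be restored by ptnetmap_delete(). */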
	for (i = 0; i < pth_na->parent->num_rx_rings; i++) {
		pth_na->up.rx_rings[i].save_notify =
			pth_na->up.rx_rings[i].nm_notify;
		pth_na->up.rx_rings[i].nm_notify = nm_pt_host_notify;
	}
	for (i = 0; i < pth_na->parent->num_tx_rings; i++) {
		pth_na->up.tx_rings[i].save_notify =
			pth_na->up.tx_rings[i].nm_notify;
		pth_na->up.tx_rings[i].nm_notify = nm_pt_host_notify;
	}

#ifdef RATE
	memset(&ptns->rate_ctx, 0, sizeof(ptns->rate_ctx));
	setup_timer(&ptns->rate_ctx.timer, &rate_callback,
		    (unsigned long)&ptns->rate_ctx);
	if (mod_timer(&ptns->rate_ctx.timer, jiffies + msecs_to_jiffies(1500)))
		D("[ptn] Error: mod_timer()\n");
#endif /* RATE */

	DBG(D("[%s] ptnetmap configuration DONE", pth_na->up.name));

	return 0;

err:
	free(ptns, M_DEVBUF);
	pth_na->ptns = NULL;
	return ret;
}

/* Switch parent adapter back to normal mode and destroy
 * ptnetmap state (kthreads, ...). */
static void
ptnetmap_delete(struct netmap_pt_host_adapter *pth_na)
{
	struct ptnetmap_state *ptns = pth_na->ptns;
	unsigned int num_rings;
	int i;

	if (!ptns) {
		/* Nothing to do. */
		return;
	}

	/* Restore parent adapter callbacks. */
	pth_na->parent->nm_notify = pth_na->parent_nm_notify;
	pth_na->parent->na_private = NULL;

	for (i = 0; i < pth_na->parent->num_rx_rings; i++) {
		pth_na->up.rx_rings[i].nm_notify =
			pth_na->up.rx_rings[i].save_notify;
		pth_na->up.rx_rings[i].save_notify = NULL;
	}
	for (i = 0; i < pth_na->parent->num_tx_rings; i++) {
		pth_na->up.tx_rings[i].nm_notify =
			pth_na->up.tx_rings[i].save_notify;
		pth_na->up.tx_rings[i].save_notify = NULL;
	}

	/* Delete kthreads. */
	num_rings = ptns->pth_na->up.num_tx_rings +
		    ptns->pth_na->up.num_rx_rings;
	for (i = 0; i < num_rings; i++) {
		nm_os_kthread_delete(ptns->kthreads[i]);
		ptns->kthreads[i] = NULL;
	}

	IFRATE(del_timer(&ptns->rate_ctx.timer));

	free(ptns, M_DEVBUF);
	pth_na->ptns = NULL;

	DBG(D("[%s] ptnetmap deleted", pth_na->up.name));
}

/*
 * Called by netmap_ioctl().
 * Operation is indicated in nmr->nr_cmd.
 *
 * Called without NMG_LOCK.
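 *
 * Hypothetical usage sketch from the hypervisor side (illustrative only,
 * not part of this file): the backend fills a struct ptnetmap_cfg followed
 * by one per-ring cfg entry, then issues the netmap ioctl that ends up in
 * netmap_ioctl(), e.g.:
 *
 *	struct nmreq nmr;
 *	memset(&nmr, 0, sizeof(nmr));
 *	nmr.nr_flags |= NR_PTNETMAP_HOST;	(see netmap_get_pt_host_na())
 *	nmr.nr_cmd = NETMAP_PT_HOST_CREATE;
 *	*(uintptr_t *)&nmr.nr_arg1 = (uintptr_t)cfg;	(as read by ptnetmap_read_cfg())
 *
 * NETMAP_PT_HOST_DELETE undoes the whole thing.
 */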
int
ptnetmap_ctl(struct nmreq *nmr, struct netmap_adapter *na)
{
	struct netmap_pt_host_adapter *pth_na;
	struct ptnetmap_cfg *cfg;
	char *name;
	int cmd, error = 0;

	name = nmr->nr_name;
	cmd = nmr->nr_cmd;

	DBG(D("name: %s", name));

	if (!nm_ptnetmap_host_on(na)) {
		D("ERROR Netmap adapter %p is not a ptnetmap host adapter", na);
		return ENXIO;
	}
	pth_na = (struct netmap_pt_host_adapter *)na;

	NMG_LOCK();
	switch (cmd) {
	case NETMAP_PT_HOST_CREATE:
		/* Read hypervisor configuration from userspace. */
		cfg = ptnetmap_read_cfg(nmr);
		/* Create ptnetmap state (kthreads, ...) and switch parent
		 * adapter to ptnetmap mode. */
		error = ptnetmap_create(pth_na, cfg);
		free(cfg, M_DEVBUF);
		if (error)
			break;
		/* Start kthreads. */
		error = ptnetmap_start_kthreads(pth_na);
		if (error)
			ptnetmap_delete(pth_na);
		break;
	case NETMAP_PT_HOST_DELETE:
		/* Stop kthreads. */
		ptnetmap_stop_kthreads(pth_na);
		/* Switch parent adapter back to normal mode and destroy
		 * ptnetmap state (kthreads, ...). */
		ptnetmap_delete(pth_na);
		break;
	default:
		D("ERROR invalid cmd (nmr->nr_cmd) (0x%x)", cmd);
		error = EINVAL;
		break;
	}
	NMG_UNLOCK();

	return error;
}

/* nm_notify callbacks for ptnetmap */
static int
nm_pt_host_notify(struct netmap_kring *kring, int flags)
{
	struct netmap_adapter *na = kring->na;
	struct netmap_pt_host_adapter *pth_na =
		(struct netmap_pt_host_adapter *)na->na_private;
	struct ptnetmap_state *ptns;
	int k = kring->ring_id;

	/* First check that the passthrough port is not being destroyed. */
	if (unlikely(!pth_na)) {
		return NM_IRQ_COMPLETED;
	}

	ptns = pth_na->ptns;
	if (unlikely(!ptns || ptns->stopped)) {
		return NM_IRQ_COMPLETED;
	}

	/* Notify kthreads (wake up if needed) */
	if (kring->tx == NR_TX) {
		ND(1, "TX backend irq");
		IFRATE(ptns->rate_ctx.new.btxwu++);
	} else {
		k += pth_na->up.num_tx_rings;
		ND(1, "RX backend irq");
		IFRATE(ptns->rate_ctx.new.brxwu++);
	}
	nm_os_kthread_wakeup_worker(ptns->kthreads[k]);

	return NM_IRQ_COMPLETED;
}

static int
nm_unused_notify(struct netmap_kring *kring, int flags)
{
	D("BUG this should never be called");
/* nm_config callback for bwrap */
static int
nm_pt_host_config(struct netmap_adapter *na, u_int *txr, u_int *txd,
		  u_int *rxr, u_int *rxd)
{
	struct netmap_pt_host_adapter *pth_na =
		(struct netmap_pt_host_adapter *)na;
	struct netmap_adapter *parent = pth_na->parent;
	int error;

	//XXX: maybe calling parent->nm_config is better

	/* forward the request */
	error = netmap_update_config(parent);

	*rxr = na->num_rx_rings = parent->num_rx_rings;
	*txr = na->num_tx_rings = parent->num_tx_rings;
	*txd = na->num_tx_desc = parent->num_tx_desc;
	*rxd = na->num_rx_desc = parent->num_rx_desc;

	DBG(D("rxr: %d txr: %d txd: %d rxd: %d", *rxr, *txr, *txd, *rxd));

	return error;
}

/* nm_krings_create callback for ptnetmap */
static int
nm_pt_host_krings_create(struct netmap_adapter *na)
{
	struct netmap_pt_host_adapter *pth_na =
		(struct netmap_pt_host_adapter *)na;
	struct netmap_adapter *parent = pth_na->parent;
	enum txrx t;
	int error;

	DBG(D("%s", pth_na->up.name));

	/* create the parent krings */
	error = parent->nm_krings_create(parent);
	if (error) {
		return error;
	}

	/* A ptnetmap host adapter points to the very same krings
	 * as its parent adapter. These pointers are used in the
	 * TX/RX worker functions. */
	na->tx_rings = parent->tx_rings;
	na->rx_rings = parent->rx_rings;
	na->tailroom = parent->tailroom;

	for_rx_tx(t) {
		struct netmap_kring *kring;

		/* Parent's kring_create function will initialize
		 * its own na->si. We have to init our na->si here. */
		nm_os_selinfo_init(&na->si[t]);

		/* Force the mem_rings_create() method to create the
		 * host rings independently of what the regif asked for:
		 * these rings are needed by the guest ptnetmap adapter. */
		kring = &NMR(na, t)[nma_get_nrings(na, t)];
		kring->nr_kflags |= NKR_NEEDRING;
	}

	return 0;
}
/* nm_krings_delete callback for ptnetmap */
static void
nm_pt_host_krings_delete(struct netmap_adapter *na)
{
	struct netmap_pt_host_adapter *pth_na =
		(struct netmap_pt_host_adapter *)na;
	struct netmap_adapter *parent = pth_na->parent;

	DBG(D("%s", pth_na->up.name));

	parent->nm_krings_delete(parent);

	na->tx_rings = na->rx_rings = na->tailroom = NULL;
}
/* nm_register callback */
static int
nm_pt_host_register(struct netmap_adapter *na, int onoff)
{
	struct netmap_pt_host_adapter *pth_na =
		(struct netmap_pt_host_adapter *)na;
	struct netmap_adapter *parent = pth_na->parent;
	int error;

	DBG(D("%s onoff %d", pth_na->up.name, onoff));

	if (onoff) {
		/* netmap_do_regif has been called on the ptnetmap na.
		 * We need to pass the information about the
		 * memory allocator to the parent before
		 * putting it in netmap mode. */
		parent->na_lut = na->na_lut;
	}

	/* forward the request to the parent */
	error = parent->nm_register(parent, onoff);
	if (error)
		return error;

	if (onoff) {
		na->na_flags |= NAF_NETMAP_ON | NAF_PTNETMAP_HOST;
	} else {
		ptnetmap_delete(pth_na);
		na->na_flags &= ~(NAF_NETMAP_ON | NAF_PTNETMAP_HOST);
	}

	return 0;
}

/* nm_dtor callback */
static void
nm_pt_host_dtor(struct netmap_adapter *na)
{
	struct netmap_pt_host_adapter *pth_na =
		(struct netmap_pt_host_adapter *)na;
	struct netmap_adapter *parent = pth_na->parent;

	DBG(D("%s", pth_na->up.name));

	/* The equivalent of NETMAP_PT_HOST_DELETE if the hypervisor
	 * exits. */
	ptnetmap_stop_kthreads(pth_na);
	ptnetmap_delete(pth_na);

	parent->na_flags &= ~NAF_BUSY;

	netmap_adapter_put(pth_na->parent);
	pth_na->parent = NULL;
}
/* check if nmr is a request for a ptnetmap adapter that we can satisfy */
int
netmap_get_pt_host_na(struct nmreq *nmr, struct netmap_adapter **na, int create)
{
	struct nmreq parent_nmr;
	struct netmap_adapter *parent; /* target adapter */
	struct netmap_pt_host_adapter *pth_na;
	struct ifnet *ifp = NULL;
	int error;

	/* Check if it is a request for a ptnetmap adapter */
	if ((nmr->nr_flags & (NR_PTNETMAP_HOST)) == 0) {
		return 0;
	}

	D("Requesting a ptnetmap host adapter");

	pth_na = malloc(sizeof(*pth_na), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (pth_na == NULL) {
		return ENOMEM;
	}

	/* First, try to find the adapter that we want to pass through.
	 * We use the same nmr, after we have turned off the ptnetmap flag.
	 * In this way we can potentially pass through everything netmap
	 * understands. */
	memcpy(&parent_nmr, nmr, sizeof(parent_nmr));
	parent_nmr.nr_flags &= ~(NR_PTNETMAP_HOST);
	error = netmap_get_na(&parent_nmr, &parent, &ifp, create);
	if (error) {
		D("parent lookup failed: %d", error);
		goto put_out_noputparent;
	}
	DBG(D("found parent: %s", parent->name));

	/* make sure the interface is not already in use */
	if (NETMAP_OWNED_BY_ANY(parent)) {
		D("NIC %s busy, cannot ptnetmap", parent->name);
		error = EBUSY;
		goto put_out;
	}

	pth_na->parent = parent;

	/* Follow netmap_attach()-like operations for the host
	 * ptnetmap adapter. */

	//XXX pth_na->up.na_flags = parent->na_flags;
	pth_na->up.num_rx_rings = parent->num_rx_rings;
	pth_na->up.num_tx_rings = parent->num_tx_rings;
	pth_na->up.num_tx_desc = parent->num_tx_desc;
	pth_na->up.num_rx_desc = parent->num_rx_desc;

	pth_na->up.nm_dtor = nm_pt_host_dtor;
	pth_na->up.nm_register = nm_pt_host_register;

	/* Reuse parent's adapter txsync and rxsync methods. */
	pth_na->up.nm_txsync = parent->nm_txsync;
	pth_na->up.nm_rxsync = parent->nm_rxsync;

	pth_na->up.nm_krings_create = nm_pt_host_krings_create;
	pth_na->up.nm_krings_delete = nm_pt_host_krings_delete;
	pth_na->up.nm_config = nm_pt_host_config;

	/* Set the notify method only for convenience; it will never
	 * be used, since - differently from the default krings_create - our
	 * ptnetmap krings_create callback initializes kring->nm_notify. */
	pth_na->up.nm_notify = nm_unused_notify;

	pth_na->up.nm_mem = parent->nm_mem;

	pth_na->up.na_flags |= NAF_HOST_RINGS;

	error = netmap_attach_common(&pth_na->up);
	if (error) {
		D("ERROR netmap_attach_common()");
		goto put_out;
	}

	*na = &pth_na->up;
	netmap_adapter_get(*na);

	/* set parent busy, because attached for ptnetmap */
	parent->na_flags |= NAF_BUSY;

	strncpy(pth_na->up.name, parent->name, sizeof(pth_na->up.name));
	strcat(pth_na->up.name, "-PTN");

	DBG(D("%s ptnetmap request DONE", pth_na->up.name));

	/* drop the reference to the ifp, if any */

	return 0;

put_out:
	netmap_adapter_put(parent);
put_out_noputparent:
	free(pth_na, M_DEVBUF);
	return error;
}

#endif /* WITH_PTNETMAP_HOST */
#ifdef WITH_PTNETMAP_GUEST
/*
 * Guest ptnetmap txsync()/rxsync() routines, used in ptnet device drivers.
 * These routines are reused across the different operating systems supported
 * by netmap.
 */

/*
 * Reconcile host and guest views of the transmit ring.
 *
 * Guest user wants to transmit packets up to the one before ring->head,
 * and guest kernel knows tx_ring->hwcur is the first packet unsent
 * by the host kernel.
 *
 * We push out as many packets as possible, and possibly
 * reclaim buffers from previously completed transmissions.
 *
 * Notifications from the host are enabled only if the guest user would
 * block (no space in the ring).
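 *
 * The returned boolean (notify) is meant to tell the caller (the ptnet
 * driver) whether it should now kick the host, so that the actual
 * notification is issued outside of this common routine.
 */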
bool
netmap_pt_guest_txsync(struct ptnet_ring *ptring, struct netmap_kring *kring,
		       int flags)
{
	bool notify = false;

	/* Disable notifications */
	ptring->guest_need_kick = 0;

	/*
	 * First part: tell the host (updating the CSB) to process the new
	 * packets.
	 */
	kring->nr_hwcur = ptring->hwcur;
	ptnetmap_guest_write_kring_csb(ptring, kring->rcur, kring->rhead);

	/* Ask for a kick from the guest to the host if needed. */
	if ((kring->rhead != kring->nr_hwcur &&
	     NM_ACCESS_ONCE(ptring->host_need_kick)) ||
	    (flags & NAF_FORCE_RECLAIM)) {
		ptring->sync_flags = flags;
		notify = true;
	}

	/*
	 * Second part: reclaim buffers for completed transmissions.
	 */
	if (nm_kr_txempty(kring) || (flags & NAF_FORCE_RECLAIM)) {
		ptnetmap_guest_read_kring_csb(ptring, kring);
	}

	/*
	 * No more room in the ring for new transmissions. The user thread will
	 * go to sleep and we need to be notified by the host when more free
	 * space is available.
	 */
	if (nm_kr_txempty(kring)) {
		/* Reenable notifications. */
		ptring->guest_need_kick = 1;
		ptnetmap_guest_read_kring_csb(ptring, kring);
		/* If there is new free space, disable notifications */
		if (unlikely(!nm_kr_txempty(kring))) {
			ptring->guest_need_kick = 0;
		}
	}

	ND(1, "TX - CSB: head:%u cur:%u hwtail:%u - KRING: head:%u cur:%u tail: %u",
	   ptring->head, ptring->cur, ptring->hwtail,
	   kring->rhead, kring->rcur, kring->nr_hwtail);

	return notify;
}
/*
 * Reconcile host and guest view of the receive ring.
 *
 * Update hwcur/hwtail from host (reading from CSB).
 *
 * If guest user has released buffers up to the one before ring->head, we
 * also give them to the host.
 *
 * Notifications from the host are enabled only if the guest user would
 * block (no more completed slots in the ring).
 */
bool
netmap_pt_guest_rxsync(struct ptnet_ring *ptring, struct netmap_kring *kring,
		       int flags)
{
	bool notify = false;

	/* Disable notifications */
	ptring->guest_need_kick = 0;

	/*
	 * First part: import newly received packets, by updating the kring
	 * hwtail to the hwtail known from the host (read from the CSB).
	 * This also updates the kring hwcur.
	 */
	ptnetmap_guest_read_kring_csb(ptring, kring);
	kring->nr_kflags &= ~NKR_PENDINTR;

	/*
	 * Second part: tell the host about the slots that guest user has
	 * released, by updating cur and head in the CSB.
	 */
	if (kring->rhead != kring->nr_hwcur) {
		ptnetmap_guest_write_kring_csb(ptring, kring->rcur,
					       kring->rhead);
		/* Ask for a kick from the guest to the host if needed. */
		if (NM_ACCESS_ONCE(ptring->host_need_kick)) {
			ptring->sync_flags = flags;
			notify = true;
		}
	}

	/*
	 * No more completed RX slots. The user thread will go to sleep and
	 * we need to be notified by the host when more RX slots have been
	 * completed.
	 */
	if (nm_kr_rxempty(kring)) {
		/* Reenable notifications. */
		ptring->guest_need_kick = 1;
		ptnetmap_guest_read_kring_csb(ptring, kring);
		/* If there are new slots, disable notifications. */
		if (!nm_kr_rxempty(kring)) {
			ptring->guest_need_kick = 0;
		}
	}

	ND(1, "RX - CSB: head:%u cur:%u hwtail:%u - KRING: head:%u cur:%u",
	   ptring->head, ptring->cur, ptring->hwtail,
	   kring->rhead, kring->rcur);

	return notify;
}
/*
 * Callbacks for ptnet drivers: nm_krings_create, nm_krings_delete, nm_dtor.
 */
int
ptnet_nm_krings_create(struct netmap_adapter *na)
{
	struct netmap_pt_guest_adapter *ptna =
		(struct netmap_pt_guest_adapter *)na; /* Upcast. */
	struct netmap_adapter *na_nm = &ptna->hwup.up;
	struct netmap_adapter *na_dr = &ptna->dr.up;
	int ret;

	if (ptna->backend_regifs) {
		return 0;
	}

	/* Create krings on the public netmap adapter. */
	ret = netmap_hw_krings_create(na_nm);
	if (ret) {
		return ret;
	}

	/* Copy krings into the netmap adapter private to the driver. */
	na_dr->tx_rings = na_nm->tx_rings;
	na_dr->rx_rings = na_nm->rx_rings;

	return 0;
}

void
ptnet_nm_krings_delete(struct netmap_adapter *na)
{
	struct netmap_pt_guest_adapter *ptna =
		(struct netmap_pt_guest_adapter *)na; /* Upcast. */
	struct netmap_adapter *na_nm = &ptna->hwup.up;
	struct netmap_adapter *na_dr = &ptna->dr.up;

	if (ptna->backend_regifs) {
		return;
	}

	na_dr->tx_rings = NULL;
	na_dr->rx_rings = NULL;

	netmap_hw_krings_delete(na_nm);
}

void
ptnet_nm_dtor(struct netmap_adapter *na)
{
	struct netmap_pt_guest_adapter *ptna =
		(struct netmap_pt_guest_adapter *)na;

	netmap_mem_put(ptna->dr.up.nm_mem);
	memset(&ptna->dr, 0, sizeof(ptna->dr));
	netmap_mem_pt_guest_ifp_del(na->nm_mem, na->ifp);
}

#endif /* WITH_PTNETMAP_GUEST */