2 * Copyright (C) 2015 Stefano Garzarella
3 * Copyright (C) 2016 Vincenzo Maffione
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 #if defined(__FreeBSD__)
34 #include <sys/cdefs.h>
35 #include <sys/param.h>
36 #include <sys/kernel.h>
37 #include <sys/types.h>
38 #include <sys/selinfo.h>
39 #include <sys/socket.h>
41 #include <net/if_var.h>
42 #include <machine/bus.h>
44 //#define usleep_range(_1, _2)
45 #define usleep_range(_1, _2) \
46 pause_sbt("ptnetmap-sleep", SBT_1US * _1, SBT_1US * 1, C_ABSOLUTE)
52 #include <net/netmap.h>
53 #include <dev/netmap/netmap_kern.h>
54 #include <net/netmap_virt.h>
55 #include <dev/netmap/netmap_mem2.h>
57 #ifdef WITH_PTNETMAP_HOST
59 /* RX cycle without receive any packets */
60 #define PTN_RX_DRY_CYCLES_MAX 10
62 /* Limit Batch TX to half ring.
63 * Currently disabled, since it does not manage NS_MOREFRAG, which
64 * results in random drops in the VALE txsync. */
65 //#define PTN_TX_BATCH_LIM(_n) ((_n >> 1))
69 #define NETMAP_PT_DEBUG /* Enables communication debugging. */
70 #ifdef NETMAP_PT_DEBUG
78 //#define RATE /* Enables communication statistics. */
/* Per-direction batch accounting used when RATE is enabled.
 * NOTE(review): several field lines are missing from this view of the file;
 * the visible fields count syncs that moved no packets and per-event kicks. */
81 struct rate_batch_stats {
83 unsigned long sync_dry;
/* Snapshot of communication counters, sampled periodically by rate_callback(). */
88 unsigned long gtxk; /* Guest --> Host Tx kicks. */
89 unsigned long grxk; /* Guest --> Host Rx kicks. */
90 unsigned long htxk; /* Host --> Guest Tx kicks. */
91 unsigned long hrxk; /* Host --> Guest Rx Kicks. */
92 unsigned long btxwu; /* Backend Tx wake-up. */
93 unsigned long brxwu; /* Backend Rx wake-up. */
94 struct rate_batch_stats txbs;
95 struct rate_batch_stats rxbs;
/* Timer context: 'new' accumulates live counters, 'old' holds the
 * previous sample so the callback can compute per-period deltas. */
99 struct timer_list timer;
100 struct rate_stats new;
101 struct rate_stats old;
/* Sampling period, in seconds. */
104 #define RATE_PERIOD 2
/* Periodic timer callback (Linux timer API): computes and prints per-second
 * rates of packets, kicks and wake-ups over the last RATE_PERIOD seconds,
 * then re-arms itself. 'arg' is the rate_context set up in ptnetmap_create(). */
106 rate_callback(unsigned long arg)
108 struct rate_context * ctx = (struct rate_context *)arg;
109 struct rate_stats cur = ctx->new;
110 struct rate_batch_stats *txbs = &cur.txbs;
111 struct rate_batch_stats *rxbs = &cur.rxbs;
112 struct rate_batch_stats *txbs_old = &ctx->old.txbs;
113 struct rate_batch_stats *rxbs_old = &ctx->old.rxbs;
114 uint64_t tx_batch, rx_batch;
115 unsigned long txpkts, rxpkts;
116 unsigned long gtxk, grxk;
/* Packet deltas since the previous sample. */
119 txpkts = txbs->pkt - txbs_old->pkt;
120 rxpkts = rxbs->pkt - rxbs_old->pkt;
/* Average batch size = packets per sync; guard against division by zero
 * when no sync happened in the period. */
122 tx_batch = ((txbs->sync - txbs_old->sync) > 0) ?
123 txpkts / (txbs->sync - txbs_old->sync): 0;
124 rx_batch = ((rxbs->sync - rxbs_old->sync) > 0) ?
125 rxpkts / (rxbs->sync - rxbs_old->sync): 0;
127 /* Fix-up gtxk and grxk estimates: guest kicks were counted optimistically
128 * in the handlers, so subtract the backend wake-ups that were mistaken
 * for guest kicks. */
128 gtxk = (cur.gtxk - ctx->old.gtxk) - (cur.btxwu - ctx->old.btxwu);
129 grxk = (cur.grxk - ctx->old.grxk) - (cur.brxwu - ctx->old.brxwu);
131 printk("txpkts = %lu Hz\n", txpkts/RATE_PERIOD);
132 printk("gtxk = %lu Hz\n", gtxk/RATE_PERIOD);
133 printk("htxk = %lu Hz\n", (cur.htxk - ctx->old.htxk)/RATE_PERIOD);
134 printk("btxw = %lu Hz\n", (cur.btxwu - ctx->old.btxwu)/RATE_PERIOD);
135 printk("rxpkts = %lu Hz\n", rxpkts/RATE_PERIOD);
136 printk("grxk = %lu Hz\n", grxk/RATE_PERIOD);
137 printk("hrxk = %lu Hz\n", (cur.hrxk - ctx->old.hrxk)/RATE_PERIOD);
138 printk("brxw = %lu Hz\n", (cur.brxwu - ctx->old.brxwu)/RATE_PERIOD);
139 printk("txbatch = %llu avg\n", tx_batch);
140 printk("rxbatch = %llu avg\n", rx_batch);
/* Re-arm the timer for the next sampling period. */
144 r = mod_timer(&ctx->timer, jiffies +
145 msecs_to_jiffies(RATE_PERIOD * 1000));
147 D("[ptnetmap] Error: mod_timer()\n");
/* Accumulate batch statistics for one sync: 'n' is the number of slots the
 * tail advanced, computed modulo the ring size (num_slots) from the tail
 * positions before and after the sync.
 * NOTE(review): the body that folds 'n' into *bf is missing from this view. */
151 rate_batch_stats_update(struct rate_batch_stats *bf, uint32_t pre_tail,
152 uint32_t act_tail, uint32_t num_slots)
154 int n = (int)act_tail - pre_tail;
/* Per-adapter ptnetmap host state: one kernel context (worker) per ring,
 * plus the guest/host shared CSB arrays mapped from hypervisor memory. */
171 struct ptnetmap_state {
/* Kernel worker contexts, one per TX ring followed by one per RX ring. */
173 struct nm_kctx **kctxs;
175 /* Shared memory with the guest (TX/RX) */
176 struct ptnet_csb_gh __user *csb_gh;
177 struct ptnet_csb_hg __user *csb_hg;
181 /* Netmap adapter wrapping the backend. */
182 struct netmap_pt_host_adapter *pth_na;
/* Statistics context, compiled in only when RATE is defined. */
184 IFRATE(struct rate_context rate_ctx;)
/* Debug helper: dump all pointers (hw, reader and user views) of a kring,
 * prefixed with 'title' to identify the call site. */
188 ptnetmap_kring_dump(const char *title, const struct netmap_kring *kring)
190 D("%s - name: %s hwcur: %d hwtail: %d rhead: %d rcur: %d"
191 " rtail: %d head: %d cur: %d tail: %d",
192 title, kring->name, kring->nr_hwcur,
193 kring->nr_hwtail, kring->rhead, kring->rcur, kring->rtail,
194 kring->ring->head, kring->ring->cur, kring->ring->tail);
198 * TX functions to set/get and to handle host/guest kick.
202 /* Enable or disable guest --> host kicks. */
/* Writes 'val' to the host_need_kick field of the host->guest CSB entry
 * (userspace memory, hence the CSB_WRITE accessor). */
204 pthg_kick_enable(struct ptnet_csb_hg __user *pthg, uint32_t val)
206 CSB_WRITE(pthg, host_need_kick, val);
209 /* Are guest interrupt enabled or disabled? */
/* Reads guest_need_kick from the guest->host CSB entry; non-zero means the
 * guest wants to be interrupted when new work completes. */
210 static inline uint32_t
211 ptgh_intr_enabled(struct ptnet_csb_gh __user *ptgh)
215 CSB_READ(ptgh, guest_need_kick, v);
220 /* Handle TX events: from the guest or from the backend */
/* Worker body for a TX ring: runs in a kthread (is_kthread != 0) or inline
 * from the notify path. Loops moving guest-posted slots to the backend via
 * kring->nm_sync(), publishing progress back to the CSB, and kicking the
 * guest when more TX space becomes available. Uses a race-recheck pattern
 * around notification enable/disable to avoid missing guest kicks. */
222 ptnetmap_tx_handler(void *data, int is_kthread)
224 struct netmap_kring *kring = data;
225 struct netmap_pt_host_adapter *pth_na =
226 (struct netmap_pt_host_adapter *)kring->na->na_private;
227 struct ptnetmap_state *ptns = pth_na->ptns;
228 struct ptnet_csb_gh __user *ptgh;
229 struct ptnet_csb_hg __user *pthg;
230 struct netmap_ring shadow_ring; /* shadow copy of the netmap_ring */
231 bool more_txspace = false;
235 IFRATE(uint32_t pre_tail);
/* Bail out early if the ptnetmap state is gone or being torn down. */
237 if (unlikely(!ptns)) {
238 D("ERROR ptnetmap state is NULL");
242 if (unlikely(ptns->stopped)) {
243 RD(1, "backend netmap is being stopped");
/* Take the kring busy lock; failure means the ring is being reset. */
247 if (unlikely(nm_kr_tryget(kring, 1, NULL))) {
248 D("ERROR nm_kr_tryget()");
252 /* This is a guess, to be fixed in the rate callback. */
253 IFRATE(ptns->rate_ctx.new.gtxk++);
255 /* Get TX ptgh/pthg pointer from the CSB. */
256 ptgh = ptns->csb_gh + kring->ring_id;
257 pthg = ptns->csb_hg + kring->ring_id;
258 kth = ptns->kctxs[kring->ring_id];
260 num_slots = kring->nkr_num_slots;
262 /* Disable guest --> host notifications. */
263 pthg_kick_enable(pthg, 0);
264 /* Copy the guest kring pointers from the CSB */
265 ptnetmap_host_read_kring_csb(ptgh, &shadow_ring, num_slots);
268 /* If guest moves ahead too fast, let's cut the move so
269 * that we don't exceed our batch limit. */
270 batch = shadow_ring.head - kring->nr_hwcur;
274 #ifdef PTN_TX_BATCH_LIM
275 if (batch > PTN_TX_BATCH_LIM(num_slots)) {
276 uint32_t head_lim = kring->nr_hwcur + PTN_TX_BATCH_LIM(num_slots);
/* Wrap head_lim around the circular ring if needed. */
278 if (head_lim >= num_slots)
279 head_lim -= num_slots;
280 ND(1, "batch: %d head: %d head_lim: %d", batch, shadow_ring.head,
282 shadow_ring.head = head_lim;
283 batch = PTN_TX_BATCH_LIM(num_slots);
285 #endif /* PTN_TX_BATCH_LIM */
/* Less than half the ring free: ask the backend to reclaim
 * completed buffers during this sync. */
287 if (nm_kr_txspace(kring) <= (num_slots >> 1)) {
288 shadow_ring.flags |= NAF_FORCE_RECLAIM;
291 /* Netmap prologue */
292 shadow_ring.tail = kring->rtail;
293 if (unlikely(nm_txsync_prologue(kring, &shadow_ring) >= num_slots)) {
294 /* Reinit ring and enable notifications. */
295 netmap_ring_reinit(kring);
296 pthg_kick_enable(pthg, 1);
300 if (unlikely(netmap_verbose & NM_VERB_TXSYNC)) {
301 ptnetmap_kring_dump("pre txsync", kring);
304 IFRATE(pre_tail = kring->rtail);
/* Push the batch to the backend adapter. */
305 if (unlikely(kring->nm_sync(kring, shadow_ring.flags))) {
306 /* Reenable notifications. */
307 pthg_kick_enable(pthg, 1);
314 * Copy host hwcur and hwtail into the CSB for the guest sync(), and
315 * do the nm_sync_finalize.
317 ptnetmap_host_write_kring_csb(pthg, kring->nr_hwcur,
319 if (kring->rtail != kring->nr_hwtail) {
320 /* Some more room available in the parent adapter. */
321 kring->rtail = kring->nr_hwtail;
325 IFRATE(rate_batch_stats_update(&ptns->rate_ctx.new.txbs, pre_tail,
326 kring->rtail, num_slots));
328 if (unlikely(netmap_verbose & NM_VERB_TXSYNC)) {
329 ptnetmap_kring_dump("post txsync", kring);
333 /* Interrupt the guest if needed. */
334 if (more_txspace && ptgh_intr_enabled(ptgh) && is_kthread) {
335 /* Disable guest kick to avoid sending unnecessary kicks */
336 nm_os_kctx_send_irq(kth);
337 IFRATE(ptns->rate_ctx.new.htxk++);
338 more_txspace = false;
341 /* Read CSB to see if there is more work to do. */
342 ptnetmap_host_read_kring_csb(ptgh, &shadow_ring, num_slots);
344 if (shadow_ring.head == kring->rhead) {
346 * No more packets to transmit. We enable notifications and
347 * go to sleep, waiting for a kick from the guest when
348 * new slots are ready for transmission.
353 /* Reenable notifications. */
354 pthg_kick_enable(pthg, 1);
/* Re-read the CSB after enabling kicks to close the race with a
 * guest that posted new slots in between. */
356 ptnetmap_host_read_kring_csb(ptgh, &shadow_ring, num_slots);
357 if (shadow_ring.head != kring->rhead) {
358 /* We won the race condition, there are more packets to
359 * transmit. Disable notifications and do another cycle */
360 pthg_kick_enable(pthg, 0);
366 if (nm_kr_txempty(kring)) {
367 /* No more available TX slots. We stop waiting for a notification
368 * from the backend (netmap_tx_irq). */
373 if (unlikely(ptns->stopped)) {
374 D("backend netmap is being stopped");
/* Final kick: deliver any pending "more space" interrupt before exiting. */
381 if (more_txspace && ptgh_intr_enabled(ptgh) && is_kthread) {
382 nm_os_kctx_send_irq(kth);
383 IFRATE(ptns->rate_ctx.new.htxk++);
387 /* Called on backend nm_notify when there is no worker thread. */
/* Notify variant used when TX workers run inline (no kthread): simply
 * forwards the backend event to the guest as an interrupt. */
389 ptnetmap_tx_nothread_notify(void *data)
391 struct netmap_kring *kring = data;
392 struct netmap_pt_host_adapter *pth_na =
393 (struct netmap_pt_host_adapter *)kring->na->na_private;
394 struct ptnetmap_state *ptns = pth_na->ptns;
396 if (unlikely(!ptns)) {
397 D("ERROR ptnetmap state is NULL");
401 if (unlikely(ptns->stopped)) {
402 D("backend netmap is being stopped");
406 /* We cannot access the CSB here (to check ptgh->guest_need_kick),
407 * unless we switch address space to the one of the guest. For now
408 * we unconditionally inject an interrupt. */
409 nm_os_kctx_send_irq(ptns->kctxs[kring->ring_id]);
410 IFRATE(ptns->rate_ctx.new.htxk++);
411 ND(1, "%s interrupt", kring->name);
415 * We need RX kicks from the guest when (tail == head-1), where we wait
416 * for the guest to refill.
/* True when the RX ring has no free slots for the host to fill, i.e. the
 * hardware tail has caught up with the slot just before the guest head. */
420 ptnetmap_norxslots(struct netmap_kring *kring, uint32_t g_head)
422 return (NM_ACCESS_ONCE(kring->nr_hwtail) == nm_prev(g_head,
423 kring->nkr_num_slots - 1));
425 #endif /* !BUSY_WAIT */
427 /* Handle RX events: from the guest or from the backend */
/* Worker body for an RX ring, mirror image of ptnetmap_tx_handler(): pulls
 * received slots from the backend via kring->nm_sync(), publishes hwcur/hwtail
 * to the CSB, interrupts the guest when packets arrived, and uses the same
 * enable-notifications-then-recheck pattern to avoid losing guest kicks.
 * RX CSB entries and kctxs are indexed after all the TX ones. */
429 ptnetmap_rx_handler(void *data, int is_kthread)
431 struct netmap_kring *kring = data;
432 struct netmap_pt_host_adapter *pth_na =
433 (struct netmap_pt_host_adapter *)kring->na->na_private;
434 struct ptnetmap_state *ptns = pth_na->ptns;
435 struct ptnet_csb_gh __user *ptgh;
436 struct ptnet_csb_hg __user *pthg;
437 struct netmap_ring shadow_ring; /* shadow copy of the netmap_ring */
441 bool some_recvd = false;
442 IFRATE(uint32_t pre_tail);
444 if (unlikely(!ptns || !ptns->pth_na)) {
445 D("ERROR ptnetmap state %p, ptnetmap host adapter %p", ptns,
446 ptns ? ptns->pth_na : NULL);
450 if (unlikely(ptns->stopped)) {
451 RD(1, "backend netmap is being stopped");
455 if (unlikely(nm_kr_tryget(kring, 1, NULL))) {
456 D("ERROR nm_kr_tryget()");
460 /* This is a guess, to be fixed in the rate callback. */
461 IFRATE(ptns->rate_ctx.new.grxk++);
463 /* Get RX ptgh and pthg pointers from the CSB. */
464 ptgh = ptns->csb_gh + (pth_na->up.num_tx_rings + kring->ring_id);
465 pthg = ptns->csb_hg + (pth_na->up.num_tx_rings + kring->ring_id);
466 kth = ptns->kctxs[pth_na->up.num_tx_rings + kring->ring_id];
468 num_slots = kring->nkr_num_slots;
470 /* Disable notifications. */
471 pthg_kick_enable(pthg, 0);
472 /* Copy the guest kring pointers from the CSB */
473 ptnetmap_host_read_kring_csb(ptgh, &shadow_ring, num_slots);
478 /* Netmap prologue */
479 shadow_ring.tail = kring->rtail;
480 if (unlikely(nm_rxsync_prologue(kring, &shadow_ring) >= num_slots)) {
481 /* Reinit ring and enable notifications. */
482 netmap_ring_reinit(kring);
483 pthg_kick_enable(pthg, 1);
487 if (unlikely(netmap_verbose & NM_VERB_RXSYNC)) {
488 ptnetmap_kring_dump("pre rxsync", kring);
491 IFRATE(pre_tail = kring->rtail);
/* Pull received packets from the backend adapter. */
492 if (unlikely(kring->nm_sync(kring, shadow_ring.flags))) {
493 /* Reenable notifications. */
494 pthg_kick_enable(pthg, 1);
500 * Copy host hwcur and hwtail into the CSB for the guest sync()
502 hwtail = NM_ACCESS_ONCE(kring->nr_hwtail);
503 ptnetmap_host_write_kring_csb(pthg, kring->nr_hwcur, hwtail);
504 if (kring->rtail != hwtail) {
505 kring->rtail = hwtail;
512 IFRATE(rate_batch_stats_update(&ptns->rate_ctx.new.rxbs, pre_tail,
513 kring->rtail, num_slots));
515 if (unlikely(netmap_verbose & NM_VERB_RXSYNC)) {
516 ptnetmap_kring_dump("post rxsync", kring);
520 /* Interrupt the guest if needed. */
521 if (some_recvd && ptgh_intr_enabled(ptgh)) {
522 /* Disable guest kick to avoid sending unnecessary kicks */
523 nm_os_kctx_send_irq(kth);
524 IFRATE(ptns->rate_ctx.new.hrxk++);
528 /* Read CSB to see if there is more work to do. */
529 ptnetmap_host_read_kring_csb(ptgh, &shadow_ring, num_slots);
531 if (ptnetmap_norxslots(kring, shadow_ring.head)) {
533 * No more slots available for reception. We enable notification and
534 * go to sleep, waiting for a kick from the guest when new receive
535 * slots are available.
538 /* Reenable notifications. */
539 pthg_kick_enable(pthg, 1);
/* Re-read the CSB to close the race with a guest that released
 * slots after we enabled notifications. */
541 ptnetmap_host_read_kring_csb(ptgh, &shadow_ring, num_slots);
542 if (!ptnetmap_norxslots(kring, shadow_ring.head)) {
543 /* We won the race condition, more slots are available. Disable
544 * notifications and do another cycle. */
545 pthg_kick_enable(pthg, 0);
/* Stop also when the backend ran dry for too many consecutive cycles
 * (PTN_RX_DRY_CYCLES_MAX); a netmap_rx_irq will wake us again. */
551 hwtail = NM_ACCESS_ONCE(kring->nr_hwtail);
552 if (unlikely(hwtail == kring->rhead ||
553 dry_cycles >= PTN_RX_DRY_CYCLES_MAX)) {
554 /* No more packets to be read from the backend. We stop and
555 * wait for a notification from the backend (netmap_rx_irq). */
556 ND(1, "nr_hwtail: %d rhead: %d dry_cycles: %d",
557 hwtail, kring->rhead, dry_cycles);
561 if (unlikely(ptns->stopped)) {
562 D("backend netmap is being stopped");
569 /* Interrupt the guest if needed. */
570 if (some_recvd && ptgh_intr_enabled(ptgh)) {
571 nm_os_kctx_send_irq(kth);
572 IFRATE(ptns->rate_ctx.new.hrxk++);
576 #ifdef NETMAP_PT_DEBUG
/* Debug dump of the hypervisor-provided configuration: CSB addresses plus
 * the per-ring entries, whose layout depends on cfgtype (QEMU vs bhyve).
 * Entries are laid out contiguously right after the ptnetmap_cfg header. */
578 ptnetmap_print_configuration(struct ptnetmap_cfg *cfg)
582 D("ptnetmap configuration:");
583 D(" CSB @%p@:%p, num_rings=%u, cfgtype %08x", cfg->csb_gh,
584 cfg->csb_hg, cfg->num_rings, cfg->cfgtype);
585 for (k = 0; k < cfg->num_rings; k++) {
586 switch (cfg->cfgtype) {
587 case PTNETMAP_CFGTYPE_QEMU: {
588 struct ptnetmap_cfgentry_qemu *e =
589 (struct ptnetmap_cfgentry_qemu *)(cfg+1) + k;
590 D(" ring #%d: ioeventfd=%lu, irqfd=%lu", k,
591 (unsigned long)e->ioeventfd,
592 (unsigned long)e->irqfd);
596 case PTNETMAP_CFGTYPE_BHYVE:
598 struct ptnetmap_cfgentry_bhyve *e =
599 (struct ptnetmap_cfgentry_bhyve *)(cfg+1) + k;
600 D(" ring #%d: wchan=%lu, ioctl_fd=%lu, "
601 "ioctl_cmd=%lu, msix_msg_data=%lu, msix_addr=%lu",
602 k, (unsigned long)e->wchan,
603 (unsigned long)e->ioctl_fd,
604 (unsigned long)e->ioctl_cmd,
605 (unsigned long)e->ioctl_data.msg_data,
606 (unsigned long)e->ioctl_data.addr);
613 #endif /* NETMAP_PT_DEBUG */
615 /* Copy actual state of the host ring into the CSB for the guest init */
/* Publishes head/cur to the guest-side entry and hwcur/hwtail to the
 * host-side entry; each CSB_WRITE returns non-zero on copyout failure. */
617 ptnetmap_kring_snapshot(struct netmap_kring *kring,
618 struct ptnet_csb_gh __user *ptgh,
619 struct ptnet_csb_hg __user *pthg)
621 if (CSB_WRITE(ptgh, head, kring->rhead))
623 if (CSB_WRITE(ptgh, cur, kring->rcur))
626 if (CSB_WRITE(pthg, hwcur, kring->nr_hwcur))
628 if (CSB_WRITE(pthg, hwtail, NM_ACCESS_ONCE(kring->nr_hwtail)))
631 DBG(ptnetmap_kring_dump("ptnetmap_kring_snapshot", kring);)
/* Map a flat ring index to a kring: indices [0, num_tx_rings) are TX rings,
 * the remaining ones are RX rings. Same indexing as the kctxs/CSB arrays. */
638 static struct netmap_kring *
639 ptnetmap_kring(struct netmap_pt_host_adapter *pth_na, int k)
641 if (k < pth_na->up.num_tx_rings) {
642 return pth_na->up.tx_rings[k];
644 return pth_na->up.rx_rings[k - pth_na->up.num_tx_rings];
/* Snapshot every TX and RX kring into its CSB entry; errors from the
 * individual snapshots are OR-ed together into the return value. */
648 ptnetmap_krings_snapshot(struct netmap_pt_host_adapter *pth_na)
650 struct ptnetmap_state *ptns = pth_na->ptns;
651 struct netmap_kring *kring;
652 unsigned int num_rings;
655 num_rings = pth_na->up.num_tx_rings +
656 pth_na->up.num_rx_rings;
658 for (k = 0; k < num_rings; k++) {
659 kring = ptnetmap_kring(pth_na, k);
660 err |= ptnetmap_kring_snapshot(kring, ptns->csb_gh + k,
668 * Functions to create kernel contexts, and start/stop the workers.
/* Create one kernel context per ring (TX first, then RX). The per-ring
 * hypervisor config entries follow the ptnetmap_cfg header; their type must
 * match the platform (bhyve on FreeBSD, QEMU otherwise). On any failure
 * all contexts created so far are destroyed before returning. */
672 ptnetmap_create_kctxs(struct netmap_pt_host_adapter *pth_na,
673 struct ptnetmap_cfg *cfg, int use_tx_kthreads)
675 struct ptnetmap_state *ptns = pth_na->ptns;
676 struct nm_kctx_cfg nmk_cfg;
677 unsigned int num_rings;
678 uint8_t *cfg_entries = (uint8_t *)(cfg + 1);
679 unsigned int expected_cfgtype = 0;
682 #if defined(__FreeBSD__)
683 expected_cfgtype = PTNETMAP_CFGTYPE_BHYVE;
685 expected_cfgtype = PTNETMAP_CFGTYPE_QEMU;
687 if (cfg->cfgtype != expected_cfgtype) {
688 D("Unsupported cfgtype %u", cfg->cfgtype);
692 num_rings = pth_na->up.num_tx_rings +
693 pth_na->up.num_rx_rings;
695 for (k = 0; k < num_rings; k++) {
696 nmk_cfg.attach_user = 1; /* attach kthread to user process */
697 nmk_cfg.worker_private = ptnetmap_kring(pth_na, k);
/* TX rings may run inline (use_tx_kthreads == 0); RX rings
 * always get a dedicated kthread. */
699 if (k < pth_na->up.num_tx_rings) {
700 nmk_cfg.worker_fn = ptnetmap_tx_handler;
701 nmk_cfg.use_kthread = use_tx_kthreads;
702 nmk_cfg.notify_fn = ptnetmap_tx_nothread_notify;
704 nmk_cfg.worker_fn = ptnetmap_rx_handler;
705 nmk_cfg.use_kthread = 1;
708 ptns->kctxs[k] = nm_os_kctx_create(&nmk_cfg,
709 cfg_entries + k * cfg->entry_size);
710 if (ptns->kctxs[k] == NULL) {
/* Error path: tear down every context created so far. */
717 for (k = 0; k < num_rings; k++) {
718 if (ptns->kctxs[k]) {
719 nm_os_kctx_destroy(ptns->kctxs[k]);
720 ptns->kctxs[k] = NULL;
/* Clear the stopped flag and start the worker of every kernel context. */
727 ptnetmap_start_kctx_workers(struct netmap_pt_host_adapter *pth_na)
729 struct ptnetmap_state *ptns = pth_na->ptns;
735 D("BUG ptns is NULL");
739 ptns->stopped = false;
741 num_rings = ptns->pth_na->up.num_tx_rings +
742 ptns->pth_na->up.num_rx_rings;
743 for (k = 0; k < num_rings; k++) {
744 //nm_os_kctx_worker_setaff(ptns->kctxs[k], xxx);
745 error = nm_os_kctx_worker_start(ptns->kctxs[k]);
/* Set the stopped flag (so running handlers bail out) and stop every
 * kernel context worker. */
755 ptnetmap_stop_kctx_workers(struct netmap_pt_host_adapter *pth_na)
757 struct ptnetmap_state *ptns = pth_na->ptns;
766 ptns->stopped = true;
768 num_rings = ptns->pth_na->up.num_tx_rings +
769 ptns->pth_na->up.num_rx_rings;
770 for (k = 0; k < num_rings; k++) {
771 nm_os_kctx_worker_stop(ptns->kctxs[k]);
775 static int nm_unused_notify(struct netmap_kring *, int);
776 static int nm_pt_host_notify(struct netmap_kring *, int);
778 /* Create ptnetmap state and switch parent adapter to ptnetmap mode. */
/* Allocates ptnetmap_state (with the kctxs array in the same allocation),
 * stores the CSB pointers from the hypervisor config, creates the kernel
 * contexts, snapshots kring state into the CSB, and redirects the parent's
 * notify callbacks to the ptnetmap ones. Returns 0 on success. */
780 ptnetmap_create(struct netmap_pt_host_adapter *pth_na,
781 struct ptnetmap_cfg *cfg)
783 int use_tx_kthreads = ptnetmap_tx_workers; /* snapshot */
784 struct ptnetmap_state *ptns;
785 unsigned int num_rings;
788 /* Check if ptnetmap state is already there. */
790 D("ERROR adapter %p already in ptnetmap mode", pth_na->parent);
794 num_rings = pth_na->up.num_tx_rings + pth_na->up.num_rx_rings;
796 if (num_rings != cfg->num_rings) {
797 D("ERROR configuration mismatch, expected %u rings, found %u",
798 num_rings, cfg->num_rings);
802 if (!use_tx_kthreads && na_is_generic(pth_na->parent)) {
803 D("ERROR ptnetmap direct transmission not supported with "
804 "passed-through emulated adapters");
/* Single allocation: state struct followed by the kctx pointer array. */
808 ptns = nm_os_malloc(sizeof(*ptns) + num_rings * sizeof(*ptns->kctxs));
813 ptns->kctxs = (struct nm_kctx **)(ptns + 1);
814 ptns->stopped = true;
816 /* Cross-link data structures. */
818 ptns->pth_na = pth_na;
820 /* Store the CSB address provided by the hypervisor. */
821 ptns->csb_gh = cfg->csb_gh;
822 ptns->csb_hg = cfg->csb_hg;
824 DBG(ptnetmap_print_configuration(cfg));
826 /* Create kernel contexts. */
827 if ((ret = ptnetmap_create_kctxs(pth_na, cfg, use_tx_kthreads))) {
828 D("ERROR ptnetmap_create_kctxs()");
831 /* Copy krings state into the CSB for the guest initialization */
832 if ((ret = ptnetmap_krings_snapshot(pth_na))) {
833 D("ERROR ptnetmap_krings_snapshot()");
837 /* Overwrite parent nm_notify krings callback, and
838 * clear NAF_BDG_MAYSLEEP if needed. */
839 pth_na->parent->na_private = pth_na;
840 pth_na->parent_nm_notify = pth_na->parent->nm_notify;
841 pth_na->parent->nm_notify = nm_unused_notify;
842 pth_na->parent_na_flags = pth_na->parent->na_flags;
843 if (!use_tx_kthreads) {
844 /* VALE port txsync is executed under spinlock on Linux, so
845 * we need to make sure the bridge cannot sleep. */
846 pth_na->parent->na_flags &= ~NAF_BDG_MAYSLEEP;
/* Save the per-kring notify callbacks and install the ptnetmap ones;
 * restored by ptnetmap_delete(). */
849 for (i = 0; i < pth_na->parent->num_rx_rings; i++) {
850 pth_na->up.rx_rings[i]->save_notify =
851 pth_na->up.rx_rings[i]->nm_notify;
852 pth_na->up.rx_rings[i]->nm_notify = nm_pt_host_notify;
854 for (i = 0; i < pth_na->parent->num_tx_rings; i++) {
855 pth_na->up.tx_rings[i]->save_notify =
856 pth_na->up.tx_rings[i]->nm_notify;
857 pth_na->up.tx_rings[i]->nm_notify = nm_pt_host_notify;
/* RATE-only: arm the statistics timer (Linux timer API). */
861 memset(&ptns->rate_ctx, 0, sizeof(ptns->rate_ctx));
862 setup_timer(&ptns->rate_ctx.timer, &rate_callback,
863 (unsigned long)&ptns->rate_ctx);
864 if (mod_timer(&ptns->rate_ctx.timer, jiffies + msecs_to_jiffies(1500)))
865 D("[ptn] Error: mod_timer()\n");
868 DBG(D("[%s] ptnetmap configuration DONE", pth_na->up.name));
878 /* Switch parent adapter back to normal mode and destroy
/* Inverse of ptnetmap_create(): restores the saved notify callbacks and
 * adapter flags, destroys the kernel contexts, and (with RATE) cancels
 * the statistics timer. */
881 ptnetmap_delete(struct netmap_pt_host_adapter *pth_na)
883 struct ptnetmap_state *ptns = pth_na->ptns;
892 /* Restore parent adapter callbacks. */
893 pth_na->parent->nm_notify = pth_na->parent_nm_notify;
894 pth_na->parent->na_private = NULL;
895 pth_na->parent->na_flags = pth_na->parent_na_flags;
897 for (i = 0; i < pth_na->parent->num_rx_rings; i++) {
898 pth_na->up.rx_rings[i]->nm_notify =
899 pth_na->up.rx_rings[i]->save_notify;
900 pth_na->up.rx_rings[i]->save_notify = NULL;
902 for (i = 0; i < pth_na->parent->num_tx_rings; i++) {
903 pth_na->up.tx_rings[i]->nm_notify =
904 pth_na->up.tx_rings[i]->save_notify;
905 pth_na->up.tx_rings[i]->save_notify = NULL;
908 /* Destroy kernel contexts. */
909 num_rings = ptns->pth_na->up.num_tx_rings +
910 ptns->pth_na->up.num_rx_rings;
911 for (i = 0; i < num_rings; i++) {
912 nm_os_kctx_destroy(ptns->kctxs[i]);
913 ptns->kctxs[i] = NULL;
916 IFRATE(del_timer(&ptns->rate_ctx.timer));
922 DBG(D("[%s] ptnetmap deleted", pth_na->up.name));
926 * Called by netmap_ioctl().
927 * Operation is indicated in nr_name.
929 * Called without NMG_LOCK.
/* ioctl entry point: 'create' selects between entering ptnetmap mode
 * (create state, then start workers) and leaving it (stop workers, then
 * delete state). The adapter must already be a ptnetmap host adapter. */
932 ptnetmap_ctl(const char *nr_name, int create, struct netmap_adapter *na)
934 struct netmap_pt_host_adapter *pth_na;
935 struct ptnetmap_cfg *cfg = NULL;
938 DBG(D("name: %s", nr_name));
940 if (!nm_ptnetmap_host_on(na)) {
941 D("ERROR Netmap adapter %p is not a ptnetmap host adapter",
945 pth_na = (struct netmap_pt_host_adapter *)na;
949 /* Read hypervisor configuration from userspace. */
954 /* Create ptnetmap state (kctxs, ...) and switch parent
955 * adapter to ptnetmap mode. */
956 error = ptnetmap_create(pth_na, cfg);
961 /* Start kthreads. */
962 error = ptnetmap_start_kctx_workers(pth_na);
/* Worker start failed: roll back the ptnetmap state. */
964 ptnetmap_delete(pth_na);
967 ptnetmap_stop_kctx_workers(pth_na);
968 /* Switch parent adapter back to normal mode and destroy
969 * ptnetmap state (kthreads, ...). */
970 ptnetmap_delete(pth_na);
978 /* nm_notify callbacks for ptnetmap */
/* Installed on the parent's krings: translates a backend interrupt into a
 * wake-up of the corresponding worker context (RX contexts are indexed
 * after all the TX ones). */
980 nm_pt_host_notify(struct netmap_kring *kring, int flags)
982 struct netmap_adapter *na = kring->na;
983 struct netmap_pt_host_adapter *pth_na =
984 (struct netmap_pt_host_adapter *)na->na_private;
985 struct ptnetmap_state *ptns;
988 /* First check that the passthrough port is not being destroyed. */
989 if (unlikely(!pth_na)) {
990 return NM_IRQ_COMPLETED;
994 if (unlikely(!ptns || ptns->stopped)) {
995 return NM_IRQ_COMPLETED;
1000 /* Notify kthreads (wake up if needed) */
1001 if (kring->tx == NR_TX) {
1002 ND(1, "TX backend irq");
1003 IFRATE(ptns->rate_ctx.new.btxwu++);
1005 k += pth_na->up.num_tx_rings;
1006 ND(1, "RX backend irq");
1007 IFRATE(ptns->rate_ctx.new.brxwu++);
1009 nm_os_kctx_worker_wakeup(ptns->kctxs[k]);
1011 return NM_IRQ_COMPLETED;
/* Placeholder notify callback: must never fire, since the real per-kring
 * callbacks are installed by krings_create / ptnetmap_create. */
1015 nm_unused_notify(struct netmap_kring *kring, int flags)
1017 D("BUG this should never be called");
1021 /* nm_config callback for bwrap */
/* Refresh the parent's configuration and mirror its ring/descriptor counts
 * and buffer size into both this adapter and the info struct. */
1023 nm_pt_host_config(struct netmap_adapter *na, struct nm_config_info *info)
1025 struct netmap_pt_host_adapter *pth_na =
1026 (struct netmap_pt_host_adapter *)na;
1027 struct netmap_adapter *parent = pth_na->parent;
1030 //XXX: maybe calling parent->nm_config is better
1032 /* forward the request */
1033 error = netmap_update_config(parent);
1035 info->num_rx_rings = na->num_rx_rings = parent->num_rx_rings;
1036 info->num_tx_rings = na->num_tx_rings = parent->num_tx_rings;
1037 info->num_tx_descs = na->num_tx_desc = parent->num_tx_desc;
1038 info->num_rx_descs = na->num_rx_desc = parent->num_rx_desc;
1039 info->rx_buf_maxsize = na->rx_buf_maxsize = parent->rx_buf_maxsize;
1044 /* nm_krings_create callback for ptnetmap */
/* Delegate kring creation to the parent, then share the parent's kring
 * arrays instead of allocating our own, and force creation of the host
 * rings needed by the guest-side adapter. */
1046 nm_pt_host_krings_create(struct netmap_adapter *na)
1048 struct netmap_pt_host_adapter *pth_na =
1049 (struct netmap_pt_host_adapter *)na;
1050 struct netmap_adapter *parent = pth_na->parent;
1054 DBG(D("%s", pth_na->up.name));
1056 /* create the parent krings */
1057 error = parent->nm_krings_create(parent);
1062 /* A ptnetmap host adapter points the very same krings
1063 * as its parent adapter. These pointer are used in the
1064 * TX/RX worker functions. */
1065 na->tx_rings = parent->tx_rings;
1066 na->rx_rings = parent->rx_rings;
1067 na->tailroom = parent->tailroom;
1070 struct netmap_kring *kring;
1072 /* Parent's kring_create function will initialize
1073 * its own na->si. We have to init our na->si here. */
1074 nm_os_selinfo_init(&na->si[t]);
1076 /* Force the mem_rings_create() method to create the
1077 * host rings independently on what the regif asked for:
1078 * these rings are needed by the guest ptnetmap adapter
1080 kring = NMR(na, t)[nma_get_nrings(na, t)];
1081 kring->nr_kflags |= NKR_NEEDRING;
1087 /* nm_krings_delete callback for ptnetmap */
/* Let the parent free the (shared) krings, then clear our aliases so we
 * never touch the freed arrays. */
1089 nm_pt_host_krings_delete(struct netmap_adapter *na)
1091 struct netmap_pt_host_adapter *pth_na =
1092 (struct netmap_pt_host_adapter *)na;
1093 struct netmap_adapter *parent = pth_na->parent;
1095 DBG(D("%s", pth_na->up.name));
1097 parent->nm_krings_delete(parent);
1099 na->tx_rings = na->rx_rings = na->tailroom = NULL;
1102 /* nm_register callback */
/* Enter/leave netmap mode: propagate the memory lookup table to the parent,
 * forward the register request, and maintain the NETMAP_ON/PTNETMAP_HOST
 * flags; on unregister also destroy the ptnetmap state. */
1104 nm_pt_host_register(struct netmap_adapter *na, int onoff)
1106 struct netmap_pt_host_adapter *pth_na =
1107 (struct netmap_pt_host_adapter *)na;
1108 struct netmap_adapter *parent = pth_na->parent;
1110 DBG(D("%s onoff %d", pth_na->up.name, onoff));
1113 /* netmap_do_regif has been called on the ptnetmap na.
1114 * We need to pass the information about the
1115 * memory allocator to the parent before
1116 * putting it in netmap mode
1118 parent->na_lut = na->na_lut;
1121 /* forward the request to the parent */
1122 error = parent->nm_register(parent, onoff);
1128 na->na_flags |= NAF_NETMAP_ON | NAF_PTNETMAP_HOST;
1130 ptnetmap_delete(pth_na);
1131 na->na_flags &= ~(NAF_NETMAP_ON | NAF_PTNETMAP_HOST);
1137 /* nm_dtor callback */
/* Final teardown: stop workers and delete ptnetmap state (covers the case
 * where the hypervisor never issued the explicit delete), then release the
 * busy reference on the parent adapter. */
1139 nm_pt_host_dtor(struct netmap_adapter *na)
1141 struct netmap_pt_host_adapter *pth_na =
1142 (struct netmap_pt_host_adapter *)na;
1143 struct netmap_adapter *parent = pth_na->parent;
1145 DBG(D("%s", pth_na->up.name));
1147 /* The equivalent of NETMAP_PT_HOST_DELETE if the hypervisor
1149 ptnetmap_stop_kctx_workers(pth_na);
1150 ptnetmap_delete(pth_na);
1152 parent->na_flags &= ~NAF_BUSY;
1154 netmap_adapter_put(pth_na->parent);
1155 pth_na->parent = NULL;
1158 /* check if nmr is a request for a ptnetmap adapter that we can satisfy */
/* Lookup/creation path for a ptnetmap host adapter: find the adapter to be
 * passed through (with the ptnetmap flag temporarily cleared from the
 * request), wrap it in a netmap_pt_host_adapter that reuses its sync
 * methods, and mark the parent busy. On success *na is the new adapter. */
1160 netmap_get_pt_host_na(struct nmreq_header *hdr, struct netmap_adapter **na,
1161 struct netmap_mem_d *nmd, int create)
1163 struct nmreq_register *req = (struct nmreq_register *)(uintptr_t)hdr->nr_body;
1164 struct nmreq_register preq;
1165 struct netmap_adapter *parent; /* target adapter */
1166 struct netmap_pt_host_adapter *pth_na;
1167 struct ifnet *ifp = NULL;
1170 /* Check if it is a request for a ptnetmap adapter */
1171 if ((req->nr_flags & (NR_PTNETMAP_HOST)) == 0) {
1175 D("Requesting a ptnetmap host adapter");
1177 pth_na = nm_os_malloc(sizeof(*pth_na));
1178 if (pth_na == NULL) {
1183 /* first, try to find the adapter that we want to passthrough
1184 * We use the same req, after we have turned off the ptnetmap flag.
1185 * In this way we can potentially passthrough everything netmap understands.
1187 memcpy(&preq, req, sizeof(preq));
1188 preq.nr_flags &= ~(NR_PTNETMAP_HOST);
/* Temporarily swap the request body; restored right after the lookup. */
1189 hdr->nr_body = (uintptr_t)&preq;
1190 error = netmap_get_na(hdr, &parent, &ifp, nmd, create);
1191 hdr->nr_body = (uintptr_t)req;
1193 D("parent lookup failed: %d", error);
1194 goto put_out_noputparent;
1196 DBG(D("found parent: %s", parent->name));
1198 /* make sure the interface is not already in use */
1199 if (NETMAP_OWNED_BY_ANY(parent)) {
1200 D("NIC %s busy, cannot ptnetmap", parent->name);
1205 pth_na->parent = parent;
1207 /* Follow netmap_attach()-like operations for the host
1208 * ptnetmap adapter. */
1210 //XXX pth_na->up.na_flags = parent->na_flags;
1211 pth_na->up.num_rx_rings = parent->num_rx_rings;
1212 pth_na->up.num_tx_rings = parent->num_tx_rings;
1213 pth_na->up.num_tx_desc = parent->num_tx_desc;
1214 pth_na->up.num_rx_desc = parent->num_rx_desc;
1216 pth_na->up.nm_dtor = nm_pt_host_dtor;
1217 pth_na->up.nm_register = nm_pt_host_register;
1219 /* Reuse parent's adapter txsync and rxsync methods. */
1220 pth_na->up.nm_txsync = parent->nm_txsync;
1221 pth_na->up.nm_rxsync = parent->nm_rxsync;
1223 pth_na->up.nm_krings_create = nm_pt_host_krings_create;
1224 pth_na->up.nm_krings_delete = nm_pt_host_krings_delete;
1225 pth_na->up.nm_config = nm_pt_host_config;
1227 /* Set the notify method only for convenience, it will never
1228 * be used, since - differently from default krings_create - we
1229 * ptnetmap krings_create callback inits kring->nm_notify
1231 pth_na->up.nm_notify = nm_unused_notify;
1233 pth_na->up.nm_mem = netmap_mem_get(parent->nm_mem);
1235 pth_na->up.na_flags |= NAF_HOST_RINGS;
1237 error = netmap_attach_common(&pth_na->up);
1239 D("ERROR netmap_attach_common()");
1244 /* set parent busy, because attached for ptnetmap */
1245 parent->na_flags |= NAF_BUSY;
1246 strncpy(pth_na->up.name, parent->name, sizeof(pth_na->up.name));
1247 strcat(pth_na->up.name, "-PTN");
1248 netmap_adapter_get(*na);
1250 DBG(D("%s ptnetmap request DONE", pth_na->up.name));
1252 /* drop the reference to the ifp, if any */
/* Error path: release the parent reference taken by netmap_get_na(). */
1259 netmap_adapter_put(parent);
1262 put_out_noputparent:
1266 #endif /* WITH_PTNETMAP_HOST */
1268 #ifdef WITH_PTNETMAP_GUEST
1270 * Guest ptnetmap txsync()/rxsync() routines, used in ptnet device drivers.
1271 * These routines are reused across the different operating systems supported
1276 * Reconcile host and guest views of the transmit ring.
1278 * Guest user wants to transmit packets up to the one before ring->head,
1279 * and guest kernel knows tx_ring->hwcur is the first packet unsent
1280 * by the host kernel.
1282 * We push out as many packets as possible, and possibly
1283 * reclaim buffers from previously completed transmission.
1285 * Notifications from the host are enabled only if the user guest would
1286 * block (no space in the ring).
/* Guest-side txsync: runs inside the guest, so the CSB is mapped directly
 * (no __user accessors). Returns whether the host must be kicked ('notify'). */
1289 netmap_pt_guest_txsync(struct ptnet_csb_gh *ptgh, struct ptnet_csb_hg *pthg,
1290 struct netmap_kring *kring, int flags)
1292 bool notify = false;
1294 /* Disable notifications */
1295 ptgh->guest_need_kick = 0;
1298 * First part: tell the host (updating the CSB) to process the new
1301 kring->nr_hwcur = pthg->hwcur;
1302 ptnetmap_guest_write_kring_csb(ptgh, kring->rcur, kring->rhead);
1304 /* Ask for a kick from a guest to the host if needed. */
1305 if (((kring->rhead != kring->nr_hwcur || nm_kr_txempty(kring))
1306 && NM_ACCESS_ONCE(pthg->host_need_kick)) ||
1307 (flags & NAF_FORCE_RECLAIM)) {
1308 ptgh->sync_flags = flags;
1313 * Second part: reclaim buffers for completed transmissions.
1315 if (nm_kr_txempty(kring) || (flags & NAF_FORCE_RECLAIM)) {
1316 ptnetmap_guest_read_kring_csb(pthg, kring);
1320 * No more room in the ring for new transmissions. The user thread will
1321 * go to sleep and we need to be notified by the host when more free
1322 * space is available.
1324 if (nm_kr_txempty(kring) && !(kring->nr_kflags & NKR_NOINTR)) {
1325 /* Reenable notifications. */
1326 ptgh->guest_need_kick = 1;
/* Re-read after enabling notifications, to close the race with a
 * host that freed slots in the meantime. */
1328 ptnetmap_guest_read_kring_csb(pthg, kring);
1329 /* If there is new free space, disable notifications */
1330 if (unlikely(!nm_kr_txempty(kring))) {
1331 ptgh->guest_need_kick = 0;
1335 ND(1, "%s CSB(head:%u cur:%u hwtail:%u) KRING(head:%u cur:%u tail:%u)",
1336 kring->name, ptgh->head, ptgh->cur, pthg->hwtail,
1337 kring->rhead, kring->rcur, kring->nr_hwtail);
1343 * Reconcile host and guest view of the receive ring.
1345 * Update hwcur/hwtail from host (reading from CSB).
1347 * If guest user has released buffers up to the one before ring->head, we
1348 * also give them to the host.
 * Notifications from the host are enabled only if the guest user would
 * block (no more completed slots in the ring).
/*
 * netmap_pt_guest_rxsync() - guest-side RX reconciliation for ptnet drivers.
 *
 * @ptgh:  guest-to-host CSB half (written by the guest, read by the host)
 * @pthg:  host-to-guest CSB half (written by the host, read by the guest)
 * @kring: guest kring being synchronized
 * @flags: sync flags propagated to the host through the CSB
 *
 * Imports newly received packets (hwcur/hwtail from the CSB), returns
 * buffers released by the guest user to the host, and re-enables host
 * notifications only when there are no completed RX slots left.
 *
 * NOTE(review): this excerpt is elided — the return-type line, braces,
 * the "notify = true;" assignments, the second argument of the
 * write_kring_csb() call and the final "return notify;" are not visible
 * here; confirm against the full source.
 */
netmap_pt_guest_rxsync(struct ptnet_csb_gh *ptgh, struct ptnet_csb_hg *pthg,
		struct netmap_kring *kring, int flags)
	/* Becomes true when the caller must kick (notify) the host. */
	bool notify = false;

	/* Disable notifications */
	ptgh->guest_need_kick = 0;

	/*
	 * First part: import newly received packets, by updating the kring
	 * hwtail to the hwtail known from the host (read from the CSB).
	 * This also updates the kring hwcur.
	 */
	ptnetmap_guest_read_kring_csb(pthg, kring);
	/* We have seen the interrupt's work; clear the pending flag. */
	kring->nr_kflags &= ~NKR_PENDINTR;

	/*
	 * Second part: tell the host about the slots that guest user has
	 * released, by updating cur and head in the CSB.
	 */
	if (kring->rhead != kring->nr_hwcur) {
		ptnetmap_guest_write_kring_csb(ptgh, kring->rcur,
		/* Ask for a kick from the guest to the host if needed. */
		if (NM_ACCESS_ONCE(pthg->host_need_kick)) {
			ptgh->sync_flags = flags;

	/*
	 * No more completed RX slots. The user thread will go to sleep and
	 * we need to be notified by the host when more RX slots have been
	 * completed.
	 */
	if (nm_kr_rxempty(kring) && !(kring->nr_kflags & NKR_NOINTR)) {
		/* Reenable notifications. */
		ptgh->guest_need_kick = 1;
		/* Double check by re-reading the CSB, to close the race
		 * with the host completing slots before the re-enable. */
		ptnetmap_guest_read_kring_csb(pthg, kring);
		/* If there are new slots, disable notifications. */
		if (!nm_kr_rxempty(kring)) {
			ptgh->guest_need_kick = 0;

	/* Debug-only trace of CSB vs. kring state (compiled out normally). */
	ND(1, "%s CSB(head:%u cur:%u hwtail:%u) KRING(head:%u cur:%u tail:%u)",
		kring->name, ptgh->head, ptgh->cur, pthg->hwtail,
		kring->rhead, kring->rcur, kring->nr_hwtail);
1408 * Callbacks for ptnet drivers: nm_krings_create, nm_krings_delete, nm_dtor.
/*
 * ptnet_nm_krings_create() - nm_krings_create callback for ptnet drivers.
 *
 * Creates the krings on the public netmap adapter and then shares the
 * same ring arrays with the driver-private adapter, so both views operate
 * on identical rings.
 *
 * NOTE(review): braces and return statements are elided in this excerpt;
 * the backend_regifs test presumably short-circuits kring creation when
 * the backend has already registered interfaces — confirm against the
 * full source.
 */
ptnet_nm_krings_create(struct netmap_adapter *na)
	struct netmap_pt_guest_adapter *ptna =
		(struct netmap_pt_guest_adapter *)na; /* Upcast. */
	/* Public adapter, exposed to netmap applications. */
	struct netmap_adapter *na_nm = &ptna->hwup.up;
	/* Adapter private to the ptnet driver, never exposed. */
	struct netmap_adapter *na_dr = &ptna->dr.up;

	if (ptna->backend_regifs) {

	/* Create krings on the public netmap adapter. */
	ret = netmap_hw_krings_create(na_nm);

	/* Copy krings into the netmap adapter private to the driver. */
	na_dr->tx_rings = na_nm->tx_rings;
	na_dr->rx_rings = na_nm->rx_rings;
/*
 * ptnet_nm_krings_delete() - nm_krings_delete callback for ptnet drivers.
 *
 * Unshares the ring arrays from the driver-private adapter (which never
 * owned them) and deletes the krings on the public adapter.
 *
 * NOTE(review): braces and return statements are elided in this excerpt;
 * the backend_regifs test presumably skips deletion while the backend
 * still has registered interfaces — confirm against the full source.
 */
ptnet_nm_krings_delete(struct netmap_adapter *na)
	struct netmap_pt_guest_adapter *ptna =
		(struct netmap_pt_guest_adapter *)na; /* Upcast. */
	/* Public adapter (owns the krings). */
	struct netmap_adapter *na_nm = &ptna->hwup.up;
	/* Driver-private adapter (only borrowed the ring pointers). */
	struct netmap_adapter *na_dr = &ptna->dr.up;

	if (ptna->backend_regifs) {

	/* Drop the borrowed pointers before the rings are destroyed. */
	na_dr->tx_rings = NULL;
	na_dr->rx_rings = NULL;

	netmap_hw_krings_delete(na_nm);
/*
 * ptnet_nm_dtor() - nm_dtor callback for ptnet drivers.
 *
 * Releases the memory-allocator reference held by the driver-private
 * adapter, clears that adapter, and detaches this ifp from the
 * pass-through guest memory allocator.
 */
ptnet_nm_dtor(struct netmap_adapter *na)
	struct netmap_pt_guest_adapter *ptna =
		(struct netmap_pt_guest_adapter *)na;

	/* Drop the reference taken in netmap_pt_guest_attach(). */
	netmap_mem_put(ptna->dr.up.nm_mem);
	memset(&ptna->dr, 0, sizeof(ptna->dr));
	netmap_mem_pt_guest_ifp_del(na->nm_mem, na->ifp);
/*
 * netmap_pt_guest_attach() - attach a netmap adapter for a ptnet guest.
 *
 * @arg:         adapter template prepared by the ptnet driver
 * @nifp_offset: offset of the netmap_if inside the passed-through memory
 * @memid:       identifier of the host memory allocator
 *
 * Creates a netmap memory allocator backed by the host's (pass-through)
 * memory, attaches the adapter, and then initializes a second,
 * driver-private adapter (ptna->dr) that is never exposed to netmap
 * applications.
 *
 * NOTE(review): error-handling branches are elided in this excerpt
 * (e.g. after netmap_mem_pt_guest_new() and netmap_attach_ext(), and
 * the return statements) — confirm against the full source.
 */
netmap_pt_guest_attach(struct netmap_adapter *arg,
		unsigned int nifp_offset, unsigned int memid)
	struct netmap_pt_guest_adapter *ptna;
	struct ifnet *ifp = arg ? arg->ifp : NULL;

	/* Back this adapter with the host's pass-through memory. */
	arg->nm_mem = netmap_mem_pt_guest_new(ifp, nifp_offset, memid);
	if (arg->nm_mem == NULL)
	/* The adapter owns its allocator and must release it on detach. */
	arg->na_flags |= NAF_MEM_OWNER;
	error = netmap_attach_ext(arg, sizeof(struct netmap_pt_guest_adapter), 1);

	/* get the netmap_pt_guest_adapter */
	ptna = (struct netmap_pt_guest_adapter *) NA(ifp);

	/* Initialize a separate pass-through netmap adapter that is going to
	 * be used by the ptnet driver only, and so never exposed to netmap
	 * applications. We only need a subset of the available fields. */
	memset(&ptna->dr, 0, sizeof(ptna->dr));
	ptna->dr.up.ifp = ifp;
	/* Take an extra allocator reference for the private adapter;
	 * released by ptnet_nm_dtor() via netmap_mem_put(). */
	ptna->dr.up.nm_mem = netmap_mem_get(ptna->hwup.up.nm_mem);
	ptna->dr.up.nm_config = ptna->hwup.up.nm_config;

	/* No interfaces registered on the backend yet. */
	ptna->backend_regifs = 0;
1498 #endif /* WITH_PTNETMAP_GUEST */