/*-
 * Copyright (C) 2013 Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <net/if.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/ethernet.h>

#include <machine/bus.h>
#include <machine/cpufunc.h>
#include <machine/pmap.h>

#include "../ntb_hw/ntb_hw.h"

/*
 * The Non-Transparent Bridge (NTB) is a device on some Intel processors that
 * allows you to connect two systems using a PCI-e link.
 *
 * This module contains a protocol for sending and receiving messages, and
 * exposes that protocol through a simulated ethernet device called ntb.
 *
 * NOTE: Much of the code in this module is shared with Linux. Any patches may
 * be picked up and redistributed in Linux with a dual GPL/BSD license.
 */

/* TODO: These functions should really be part of the kernel */
#define test_bit(pos, bitmap_addr)	(*(bitmap_addr) & (1UL << (pos)))
#define set_bit(pos, bitmap_addr)	(*(bitmap_addr) |= (1UL << (pos)))
#define clear_bit(pos, bitmap_addr)	(*(bitmap_addr) &= ~(1UL << (pos)))
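
/*
 * Usage sketch (illustrative only): for a uint64_t map that starts at 0,
 * set_bit(3, &map) leaves map == 0x8, test_bit(3, &map) then evaluates
 * non-zero, and clear_bit(3, &map) returns map to 0.  The qp_bitmap of
 * free queue pairs below is managed with these macros.
 */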

#define KTR_NTB KTR_SPARE3

#define NTB_TRANSPORT_VERSION	3
#define NTB_RX_MAX_PKTS		64
#define NTB_RXQ_SIZE		300

static unsigned int transport_mtu = 0x4000 + ETHER_HDR_LEN + ETHER_CRC_LEN;

/*
 * This is an oversimplification to work around Xeon errata.  The second client
 * may be usable for unidirectional traffic.
 */
static unsigned int max_num_clients = 1;

STAILQ_HEAD(ntb_queue_list, ntb_queue_entry);

struct ntb_queue_entry {
        /* ntb_queue list reference */
        STAILQ_ENTRY(ntb_queue_entry) entry;

        /* info on data to be transferred */

struct ntb_transport_qp {
        struct ntb_netdev       *transport;
        struct ntb_softc        *ntb;

        void                    *cb_data;

        bool                    client_ready;
        bool                    qp_link;
        uint8_t                 qp_num; /* Only 64 QPs are allowed. 0-63 */

        struct ntb_rx_info      *rx_info;
        struct ntb_rx_info      *remote_rx_info;

        void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
            void *data, int len);
        struct ntb_queue_list   tx_free_q;
        struct mtx              ntb_tx_free_q_lock;
        void                    *tx_mw;
        uint64_t                tx_index;
        uint64_t                tx_max_entry;
        uint64_t                tx_max_frame;

        void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
            void *data, int len);
        struct ntb_queue_list   rx_pend_q;
        struct ntb_queue_list   rx_free_q;
        struct mtx              ntb_rx_pend_q_lock;
        struct mtx              ntb_rx_free_q_lock;
        struct task             rx_completion_task;
        void                    *rx_buff;
        uint64_t                rx_index;
        uint64_t                rx_max_entry;
        uint64_t                rx_max_frame;

        void (*event_handler) (void *data, int status);
        struct callout          link_work;
        struct callout          queue_full;
        struct callout          rx_full;

        uint64_t                last_rx_no_buf;

        /* Stats */
        uint64_t                rx_bytes;
        uint64_t                rx_pkts;
        uint64_t                rx_ring_empty;
        uint64_t                rx_err_no_buf;
        uint64_t                rx_err_oflow;
        uint64_t                tx_bytes;
        uint64_t                tx_pkts;
        uint64_t                tx_ring_full;
};

struct ntb_queue_handlers {
        void (*rx_handler) (struct ntb_transport_qp *qp, void *qp_data,
            void *data, int len);
        void (*tx_handler) (struct ntb_transport_qp *qp, void *qp_data,
            void *data, int len);
        void (*event_handler) (void *data, int status);
};

struct ntb_transport_mw {
        size_t          size;
        void            *virt_addr;
        vm_paddr_t      dma_addr;
};

struct ntb_netdev {
        struct ntb_softc        *ntb;
        struct ifnet            *ifp;
        struct ntb_transport_mw mw[NTB_NUM_MW];
        struct ntb_transport_qp *qps;
        uint64_t                max_qps;
        uint64_t                qp_bitmap;
        bool                    transport_link;
        struct callout          link_work;
        struct ntb_transport_qp *qp;
        uint64_t                bufsize;
        u_char                  eaddr[ETHER_ADDR_LEN];
        struct mtx              tx_lock;
        struct mtx              rx_lock;
};

static struct ntb_netdev net_softc;

enum {
        IF_NTB_DESC_DONE_FLAG = 1 << 0,
        IF_NTB_LINK_DOWN_FLAG = 1 << 1,
};

struct ntb_payload_header {
        uint64_t        ver;
        uint64_t        len;
        uint64_t        flags;
};

/*
 * The order of this enum is part of the if_ntb remote protocol.  Do
 * not reorder without bumping protocol version (and it's probably best
 * to keep the protocol in lock-step with the Linux NTB driver).
 *
 * N.B.: transport_link_work assumes MW1 enums = MW0 + 2.
 */

#define QP_TO_MW(qp)		((qp) % NTB_NUM_MW)
#define NTB_QP_DEF_NUM_ENTRIES	100
#define NTB_LINK_DOWN_TIMEOUT	10
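
/*
 * Illustrative mapping (assuming NTB_NUM_MW is 2, as the MW0/MW1 scratchpad
 * registers suggest): QP_TO_MW sends qp 0 to memory window 0, qp 1 to
 * window 1, qp 2 back to window 0, striping queue pairs across the windows.
 */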

static int ntb_handle_module_events(struct module *m, int what, void *arg);
static int ntb_setup_interface(void);
static int ntb_teardown_interface(void);
static void ntb_net_init(void *arg);
static int ntb_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
static void ntb_start(struct ifnet *ifp);
static void ntb_net_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
    void *data, int len);
static void ntb_net_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
    void *data, int len);
static void ntb_net_event_handler(void *data, int status);
static int ntb_transport_init(struct ntb_softc *ntb);
static void ntb_transport_free(void *transport);
static void ntb_transport_init_queue(struct ntb_netdev *nt,
    unsigned int qp_num);
static void ntb_transport_free_queue(struct ntb_transport_qp *qp);
static struct ntb_transport_qp * ntb_transport_create_queue(void *data,
    struct ntb_softc *pdev, const struct ntb_queue_handlers *handlers);
static void ntb_transport_link_up(struct ntb_transport_qp *qp);
static int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb,
    void *data, unsigned int len);
static int ntb_process_tx(struct ntb_transport_qp *qp,
    struct ntb_queue_entry *entry);
static void ntb_tx_copy_task(struct ntb_transport_qp *qp,
    struct ntb_queue_entry *entry, void *offset);
static void ntb_qp_full(void *arg);
static void ntb_transport_rxc_db(void *data, int db_num);
static void ntb_rx_pendq_full(void *arg);
static void ntb_transport_rx(struct ntb_transport_qp *qp);
static int ntb_process_rxc(struct ntb_transport_qp *qp);
static void ntb_rx_copy_task(struct ntb_transport_qp *qp,
    struct ntb_queue_entry *entry, void *offset);
static void ntb_rx_completion_task(void *arg, int pending);
static void ntb_transport_event_callback(void *data, enum ntb_hw_event event);
static void ntb_transport_link_work(void *arg);
static int ntb_set_mw(struct ntb_netdev *nt, int num_mw, unsigned int size);
static void ntb_free_mw(struct ntb_netdev *nt, int num_mw);
static void ntb_transport_setup_qp_mw(struct ntb_netdev *nt,
    unsigned int qp_num);
static void ntb_qp_link_work(void *arg);
static void ntb_transport_link_cleanup(struct ntb_netdev *nt);
static void ntb_qp_link_down(struct ntb_transport_qp *qp);
static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp);
static void ntb_transport_link_down(struct ntb_transport_qp *qp);
static void ntb_send_link_down(struct ntb_transport_qp *qp);
static void ntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry,
    struct ntb_queue_list *list);
static struct ntb_queue_entry *ntb_list_rm(struct mtx *lock,
    struct ntb_queue_list *list);
static void create_random_local_eui48(u_char *eaddr);
static unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp);

MALLOC_DEFINE(M_NTB_IF, "if_ntb", "ntb network driver");

/* Module setup and teardown */
static int
ntb_handle_module_events(struct module *m, int what, void *arg)
{
        int err = 0;

        switch (what) {
        case MOD_LOAD:
                err = ntb_setup_interface();
                break;
        case MOD_UNLOAD:
                err = ntb_teardown_interface();
                break;
        default:
                err = EOPNOTSUPP;
                break;
        }
        return (err);
}

static moduledata_t if_ntb_mod = {
        "if_ntb",
        ntb_handle_module_events,
        NULL
};

DECLARE_MODULE(if_ntb, if_ntb_mod, SI_SUB_KLD, SI_ORDER_ANY);
MODULE_DEPEND(if_ntb, ntb_hw, 1, 1, 1);

static int
ntb_setup_interface(void)
{
        struct ifnet *ifp;
        struct ntb_queue_handlers handlers = { ntb_net_rx_handler,
            ntb_net_tx_handler, ntb_net_event_handler };

        net_softc.ntb = devclass_get_softc(devclass_find("ntb_hw"), 0);
        if (net_softc.ntb == NULL) {
                printf("ntb: Cannot find devclass\n");
                return (ENXIO);
        }

        ntb_transport_init(net_softc.ntb);

        ifp = net_softc.ifp = if_alloc(IFT_ETHER);
        if (ifp == NULL) {
                printf("ntb: cannot allocate ifnet structure\n");
                return (ENOMEM);
        }

        net_softc.qp = ntb_transport_create_queue(ifp, net_softc.ntb,
            &handlers);
        if_initname(ifp, "ntb", 0);
        ifp->if_init = ntb_net_init;
        ifp->if_softc = &net_softc;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX;
        ifp->if_ioctl = ntb_ioctl;
        ifp->if_start = ntb_start;
        IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
        ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
        IFQ_SET_READY(&ifp->if_snd);
        create_random_local_eui48(net_softc.eaddr);
        ether_ifattach(ifp, net_softc.eaddr);
        ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_JUMBO_MTU;
        ifp->if_capenable = ifp->if_capabilities;

        ntb_transport_link_up(net_softc.qp);
        net_softc.bufsize = ntb_transport_max_size(net_softc.qp) +
            sizeof(struct ether_header);
        return (0);
}

static int
ntb_teardown_interface(void)
{

        if (net_softc.qp != NULL)
                ntb_transport_link_down(net_softc.qp);

        if (net_softc.ifp != NULL) {
                ether_ifdetach(net_softc.ifp);
                if_free(net_softc.ifp);
        }

        if (net_softc.qp != NULL) {
                ntb_transport_free_queue(net_softc.qp);
                ntb_transport_free(&net_softc);
        }

        return (0);
}

/* Network device interface */

static void
ntb_net_init(void *arg)
{
        struct ntb_netdev *ntb_softc = arg;
        struct ifnet *ifp = ntb_softc->ifp;

        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
        ifp->if_flags |= IFF_UP;
        if_link_state_change(ifp, LINK_STATE_UP);
}

static int
ntb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
        struct ntb_netdev *nt = ifp->if_softc;
        struct ifreq *ifr = (struct ifreq *)data;
        int error = 0;

        switch (command) {
        case SIOCSIFMTU:
                if (ifr->ifr_mtu > ntb_transport_max_size(nt->qp) -
                    ETHER_HDR_LEN - ETHER_CRC_LEN) {
                        error = EINVAL;
                        break;
                }

                ifp->if_mtu = ifr->ifr_mtu;
                break;
        default:
                error = ether_ioctl(ifp, command, data);
                break;
        }

        return (error);
}

static void
ntb_start(struct ifnet *ifp)
{
        struct mbuf *m_head;
        struct ntb_netdev *nt = ifp->if_softc;
        int rc;

        mtx_lock(&nt->tx_lock);
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
        CTR0(KTR_NTB, "TX: ntb_start");
        while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
                IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
                CTR1(KTR_NTB, "TX: start mbuf %p", m_head);
                rc = ntb_transport_tx_enqueue(nt->qp, m_head, m_head,
                    m_length(m_head, NULL));
                if (rc != 0) {
                        CTR1(KTR_NTB,
                            "TX: could not tx mbuf %p. Returning to snd q",
                            m_head);
                        if (rc == EAGAIN) {
                                ifp->if_drv_flags |= IFF_DRV_OACTIVE;
                                IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
                                callout_reset(&nt->qp->queue_full, hz / 1000,
                                    ntb_qp_full, ifp);
                        }
                        break;
                }
        }
        mtx_unlock(&nt->tx_lock);
}

/* Network Device Callbacks */
static void
ntb_net_tx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data,
    int len)
{

        m_freem(data);
        CTR1(KTR_NTB, "TX: tx_handler freeing mbuf %p", data);
}

static void
ntb_net_rx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data,
    int len)
{
        struct mbuf *m = data;
        struct ifnet *ifp = qp_data;

        CTR0(KTR_NTB, "RX: rx handler");
        (*ifp->if_input)(ifp, m);
}

static void
ntb_net_event_handler(void *data, int status)
{

}

/* Transport Init and teardown */

static int
ntb_transport_init(struct ntb_softc *ntb)
{
        struct ntb_netdev *nt = &net_softc;
        int rc, i;

        nt->max_qps = max_num_clients;
        ntb_register_transport(ntb, nt);
        mtx_init(&nt->tx_lock, "ntb transport tx", NULL, MTX_DEF);
        mtx_init(&nt->rx_lock, "ntb transport rx", NULL, MTX_DEF);

        nt->qps = malloc(nt->max_qps * sizeof(struct ntb_transport_qp),
            M_NTB_IF, M_WAITOK|M_ZERO);

        nt->qp_bitmap = ((uint64_t) 1 << nt->max_qps) - 1;
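
        /*
         * Illustrative: with the default max_num_clients of 1 this bitmap is
         * just 0x1; with 4 QPs it would be 0xf.  Each set bit marks a free
         * queue pair, and ntb_transport_create_queue() claims the lowest one.
         */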

        for (i = 0; i < nt->max_qps; i++)
                ntb_transport_init_queue(nt, i);

        callout_init(&nt->link_work, 0);

        rc = ntb_register_event_callback(ntb,
            ntb_transport_event_callback);
        if (rc != 0)
                goto err;

        if (ntb_query_link_status(ntb)) {
                if (bootverbose)
                        device_printf(ntb_get_device(ntb), "link up\n");
                callout_reset(&nt->link_work, 0, ntb_transport_link_work, nt);
        }

        return (0);

err:
        free(nt->qps, M_NTB_IF);
        ntb_unregister_transport(ntb);
        return (rc);
}

static void
ntb_transport_free(void *transport)
{
        struct ntb_netdev *nt = transport;
        struct ntb_softc *ntb = nt->ntb;
        int i;

        nt->transport_link = NTB_LINK_DOWN;

        callout_drain(&nt->link_work);

        /* verify that all the qps are freed */
        for (i = 0; i < nt->max_qps; i++)
                if (!test_bit(i, &nt->qp_bitmap))
                        ntb_transport_free_queue(&nt->qps[i]);

        ntb_unregister_event_callback(ntb);

        for (i = 0; i < NTB_NUM_MW; i++)
                ntb_free_mw(nt, i);

        free(nt->qps, M_NTB_IF);
        ntb_unregister_transport(ntb);
}

static void
ntb_transport_init_queue(struct ntb_netdev *nt, unsigned int qp_num)
{
        struct ntb_transport_qp *qp;
        unsigned int num_qps_mw, tx_size;
        uint8_t mw_num = QP_TO_MW(qp_num);

        qp = &nt->qps[qp_num];
        qp->qp_num = qp_num;
        qp->transport = nt;
        qp->ntb = nt->ntb;
        qp->qp_link = NTB_LINK_DOWN;
        qp->client_ready = NTB_LINK_DOWN;
        qp->event_handler = NULL;

        if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
                num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
        else
                num_qps_mw = nt->max_qps / NTB_NUM_MW;

        tx_size = (unsigned int) ntb_get_mw_size(qp->ntb, mw_num) / num_qps_mw;
        qp->rx_info = (struct ntb_rx_info *)
            ((char *)ntb_get_mw_vbase(qp->ntb, mw_num) +
            (qp_num / NTB_NUM_MW * tx_size));
        tx_size -= sizeof(struct ntb_rx_info);

        qp->tx_mw = qp->rx_info + 1;
        /* Due to house-keeping, there must be at least 2 buffers */
        qp->tx_max_frame = min(transport_mtu + sizeof(struct ntb_payload_header),
            tx_size / 2);
        qp->tx_max_entry = tx_size / qp->tx_max_frame;
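
        /*
         * Sizing sketch (illustrative numbers, not from the hardware): with a
         * 1MB slice of the memory window and the default transport_mtu
         * (0x4000 plus Ethernet framing), tx_max_frame is roughly 16KB and
         * the ring holds on the order of 60 entries; the min() against
         * tx_size / 2 guarantees at least the two buffers required above.
         */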

        callout_init(&qp->link_work, 0);
        callout_init(&qp->queue_full, 1);
        callout_init(&qp->rx_full, 1);

        mtx_init(&qp->ntb_rx_pend_q_lock, "ntb rx pend q", NULL, MTX_SPIN);
        mtx_init(&qp->ntb_rx_free_q_lock, "ntb rx free q", NULL, MTX_SPIN);
        mtx_init(&qp->ntb_tx_free_q_lock, "ntb tx free q", NULL, MTX_SPIN);
        TASK_INIT(&qp->rx_completion_task, 0, ntb_rx_completion_task, qp);

        STAILQ_INIT(&qp->rx_pend_q);
        STAILQ_INIT(&qp->rx_free_q);
        STAILQ_INIT(&qp->tx_free_q);
}

static void
ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
        struct ntb_queue_entry *entry;

        if (qp == NULL)
                return;

        callout_drain(&qp->link_work);

        ntb_unregister_db_callback(qp->ntb, qp->qp_num);

        while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
                free(entry, M_NTB_IF);

        while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q)))
                free(entry, M_NTB_IF);

        while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
                free(entry, M_NTB_IF);

        set_bit(qp->qp_num, &qp->transport->qp_bitmap);
}

/**
 * ntb_transport_create_queue - Create a new NTB transport layer queue
 * @rx_handler: receive callback function
 * @tx_handler: transmit callback function
 * @event_handler: event callback function
 *
 * Create a new NTB transport layer queue and provide the queue with a callback
 * routine for both transmit and receive. The receive callback routine will be
 * used to pass up data when the transport has received it on the queue. The
 * transmit callback routine will be called when the transport has completed the
 * transmission of the data on the queue and the data is ready to be freed.
 *
 * RETURNS: pointer to newly created ntb_queue, NULL on error.
 */
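
/*
 * Usage sketch (illustrative only; this mirrors ntb_setup_interface() above,
 * where the ifnet pointer doubles as the callback data):
 *
 *      struct ntb_queue_handlers handlers = { ntb_net_rx_handler,
 *          ntb_net_tx_handler, ntb_net_event_handler };
 *
 *      qp = ntb_transport_create_queue(ifp, ntb_softc, &handlers);
 *      if (qp == NULL)
 *              (handle the failure)
 */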

static struct ntb_transport_qp *
ntb_transport_create_queue(void *data, struct ntb_softc *pdev,
    const struct ntb_queue_handlers *handlers)
{
        struct ntb_queue_entry *entry;
        struct ntb_transport_qp *qp;
        struct ntb_netdev *nt;
        unsigned int free_queue;
        int rc, i;

        nt = ntb_find_transport(pdev);
        if (nt == NULL)
                goto err;

        free_queue = ffs(nt->qp_bitmap);
        if (free_queue == 0)
                goto err;

        /* decrement free_queue to make it zero based */
        free_queue--;

        clear_bit(free_queue, &nt->qp_bitmap);

        qp = &nt->qps[free_queue];
        qp->cb_data = data;
        qp->rx_handler = handlers->rx_handler;
        qp->tx_handler = handlers->tx_handler;
        qp->event_handler = handlers->event_handler;

        for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
                entry = malloc(sizeof(struct ntb_queue_entry), M_NTB_IF,
                    M_WAITOK|M_ZERO);
                entry->cb_data = nt->ifp;
                entry->buf = NULL;
                entry->len = transport_mtu;
                ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q);
        }

        for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
                entry = malloc(sizeof(struct ntb_queue_entry), M_NTB_IF,
                    M_WAITOK|M_ZERO);
                ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
        }

        rc = ntb_register_db_callback(qp->ntb, free_queue, qp,
            ntb_transport_rxc_db);
        if (rc != 0)
                goto err1;

        return (qp);

err1:
        while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
                free(entry, M_NTB_IF);
        while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q)))
                free(entry, M_NTB_IF);
        set_bit(free_queue, &nt->qp_bitmap);
err:
        return (NULL);
}

/**
 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
 * @qp: NTB transport layer queue to be enabled
 *
 * Notify NTB transport layer of client readiness to use queue
 */
static void
ntb_transport_link_up(struct ntb_transport_qp *qp)
{

        if (qp == NULL)
                return;

        qp->client_ready = NTB_LINK_UP;
        if (bootverbose)
                device_printf(ntb_get_device(qp->ntb), "qp client ready\n");

        if (qp->transport->transport_link == NTB_LINK_UP)
                callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp);
}

/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which a NTB
 * payload will be transmitted. This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate ERRNO error value on error, or zero for success.
 */
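
/*
 * Usage sketch (illustrative only; this is how ntb_start() above calls it,
 * with the mbuf serving as both the callback cookie and the data buffer):
 *
 *      rc = ntb_transport_tx_enqueue(qp, m_head, m_head,
 *          m_length(m_head, NULL));
 *      if (rc == EAGAIN)
 *              (ring full: requeue the mbuf and retry from the callout)
 */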

static int
ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
    unsigned int len)
{
        struct ntb_queue_entry *entry;
        int rc;

        if (qp == NULL || qp->qp_link != NTB_LINK_UP || len == 0) {
                CTR0(KTR_NTB, "TX: link not up");
                return (EINVAL);
        }

        entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
        if (entry == NULL) {
                CTR0(KTR_NTB, "TX: could not get entry from tx_free_q");
                return (EBUSY);
        }
        CTR1(KTR_NTB, "TX: got entry %p from tx_free_q", entry);

        entry->cb_data = cb;
        entry->buf = data;
        entry->len = len;
        entry->flags = 0;

        rc = ntb_process_tx(qp, entry);
        if (rc != 0) {
                ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
                CTR1(KTR_NTB,
                    "TX: process_tx failed. Returning entry %p to tx_free_q",
                    entry);
        }
        return (rc);
}

static int
ntb_process_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry)
{
        void *offset;

        offset = (char *)qp->tx_mw + qp->tx_max_frame * qp->tx_index;
        CTR3(KTR_NTB,
            "TX: process_tx: tx_pkts=%u, tx_index=%u, remote entry=%u",
            qp->tx_pkts, qp->tx_index, qp->remote_rx_info->entry);
        if (qp->tx_index == qp->remote_rx_info->entry) {
                CTR0(KTR_NTB, "TX: ring full");
                qp->tx_ring_full++;
                return (EAGAIN);
        }

        if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
                if (qp->tx_handler != NULL)
                        qp->tx_handler(qp, qp->cb_data, entry->buf,
                            EIO);

                ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
                CTR1(KTR_NTB,
                    "TX: frame too big. returning entry %p to tx_free_q",
                    entry);
                return (0);
        }
        CTR2(KTR_NTB, "TX: copying entry %p to offset %p", entry, offset);
        ntb_tx_copy_task(qp, entry, offset);

        qp->tx_index++;
        qp->tx_index %= qp->tx_max_entry;

        qp->tx_pkts++;

        return (0);
}

static void
ntb_tx_copy_task(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry,
    void *offset)
{
        struct ntb_payload_header *hdr;

        CTR2(KTR_NTB, "TX: copying %d bytes to offset %p", entry->len, offset);
        if (entry->buf != NULL)
                m_copydata((struct mbuf *)entry->buf, 0, entry->len, offset);

        hdr = (struct ntb_payload_header *)((char *)offset + qp->tx_max_frame -
            sizeof(struct ntb_payload_header));
        hdr->len = entry->len; /* TODO: replace with bus_space_write */
        hdr->ver = qp->tx_pkts; /* TODO: replace with bus_space_write */
        wmb();
        /* TODO: replace with bus_space_write */
        hdr->flags = entry->flags | IF_NTB_DESC_DONE_FLAG;
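
        /*
         * Descriptive note: the DONE flag is deliberately the last store,
         * after the wmb(), so that the peer's ntb_process_rxc() can never
         * observe a descriptor whose len/ver fields are not yet valid.
         */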

        ntb_ring_doorbell(qp->ntb, qp->qp_num);

        /*
         * The entry length can only be zero if the packet is intended to be a
         * "link down" or similar.  Since no payload is being sent in these
         * cases, there is nothing to add to the completion queue.
         */
        if (entry->len > 0) {
                qp->tx_bytes += entry->len;

                if (qp->tx_handler)
                        qp->tx_handler(qp, qp->cb_data, entry->cb_data,
                            entry->len);
        }

        CTR2(KTR_NTB,
            "TX: entry %p sent. hdr->ver = %d, Returning to tx_free_q", entry,
            hdr->ver);
        ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
}

static void
ntb_qp_full(void *arg)
{

        CTR0(KTR_NTB, "TX: qp_full callout");
        ntb_start(arg);
}

static void
ntb_transport_rxc_db(void *data, int db_num)
{
        struct ntb_transport_qp *qp = data;

        ntb_transport_rx(qp);
}

static void
ntb_rx_pendq_full(void *arg)
{

        CTR0(KTR_NTB, "RX: ntb_rx_pendq_full callout");
        ntb_transport_rx(arg);
}

static void
ntb_transport_rx(struct ntb_transport_qp *qp)
{
        int rc;
        uint64_t i;

        /*
         * Limit the number of packets processed in a single interrupt to
         * provide fairness to others
         */
        mtx_lock(&qp->transport->rx_lock);
        CTR0(KTR_NTB, "RX: transport_rx");
        for (i = 0; i < qp->rx_max_entry; i++) {
                rc = ntb_process_rxc(qp);
                if (rc != 0) {
                        CTR0(KTR_NTB, "RX: process_rxc failed");
                        break;
                }
        }
        mtx_unlock(&qp->transport->rx_lock);
}

static int
ntb_process_rxc(struct ntb_transport_qp *qp)
{
        struct ntb_payload_header *hdr;
        struct ntb_queue_entry *entry;
        void *offset;

        offset = (void *)
            ((char *)qp->rx_buff + qp->rx_max_frame * qp->rx_index);
        hdr = (void *)
            ((char *)offset + qp->rx_max_frame -
            sizeof(struct ntb_payload_header));

        CTR1(KTR_NTB, "RX: process_rxc rx_index = %u", qp->rx_index);
        entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q);
        if (entry == NULL) {
                qp->rx_err_no_buf++;
                CTR0(KTR_NTB, "RX: No entries in rx_pend_q");
                return (ENOMEM);
        }
        callout_stop(&qp->rx_full);
        CTR1(KTR_NTB, "RX: rx entry %p from rx_pend_q", entry);

        if ((hdr->flags & IF_NTB_DESC_DONE_FLAG) == 0) {
                CTR1(KTR_NTB,
                    "RX: hdr not done. Returning entry %p to rx_pend_q", entry);
                ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q);
                qp->rx_ring_empty++;
                return (EAGAIN);
        }

        if (hdr->ver != (uint32_t) qp->rx_pkts) {
                CTR3(KTR_NTB, "RX: ver != rx_pkts (%x != %lx). "
                    "Returning entry %p to rx_pend_q", hdr->ver, qp->rx_pkts,
                    entry);
                ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q);
                return (EIO);
        }

        if ((hdr->flags & IF_NTB_LINK_DOWN_FLAG) != 0) {
                ntb_qp_link_down(qp);
                CTR1(KTR_NTB,
                    "RX: link down. adding entry %p back to rx_pend_q", entry);
                ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q);
                goto out;
        }

        if (hdr->len <= entry->len) {
                entry->len = hdr->len;
                ntb_rx_copy_task(qp, entry, offset);
        } else {
                CTR1(KTR_NTB,
                    "RX: len too long. Returning entry %p to rx_pend_q", entry);
                ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q);
                qp->rx_err_oflow++;
        }

        qp->rx_bytes += hdr->len;
        qp->rx_pkts++;
        CTR1(KTR_NTB, "RX: received %ld rx_pkts", qp->rx_pkts);

out:
        /* Ensure that the data is globally visible before clearing the flag */
        wmb();
        hdr->flags = 0;
        /* TODO: replace with bus_space_write */
        qp->rx_info->entry = qp->rx_index;

        qp->rx_index++;
        qp->rx_index %= qp->rx_max_entry;

        return (0);
}

static void
ntb_rx_copy_task(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry,
    void *offset)
{
        struct ifnet *ifp = entry->cb_data;
        unsigned int len = entry->len;
        struct mbuf *m;

        CTR2(KTR_NTB, "RX: copying %d bytes from offset %p", len, offset);
        m = m_devget(offset, len, 0, ifp, NULL);
        m->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID;

        entry->buf = (void *)m;

        CTR2(KTR_NTB,
            "RX: copied entry %p to mbuf %p. Adding entry to rx_free_q", entry,
            m);
        ntb_list_add(&qp->ntb_rx_free_q_lock, entry, &qp->rx_free_q);

        taskqueue_enqueue(taskqueue_swi, &qp->rx_completion_task);
}

static void
ntb_rx_completion_task(void *arg, int pending)
{
        struct ntb_transport_qp *qp = arg;
        struct mbuf *m;
        struct ntb_queue_entry *entry;

        CTR0(KTR_NTB, "RX: rx_completion_task");

        while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) {
                m = entry->buf;
                CTR2(KTR_NTB, "RX: completing entry %p, mbuf %p", entry, m);
                if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
                        qp->rx_handler(qp, qp->cb_data, m, entry->len);

                entry->buf = NULL;
                entry->len = qp->transport->bufsize;

                CTR1(KTR_NTB, "RX: entry %p removed from rx_free_q "
                    "and added to rx_pend_q", entry);
                ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q);
                if (qp->rx_err_no_buf > qp->last_rx_no_buf) {
                        qp->last_rx_no_buf = qp->rx_err_no_buf;
                        CTR0(KTR_NTB, "RX: could spawn rx task");
                        callout_reset(&qp->rx_full, hz / 1000, ntb_rx_pendq_full,
                            qp);
                }
        }
}

/* Link Event handler */
static void
ntb_transport_event_callback(void *data, enum ntb_hw_event event)
{
        struct ntb_netdev *nt = data;

        switch (event) {
        case NTB_EVENT_HW_LINK_UP:
                if (bootverbose)
                        device_printf(ntb_get_device(nt->ntb), "HW link up\n");
                callout_reset(&nt->link_work, 0, ntb_transport_link_work, nt);
                break;
        case NTB_EVENT_HW_LINK_DOWN:
                if (bootverbose)
                        device_printf(ntb_get_device(nt->ntb), "HW link down\n");
                ntb_transport_link_cleanup(nt);
                break;
        default:
                panic("ntb: Unknown NTB event");
        }
}

static void
ntb_transport_link_work(void *arg)
{
        struct ntb_netdev *nt = arg;
        struct ntb_softc *ntb = nt->ntb;
        struct ntb_transport_qp *qp;
        uint64_t val64;
        uint32_t val, i, num_mw;
        int rc;

        if (ntb_has_feature(ntb, NTB_REGS_THRU_MW))
                num_mw = NTB_NUM_MW - 1;
        else
                num_mw = NTB_NUM_MW;

        /* send the local info, in the opposite order of the way we read it */
        for (i = 0; i < num_mw; i++) {
                rc = ntb_write_remote_spad(ntb, IF_NTB_MW0_SZ_HIGH + (i * 2),
                    (uint64_t)ntb_get_mw_size(ntb, i) >> 32);
                if (rc != 0)
                        goto out;

                rc = ntb_write_remote_spad(ntb, IF_NTB_MW0_SZ_LOW + (i * 2),
                    (uint32_t)ntb_get_mw_size(ntb, i));
                if (rc != 0)
                        goto out;
        }

        rc = ntb_write_remote_spad(ntb, IF_NTB_NUM_MWS, num_mw);
        if (rc != 0)
                goto out;

        rc = ntb_write_remote_spad(ntb, IF_NTB_NUM_QPS, nt->max_qps);
        if (rc != 0)
                goto out;

        rc = ntb_write_remote_spad(ntb, IF_NTB_VERSION, NTB_TRANSPORT_VERSION);
        if (rc != 0)
                goto out;
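
        /*
         * Descriptive sketch of the scratchpad protocol used above (one
         * side's remote spads are the peer's local spads, so these writes
         * surface in the peer's ntb_read_local_spad() calls below):
         *
         *      IF_NTB_VERSION              NTB_TRANSPORT_VERSION (3)
         *      IF_NTB_NUM_QPS              nt->max_qps
         *      IF_NTB_NUM_MWS              number of memory windows
         *      IF_NTB_MW0_SZ_HIGH/_LOW     MW0 size as two 32-bit halves
         *                                  (MW1 follows at MW0 + 2)
         */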

        /* Query the remote side for its info */
        rc = ntb_read_local_spad(ntb, IF_NTB_VERSION, &val);
        if (rc != 0)
                goto out;

        if (val != NTB_TRANSPORT_VERSION)
                goto out;

        rc = ntb_read_local_spad(ntb, IF_NTB_NUM_QPS, &val);
        if (rc != 0)
                goto out;

        if (val != nt->max_qps)
                goto out;

        rc = ntb_read_local_spad(ntb, IF_NTB_NUM_MWS, &val);
        if (rc != 0)
                goto out;

        if (val != num_mw)
                goto out;

        for (i = 0; i < num_mw; i++) {
                rc = ntb_read_local_spad(ntb, IF_NTB_MW0_SZ_HIGH + (i * 2),
                    &val);
                if (rc != 0)
                        goto free_mws;

                val64 = (uint64_t)val << 32;

                rc = ntb_read_local_spad(ntb, IF_NTB_MW0_SZ_LOW + (i * 2),
                    &val);
                if (rc != 0)
                        goto free_mws;

                val64 |= val;

                rc = ntb_set_mw(nt, i, val64);
                if (rc != 0)
                        goto free_mws;
        }

        nt->transport_link = NTB_LINK_UP;
        if (bootverbose)
                device_printf(ntb_get_device(ntb), "transport link up\n");

        for (i = 0; i < nt->max_qps; i++) {
                qp = &nt->qps[i];

                ntb_transport_setup_qp_mw(nt, i);

                if (qp->client_ready == NTB_LINK_UP)
                        callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp);
        }

        return;

free_mws:
        for (i = 0; i < NTB_NUM_MW; i++)
                ntb_free_mw(nt, i);
out:
        if (ntb_query_link_status(ntb))
                callout_reset(&nt->link_work,
                    NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_transport_link_work, nt);
}

static int
ntb_set_mw(struct ntb_netdev *nt, int num_mw, unsigned int size)
{
        struct ntb_transport_mw *mw = &nt->mw[num_mw];

        /* No need to re-setup */
        if (mw->size == size)
                return (0);

        if (mw->size != 0)
                ntb_free_mw(nt, num_mw);

        /* Alloc memory for receiving data.  Must be 4k aligned */
        mw->size = size;

        mw->virt_addr = contigmalloc(mw->size, M_NTB_IF, M_ZERO, 0,
            BUS_SPACE_MAXADDR, mw->size, 0);
        if (mw->virt_addr == NULL) {
                printf("ntb: Unable to allocate MW buffer of size %d\n",
                    (int)mw->size);
                return (ENOMEM);
        }

        /* TODO: replace with bus_space_* functions */
        mw->dma_addr = vtophys(mw->virt_addr);

        /* Notify HW the memory location of the receive buffer */
        ntb_set_mw_addr(nt->ntb, num_mw, mw->dma_addr);

        return (0);
}

static void
ntb_free_mw(struct ntb_netdev *nt, int num_mw)
{
        struct ntb_transport_mw *mw = &nt->mw[num_mw];

        if (mw->virt_addr == NULL)
                return;

        contigfree(mw->virt_addr, mw->size, M_NTB_IF);
        mw->virt_addr = NULL;
}

static void
ntb_transport_setup_qp_mw(struct ntb_netdev *nt, unsigned int qp_num)
{
        struct ntb_transport_qp *qp = &nt->qps[qp_num];
        void *offset;
        unsigned int rx_size, num_qps_mw;
        uint8_t mw_num = QP_TO_MW(qp_num);
        unsigned int i;

        if (nt->max_qps % NTB_NUM_MW && mw_num < nt->max_qps % NTB_NUM_MW)
                num_qps_mw = nt->max_qps / NTB_NUM_MW + 1;
        else
                num_qps_mw = nt->max_qps / NTB_NUM_MW;

        rx_size = (unsigned int) nt->mw[mw_num].size / num_qps_mw;
        qp->remote_rx_info = (void *)((uint8_t *)nt->mw[mw_num].virt_addr +
            (qp_num / NTB_NUM_MW * rx_size));
        rx_size -= sizeof(struct ntb_rx_info);

        qp->rx_buff = qp->remote_rx_info + 1;
        /* Due to house-keeping, there must be at least 2 buffers */
        qp->rx_max_frame = min(transport_mtu + sizeof(struct ntb_payload_header),
            rx_size / 2);
        qp->rx_max_entry = rx_size / qp->rx_max_frame;
        qp->rx_index = 0;

        qp->remote_rx_info->entry = qp->rx_max_entry - 1;
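
        /*
         * Descriptive note: remote_rx_info->entry is what the peer's
         * ntb_process_tx() compares its tx_index against to detect a full
         * ring, so priming it to rx_max_entry - 1 advertises all but one
         * slot as free before any traffic flows.
         */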

        /* set up the hdr offsets with 0s */
        for (i = 0; i < qp->rx_max_entry; i++) {
                offset = (void *)((uint8_t *)qp->rx_buff +
                    qp->rx_max_frame * (i + 1) -
                    sizeof(struct ntb_payload_header));
                memset(offset, 0, sizeof(struct ntb_payload_header));
        }

        qp->rx_pkts = 0;
        qp->tx_pkts = 0;
}

static void
ntb_qp_link_work(void *arg)
{
        struct ntb_transport_qp *qp = arg;
        struct ntb_softc *ntb = qp->ntb;
        struct ntb_netdev *nt = qp->transport;
        int rc, val;

        rc = ntb_read_remote_spad(ntb, IF_NTB_QP_LINKS, &val);
        if (rc != 0)
                return;

        rc = ntb_write_remote_spad(ntb, IF_NTB_QP_LINKS, val | 1 << qp->qp_num);

        /* query remote spad for qp ready bits */
        rc = ntb_read_local_spad(ntb, IF_NTB_QP_LINKS, &val);

        /* See if the remote side is up */
        if ((1 << qp->qp_num & val) != 0) {
                qp->qp_link = NTB_LINK_UP;
                if (qp->event_handler != NULL)
                        qp->event_handler(qp->cb_data, NTB_LINK_UP);
                if (bootverbose)
                        device_printf(ntb_get_device(ntb), "qp link up\n");
        } else if (nt->transport_link == NTB_LINK_UP) {
                callout_reset(&qp->link_work,
                    NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_qp_link_work, qp);
        }
}

/* Link down event */
static void
ntb_transport_link_cleanup(struct ntb_netdev *nt)
{
        int i;

        if (nt->transport_link == NTB_LINK_DOWN)
                callout_drain(&nt->link_work);
        else
                nt->transport_link = NTB_LINK_DOWN;

        /* Pass along the info to any clients */
        for (i = 0; i < nt->max_qps; i++)
                if (!test_bit(i, &nt->qp_bitmap))
                        ntb_qp_link_down(&nt->qps[i]);

        /*
         * The scratchpad registers keep the values if the remote side
         * goes down, blast them now to give them a sane value the next
         * time they are accessed
         */
        for (i = 0; i < IF_NTB_MAX_SPAD; i++)
                ntb_write_local_spad(nt->ntb, i, 0);
}

static void
ntb_qp_link_down(struct ntb_transport_qp *qp)
{

        ntb_qp_link_cleanup(qp);
}

static void
ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
{
        struct ntb_netdev *nt = qp->transport;

        if (qp->qp_link == NTB_LINK_DOWN) {
                callout_drain(&qp->link_work);
                return;
        }

        if (qp->event_handler != NULL)
                qp->event_handler(qp->cb_data, NTB_LINK_DOWN);

        qp->qp_link = NTB_LINK_DOWN;

        if (nt->transport_link == NTB_LINK_UP)
                callout_reset(&qp->link_work,
                    NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_qp_link_work, qp);
}

/* Link commanded down */
/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * transport queue specified. It is the client's responsibility to ensure all
 * entries on queue are purged or otherwise handled appropriately.
 */
static void
ntb_transport_link_down(struct ntb_transport_qp *qp)
{
        int rc, val;

        if (qp == NULL)
                return;

        qp->client_ready = NTB_LINK_DOWN;

        rc = ntb_read_remote_spad(qp->ntb, IF_NTB_QP_LINKS, &val);
        if (rc != 0)
                return;

        rc = ntb_write_remote_spad(qp->ntb, IF_NTB_QP_LINKS,
            val & ~(1 << qp->qp_num));

        if (qp->qp_link == NTB_LINK_UP)
                ntb_send_link_down(qp);
        else
                callout_drain(&qp->link_work);
}

static void
ntb_send_link_down(struct ntb_transport_qp *qp)
{
        struct ntb_queue_entry *entry;
        int i, rc;

        if (qp->qp_link == NTB_LINK_DOWN)
                return;

        qp->qp_link = NTB_LINK_DOWN;

        for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
                entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
                if (entry != NULL)
                        break;
                pause("NTB Wait for link down", hz / 10);
        }

        if (entry == NULL)
                return;

        entry->cb_data = NULL;
        entry->buf = NULL;
        entry->len = 0;
        entry->flags = IF_NTB_LINK_DOWN_FLAG;

        mtx_lock(&qp->transport->tx_lock);
        rc = ntb_process_tx(qp, entry);
        if (rc != 0)
                printf("ntb: Failed to send link down\n");
        mtx_unlock(&qp->transport->tx_lock);
}

/* List Management */

static void
ntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry,
    struct ntb_queue_list *list)
{

        mtx_lock_spin(lock);
        STAILQ_INSERT_TAIL(list, entry, entry);
        mtx_unlock_spin(lock);
}

static struct ntb_queue_entry *
ntb_list_rm(struct mtx *lock, struct ntb_queue_list *list)
{
        struct ntb_queue_entry *entry;

        mtx_lock_spin(lock);
        if (STAILQ_EMPTY(list)) {
                entry = NULL;
                goto out;
        }
        entry = STAILQ_FIRST(list);
        STAILQ_REMOVE_HEAD(list, entry);
out:
        mtx_unlock_spin(lock);

        return (entry);
}

/* Helper functions */
/* TODO: This too should really be part of the kernel */
#define EUI48_MULTICAST			(1 << 0)
#define EUI48_LOCALLY_ADMINISTERED	(1 << 1)
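
/*
 * Descriptive note: in an EUI-48, bit 0 of the first octet is the multicast
 * flag and bit 1 the locally-administered flag, so setting eaddr[0] to
 * EUI48_LOCALLY_ADMINISTERED (0x02) below yields a unicast address outside
 * the vendor-assigned OUI space.
 */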

static void
create_random_local_eui48(u_char *eaddr)
{
        static uint8_t counter = 0;
        uint32_t seed = ticks;

        eaddr[0] = EUI48_LOCALLY_ADMINISTERED;
        memcpy(&eaddr[1], &seed, sizeof(uint32_t));
        eaddr[5] = counter++;
}

/**
 * ntb_transport_max_size - Query the max payload size of a qp
 * @qp: NTB transport layer queue to be queried
 *
 * Query the maximum payload size permissible on the given qp
 *
 * RETURNS: the max payload size of a qp
 */
static unsigned int
ntb_transport_max_size(struct ntb_transport_qp *qp)
{

        if (qp == NULL)
                return (0);

        return (qp->tx_max_frame - sizeof(struct ntb_payload_header));
}