/*-
 * Copyright (c) 2016 Alexander Motin <mav@FreeBSD.org>
 * Copyright (C) 2013 Intel Corporation
 * Copyright (C) 2015 EMC Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The Non-Transparent Bridge (NTB) is a device that allows you to connect
 * two or more systems using PCI-e links, providing remote memory access.
 *
 * This module contains a transport for sending and receiving messages by
 * writing to remote memory window(s) provided by the underlying NTB device.
 *
 * NOTE: Much of the code in this module is shared with Linux. Any patches may
 * be picked up and redistributed in Linux with a dual GPL/BSD license.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>

#include "ntb.h"
#include "ntb_transport.h"
#define KTR_NTB KTR_SPARE3

#define NTB_TRANSPORT_VERSION	4

static SYSCTL_NODE(_hw, OID_AUTO, ntb_transport, CTLFLAG_RW, 0, "ntb_transport");

static unsigned g_ntb_transport_debug_level;
SYSCTL_UINT(_hw_ntb_transport, OID_AUTO, debug_level, CTLFLAG_RWTUN,
    &g_ntb_transport_debug_level, 0,
    "ntb_transport log level -- higher is more verbose");
#define ntb_printf(lvl, ...) do {			\
	if ((lvl) <= g_ntb_transport_debug_level) {	\
		printf(__VA_ARGS__);			\
	}						\
} while (0)
static unsigned transport_mtu = 0x10000;

static uint64_t max_mw_size;
SYSCTL_UQUAD(_hw_ntb_transport, OID_AUTO, max_mw_size, CTLFLAG_RDTUN, &max_mw_size, 0,
    "If enabled (non-zero), limit the size of large memory windows. "
    "Both sides of the NTB MUST set the same value here.");

static unsigned enable_xeon_watchdog;
SYSCTL_UINT(_hw_ntb_transport, OID_AUTO, enable_xeon_watchdog, CTLFLAG_RDTUN,
    &enable_xeon_watchdog, 0, "If non-zero, write a register every second to "
    "keep a watchdog from tearing down the NTB link");
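
/*
 * Example (hypothetical values; both knobs are CTLFLAG_RDTUN, so set them
 * as loader tunables before the driver attaches, e.g. in /boot/loader.conf):
 *
 *   hw.ntb_transport.max_mw_size="0x20000000"
 *   hw.ntb_transport.enable_xeon_watchdog="1"
 *
 * Both hosts must use the same max_mw_size for the handshake to agree.
 */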
STAILQ_HEAD(ntb_queue_list, ntb_queue_entry);

typedef uint32_t ntb_q_idx_t;

struct ntb_queue_entry {
	/* ntb_queue list reference */
	STAILQ_ENTRY(ntb_queue_entry) entry;

	/* info on data to be transferred */
	void		*cb_data;
	void		*buf;
	uint32_t	len;
	uint32_t	flags;

	struct ntb_transport_qp *qp;
	struct ntb_payload_header *x_hdr;
	ntb_q_idx_t	index;
};

struct ntb_rx_info {
	ntb_q_idx_t entry;
};
struct ntb_transport_qp {
	struct ntb_transport_ctx *transport;
	device_t		 dev;

	void			*cb_data;

	bool			client_ready;
	volatile bool		link_is_up;
	uint8_t			qp_num;	/* Only 64 QPs are allowed. 0-63 */

	struct ntb_rx_info	*rx_info;
	struct ntb_rx_info	*remote_rx_info;

	void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
	    void *data, int len);
	struct ntb_queue_list	tx_free_q;
	struct mtx		ntb_tx_free_q_lock;
	struct mtx		tx_lock;
	caddr_t			tx_mw;
	bus_addr_t		tx_mw_phys;
	ntb_q_idx_t		tx_index;
	ntb_q_idx_t		tx_max_entry;
	uint64_t		tx_max_frame;

	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
	    void *data, int len);
	struct ntb_queue_list	rx_post_q;
	struct ntb_queue_list	rx_pend_q;
	/* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
	struct mtx		ntb_rx_q_lock;
	struct task		rxc_db_work;
	struct taskqueue	*rxc_tq;
	caddr_t			rx_buff;
	ntb_q_idx_t		rx_index;
	ntb_q_idx_t		rx_max_entry;
	uint64_t		rx_max_frame;

	void (*event_handler)(void *data, enum ntb_link_event status);
	struct callout		link_work;
	struct callout		rx_full;

	uint64_t		last_rx_no_buf;

	/* Stats */
	uint64_t		rx_bytes;
	uint64_t		rx_pkts;
	uint64_t		rx_ring_empty;
	uint64_t		rx_err_no_buf;
	uint64_t		rx_err_oflow;
	uint64_t		rx_err_ver;
	uint64_t		tx_bytes;
	uint64_t		tx_pkts;
	uint64_t		tx_ring_full;
	uint64_t		tx_err_no_buf;
};
struct ntb_transport_mw {
	vm_paddr_t	phys_addr;
	size_t		phys_size;
	size_t		xlat_align;
	size_t		xlat_align_size;
	bus_addr_t	addr_limit;
	/* Tx buff is off vbase / phys_addr */
	caddr_t		vbase;
	size_t		xlat_size;
	size_t		buff_size;
	/* Rx buff is off virt_addr / dma_addr */
	caddr_t		virt_addr;
	bus_addr_t	dma_addr;
};

struct ntb_transport_child {
	device_t	dev;
	int		qpoff;
	int		qpcnt;
	struct ntb_transport_child *next;
};
struct ntb_transport_ctx {
	device_t		dev;
	struct ntb_transport_child *child;
	struct ntb_transport_mw	*mw_vec;
	struct ntb_transport_qp	*qp_vec;
	int			mw_count;
	int			qp_count;
	uint64_t		qp_bitmap;
	volatile bool		link_is_up;
	struct callout		link_work;
	struct callout		link_watchdog;
	struct task		link_cleanup;
};
enum {
	NTBT_DESC_DONE_FLAG = 1 << 0,
	NTBT_LINK_DOWN_FLAG = 1 << 1,
};

struct ntb_payload_header {
	ntb_q_idx_t ver;
	uint32_t len;
	uint32_t flags;
};

enum {
	/*
	 * The order of this enum is part of the remote protocol. Do not
	 * reorder without bumping the protocol version (and it's probably
	 * best to keep the protocol in lock-step with the Linux NTB driver).
	 */
	NTBT_VERSION = 0,
	NTBT_QP_LINKS,
	NTBT_NUM_QPS,
	NTBT_NUM_MWS,
	/*
	 * N.B.: transport_link_work assumes MW1 enums = MW0 + 2.
	 */
	NTBT_MW0_SZ_HIGH,
	NTBT_MW0_SZ_LOW,
	NTBT_MW1_SZ_HIGH,
	NTBT_MW1_SZ_LOW,

	/*
	 * Some NTB-using hardware has a watchdog to work around NTB hangs; if
	 * a register or doorbell isn't written every few seconds, the link is
	 * torn down. Write an otherwise unused register every few seconds to
	 * work around this watchdog.
	 */
	NTBT_WATCHDOG_SPAD = 15
};
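
/*
 * Illustration of the resulting scratchpad map: with two memory windows,
 * spads 0-7 carry VERSION, QP_LINKS, NUM_QPS, NUM_MWS, MW0_SZ_HIGH,
 * MW0_SZ_LOW, MW1_SZ_HIGH and MW1_SZ_LOW, which is why attach below
 * requires at least 4 + 2 * mw_count scratchpads.
 */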
#define QP_TO_MW(nt, qp)	((qp) % (nt)->mw_count)
#define NTB_QP_DEF_NUM_ENTRIES	100
#define NTB_LINK_DOWN_TIMEOUT	10
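
/*
 * QP_TO_MW() stripes queue pairs across memory windows round-robin: e.g.
 * with mw_count = 2, qps 0 and 2 share MW0 while qps 1 and 3 share MW1.
 */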
static int ntb_transport_probe(device_t dev);
static int ntb_transport_attach(device_t dev);
static int ntb_transport_detach(device_t dev);
static void ntb_transport_init_queue(struct ntb_transport_ctx *nt,
    unsigned int qp_num);
static int ntb_process_tx(struct ntb_transport_qp *qp,
    struct ntb_queue_entry *entry);
static void ntb_transport_rxc_db(void *arg, int pending);
static int ntb_process_rxc(struct ntb_transport_qp *qp);
static void ntb_memcpy_rx(struct ntb_transport_qp *qp,
    struct ntb_queue_entry *entry, void *offset);
static inline void ntb_rx_copy_callback(struct ntb_transport_qp *qp,
    void *data);
static void ntb_complete_rxc(struct ntb_transport_qp *qp);
static void ntb_transport_doorbell_callback(void *data, uint32_t vector);
static void ntb_transport_event_callback(void *data);
static void ntb_transport_link_work(void *arg);
static int ntb_set_mw(struct ntb_transport_ctx *, int num_mw, size_t size);
static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw);
static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
    unsigned int qp_num);
static void ntb_qp_link_work(void *arg);
static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt);
static void ntb_transport_link_cleanup_work(void *, int);
static void ntb_qp_link_down(struct ntb_transport_qp *qp);
static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp);
static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp);
static void ntb_send_link_down(struct ntb_transport_qp *qp);
static void ntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry,
    struct ntb_queue_list *list);
static struct ntb_queue_entry *ntb_list_rm(struct mtx *lock,
    struct ntb_queue_list *list);
static struct ntb_queue_entry *ntb_list_mv(struct mtx *lock,
    struct ntb_queue_list *from, struct ntb_queue_list *to);
static void xeon_link_watchdog_hb(void *);
static const struct ntb_ctx_ops ntb_transport_ops = {
	.link_event = ntb_transport_event_callback,
	.db_event = ntb_transport_doorbell_callback,
};

MALLOC_DEFINE(M_NTB_T, "ntb_transport", "ntb transport driver");
static void
iowrite32(uint32_t val, void *addr)
{

	bus_space_write_4(X86_BUS_SPACE_MEM, 0/* HACK */, (uintptr_t)addr,
	    val);
}
/* Transport Init and teardown */

static void
xeon_link_watchdog_hb(void *arg)
{
	struct ntb_transport_ctx *nt;

	nt = arg;
	ntb_spad_write(nt->dev, NTBT_WATCHDOG_SPAD, 0);
	callout_reset(&nt->link_watchdog, 1 * hz, xeon_link_watchdog_hb, nt);
}
static int
ntb_transport_probe(device_t dev)
{

	device_set_desc(dev, "NTB Transport");
	return (0);
}
static int
ntb_transport_attach(device_t dev)
{
	struct ntb_transport_ctx *nt = device_get_softc(dev);
	struct ntb_transport_child **cpp = &nt->child;
	struct ntb_transport_child *nc;
	struct ntb_transport_mw *mw;
	uint64_t db_bitmap;
	int rc, i, db_count, spad_count, qp, qpu, qpo, qpt;
	char cfg[128] = "";
	char buf[32];
	char *n, *np, *c, *name;

	nt->dev = dev;
	nt->mw_count = ntb_mw_count(dev);
	spad_count = ntb_spad_count(dev);
	db_bitmap = ntb_db_valid_mask(dev);
	db_count = flsll(db_bitmap);
	KASSERT(db_bitmap == ((uint64_t)1 << db_count) - 1,
	    ("Doorbells are not sequential (%jx).\n", db_bitmap));

	device_printf(dev, "%d memory windows, %d scratchpads, "
	    "%d doorbells\n", nt->mw_count, spad_count, db_count);

	if (nt->mw_count == 0) {
		device_printf(dev, "At least 1 memory window required.\n");
		return (ENXIO);
	}
	if (spad_count < 6) {
		device_printf(dev, "At least 6 scratchpads required.\n");
		return (ENXIO);
	}
	if (spad_count < 4 + 2 * nt->mw_count) {
		nt->mw_count = (spad_count - 4) / 2;
		device_printf(dev, "Scratchpads enough only for %d "
		    "memory windows.\n", nt->mw_count);
	}
	if (db_bitmap == 0) {
		device_printf(dev, "At least one doorbell required.\n");
		return (ENXIO);
	}

	nt->mw_vec = malloc(nt->mw_count * sizeof(*nt->mw_vec), M_NTB_T,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < nt->mw_count; i++) {
		mw = &nt->mw_vec[i];

		rc = ntb_mw_get_range(dev, i, &mw->phys_addr, &mw->vbase,
		    &mw->phys_size, &mw->xlat_align, &mw->xlat_align_size,
		    &mw->addr_limit);
		if (rc != 0)
			goto err;

		mw->buff_size = 0;
		mw->xlat_size = 0;
		mw->virt_addr = NULL;

		rc = ntb_mw_set_wc(dev, i, VM_MEMATTR_WRITE_COMBINING);
		if (rc)
			ntb_printf(0, "Unable to set mw%d caching\n", i);
	}

	qpu = 0;
	qpo = imin(db_count, nt->mw_count);
	qpt = db_count;

	snprintf(buf, sizeof(buf), "hint.%s.%d.config", device_get_name(dev),
	    device_get_unit(dev));
	TUNABLE_STR_FETCH(buf, cfg, sizeof(cfg));
	n = cfg;
	i = 0;
	while ((c = strsep(&n, ",")) != NULL) {
		np = c;
		name = strsep(&np, ":");
		if (name != NULL && name[0] == 0)
			name = NULL;
		qp = (np && np[0] != 0) ? strtol(np, NULL, 10) : qpo - qpu;
		if (qp <= 0)
			qp = 1;

		if (qp > qpt - qpu) {
			device_printf(dev, "Not enough resources for config\n");
			break;
		}

		nc = malloc(sizeof(*nc), M_DEVBUF, M_WAITOK | M_ZERO);
		nc->qpoff = qpu;
		nc->qpcnt = qp;
		nc->dev = device_add_child(dev, name, -1);
		if (nc->dev == NULL) {
			device_printf(dev, "Cannot add child.\n");
			break;
		}
		device_set_ivars(nc->dev, nc);
		*cpp = nc;
		cpp = &nc->next;

		device_printf(dev, "%d \"%s\": queues %d",
		    i, name, qpu);
		if (qp > 1)
			printf("-%d", qpu + qp - 1);
		printf("\n");

		qpu += qp;
		i++;
	}
	nt->qp_count = qpu;

	nt->qp_vec = malloc(nt->qp_count * sizeof(*nt->qp_vec), M_NTB_T,
	    M_WAITOK | M_ZERO);

	for (i = 0; i < nt->qp_count; i++)
		ntb_transport_init_queue(nt, i);

	callout_init(&nt->link_work, 0);
	callout_init(&nt->link_watchdog, 0);
	TASK_INIT(&nt->link_cleanup, 0, ntb_transport_link_cleanup_work, nt);

	rc = ntb_set_ctx(dev, nt, &ntb_transport_ops);
	if (rc != 0)
		goto err;

	nt->link_is_up = false;
	ntb_link_enable(dev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);

	if (enable_xeon_watchdog != 0)
		callout_reset(&nt->link_watchdog, 0, xeon_link_watchdog_hb, nt);

	bus_generic_attach(dev);
	return (0);

err:
	free(nt->qp_vec, M_NTB_T);
	free(nt->mw_vec, M_NTB_T);
	return (rc);
}
static int
ntb_transport_detach(device_t dev)
{
	struct ntb_transport_ctx *nt = device_get_softc(dev);
	struct ntb_transport_child **cpp = &nt->child;
	struct ntb_transport_child *nc;
	int error = 0, i;

	while ((nc = *cpp) != NULL) {
		*cpp = (*cpp)->next;
		error = device_delete_child(dev, nc->dev);
		if (error)
			break;
		free(nc, M_DEVBUF);
	}
	KASSERT(nt->qp_bitmap == 0,
	    ("Some queues not freed on detach (%jx)", nt->qp_bitmap));

	ntb_transport_link_cleanup(nt);
	taskqueue_drain(taskqueue_swi, &nt->link_cleanup);
	callout_drain(&nt->link_work);
	callout_drain(&nt->link_watchdog);

	ntb_link_disable(dev);
	ntb_clear_ctx(dev);

	for (i = 0; i < nt->mw_count; i++)
		ntb_free_mw(nt, i);

	free(nt->qp_vec, M_NTB_T);
	free(nt->mw_vec, M_NTB_T);
	return (0);
}
int
ntb_transport_queue_count(device_t dev)
{
	struct ntb_transport_child *nc = device_get_ivars(dev);

	return (nc->qpcnt);
}
static void
ntb_transport_init_queue(struct ntb_transport_ctx *nt, unsigned int qp_num)
{
	struct ntb_transport_mw *mw;
	struct ntb_transport_qp *qp;
	vm_paddr_t mw_base;
	uint64_t mw_size, qp_offset;
	size_t tx_size;
	unsigned num_qps_mw, mw_num, mw_count;

	mw_count = nt->mw_count;
	mw_num = QP_TO_MW(nt, qp_num);
	mw = &nt->mw_vec[mw_num];

	qp = &nt->qp_vec[qp_num];
	qp->qp_num = qp_num;
	qp->transport = nt;
	qp->dev = nt->dev;
	qp->client_ready = false;
	qp->event_handler = NULL;
	ntb_qp_link_down_reset(qp);

	if (mw_num < nt->qp_count % mw_count)
		num_qps_mw = nt->qp_count / mw_count + 1;
	else
		num_qps_mw = nt->qp_count / mw_count;

	mw_base = mw->phys_addr;
	mw_size = mw->phys_size;

	tx_size = mw_size / num_qps_mw;
	qp_offset = tx_size * (qp_num / mw_count);

	qp->tx_mw = mw->vbase + qp_offset;
	KASSERT(qp->tx_mw != NULL, ("uh oh?"));

	/* XXX Assumes that a vm_paddr_t is equivalent to bus_addr_t */
	qp->tx_mw_phys = mw_base + qp_offset;
	KASSERT(qp->tx_mw_phys != 0, ("uh oh?"));

	tx_size -= sizeof(struct ntb_rx_info);
	qp->rx_info = (void *)(qp->tx_mw + tx_size);

	/* Due to house-keeping, there must be at least 2 buffs */
	qp->tx_max_frame = qmin(transport_mtu, tx_size / 2);
	qp->tx_max_entry = tx_size / qp->tx_max_frame;
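	/*
	 * Worked example (hypothetical sizes): a 1 MiB window shared by two
	 * qps gives tx_size = 512 KiB - sizeof(struct ntb_rx_info); with the
	 * default 64 KiB transport_mtu, tx_max_frame = 64 KiB and
	 * tx_max_entry = 7, i.e. a seven-slot transmit ring.
	 */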
	callout_init(&qp->link_work, 0);
	callout_init(&qp->rx_full, 1);

	mtx_init(&qp->ntb_rx_q_lock, "ntb rx q", NULL, MTX_SPIN);
	mtx_init(&qp->ntb_tx_free_q_lock, "ntb tx free q", NULL, MTX_SPIN);
	mtx_init(&qp->tx_lock, "ntb transport tx", NULL, MTX_DEF);
	TASK_INIT(&qp->rxc_db_work, 0, ntb_transport_rxc_db, qp);
	qp->rxc_tq = taskqueue_create("ntbt_rx", M_WAITOK,
	    taskqueue_thread_enqueue, &qp->rxc_tq);
	taskqueue_start_threads(&qp->rxc_tq, 1, PI_NET, "%s rx%d",
	    device_get_nameunit(nt->dev), qp_num);

	STAILQ_INIT(&qp->rx_post_q);
	STAILQ_INIT(&qp->rx_pend_q);
	STAILQ_INIT(&qp->tx_free_q);
}
void
ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
	struct ntb_transport_ctx *nt = qp->transport;
	struct ntb_queue_entry *entry;

	callout_drain(&qp->link_work);

	ntb_db_set_mask(qp->dev, 1ull << qp->qp_num);
	taskqueue_drain_all(qp->rxc_tq);
	taskqueue_free(qp->rxc_tq);

	qp->cb_data = NULL;
	qp->rx_handler = NULL;
	qp->tx_handler = NULL;
	qp->event_handler = NULL;

	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q)))
		free(entry, M_NTB_T);

	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q)))
		free(entry, M_NTB_T);

	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		free(entry, M_NTB_T);

	nt->qp_bitmap &= ~(1 << qp->qp_num);
}
/**
 * ntb_transport_create_queue - Create a new NTB transport layer queue
 * @rx_handler: receive callback function
 * @tx_handler: transmit callback function
 * @event_handler: event callback function
 *
 * Create a new NTB transport layer queue and provide the queue with a callback
 * routine for both transmit and receive. The receive callback routine will be
 * used to pass up data when the transport has received it on the queue. The
 * transmit callback routine will be called when the transport has completed
 * the transmission of the data on the queue and the data is ready to be freed.
 *
 * RETURNS: pointer to newly created ntb_queue, NULL on error.
 */
struct ntb_transport_qp *
ntb_transport_create_queue(device_t dev, int q,
    const struct ntb_queue_handlers *handlers, void *data)
{
	struct ntb_transport_child *nc = device_get_ivars(dev);
	struct ntb_transport_ctx *nt = device_get_softc(device_get_parent(dev));
	struct ntb_queue_entry *entry;
	struct ntb_transport_qp *qp;
	int i;

	if (q < 0 || q >= nc->qpcnt)
		return (NULL);

	qp = &nt->qp_vec[nc->qpoff + q];
	nt->qp_bitmap |= (1 << qp->qp_num);
	qp->cb_data = data;
	qp->rx_handler = handlers->rx_handler;
	qp->tx_handler = handlers->tx_handler;
	qp->event_handler = handlers->event_handler;

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = malloc(sizeof(*entry), M_NTB_T, M_WAITOK | M_ZERO);
		entry->cb_data = data;
		entry->buf = NULL;
		entry->len = transport_mtu;
		entry->qp = qp;
		ntb_list_add(&qp->ntb_rx_q_lock, entry, &qp->rx_pend_q);
	}

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = malloc(sizeof(*entry), M_NTB_T, M_WAITOK | M_ZERO);
		entry->qp = qp;
		ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
	}

	ntb_db_clear(dev, 1ull << qp->qp_num);
	return (qp);
}
/**
 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
 * @qp: NTB transport layer queue to be enabled
 *
 * Notify NTB transport layer of client readiness to use queue
 */
void
ntb_transport_link_up(struct ntb_transport_qp *qp)
{
	struct ntb_transport_ctx *nt = qp->transport;

	qp->client_ready = true;

	ntb_printf(2, "qp %d client ready\n", qp->qp_num);

	if (nt->link_is_up)
		callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp);
}
/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which a NTB
 * payload will be transmitted. This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate ERRNO error value on error, or zero for success.
 */
int
ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
    unsigned int len)
{
	struct ntb_queue_entry *entry;
	int rc;

	if (!qp->link_is_up || len == 0) {
		CTR0(KTR_NTB, "TX: link not up");
		return (EINVAL);
	}

	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
	if (entry == NULL) {
		CTR0(KTR_NTB, "TX: could not get entry from tx_free_q");
		qp->tx_err_no_buf++;
		return (EBUSY);
	}
	CTR1(KTR_NTB, "TX: got entry %p from tx_free_q", entry);

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;

	mtx_lock(&qp->tx_lock);
	rc = ntb_process_tx(qp, entry);
	mtx_unlock(&qp->tx_lock);
	if (rc != 0) {
		ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
		CTR1(KTR_NTB,
		    "TX: process_tx failed. Returning entry %p to tx_free_q",
		    entry);
	}
	return (rc);
}
static void
ntb_tx_copy_callback(void *data)
{
	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	struct ntb_payload_header *hdr = entry->x_hdr;

	iowrite32(entry->flags | NTBT_DESC_DONE_FLAG, &hdr->flags);
	CTR1(KTR_NTB, "TX: hdr %p set DESC_DONE", hdr);

	ntb_peer_db_set(qp->dev, 1ull << qp->qp_num);

	/*
	 * The entry length can only be zero if the packet is intended to be a
	 * "link down" or similar. Since no payload is being sent in these
	 * cases, there is nothing to add to the completion queue.
	 */
	if (entry->len > 0) {
		qp->tx_bytes += entry->len;

		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, entry->buf,
			    entry->len);
		else
			m_freem(entry->buf);
		entry->buf = NULL;
	}

	CTR3(KTR_NTB,
	    "TX: entry %p sent. hdr->ver = %u, hdr->flags = 0x%x, Returning "
	    "to tx_free_q", entry, hdr->ver, hdr->flags);
	ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
}
static void
ntb_memcpy_tx(struct ntb_queue_entry *entry, void *offset)
{

	CTR2(KTR_NTB, "TX: copying %d bytes to offset %p", entry->len, offset);
	if (entry->buf != NULL) {
		m_copydata((struct mbuf *)entry->buf, 0, entry->len, offset);

		/*
		 * Ensure that the data is fully copied before setting the
		 * flags
		 */
		wmb();
	}

	ntb_tx_copy_callback(entry);
}
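
/*
 * Frame layout: each ring slot is tx_max_frame bytes; the payload is copied
 * to the start of the slot and the struct ntb_payload_header is written at
 * the very end, so the receiver polls the trailing header's
 * NTBT_DESC_DONE_FLAG to learn that the whole frame has landed.
 */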
static void
ntb_async_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry)
{
	struct ntb_payload_header *hdr;
	void *offset;

	offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
	hdr = (struct ntb_payload_header *)((char *)offset + qp->tx_max_frame -
	    sizeof(struct ntb_payload_header));
	entry->x_hdr = hdr;

	iowrite32(entry->len, &hdr->len);
	iowrite32(qp->tx_pkts, &hdr->ver);

	ntb_memcpy_tx(entry, offset);
}
static int
ntb_process_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry)
{

	CTR3(KTR_NTB,
	    "TX: process_tx: tx_pkts=%lu, tx_index=%u, remote entry=%u",
	    qp->tx_pkts, qp->tx_index, qp->remote_rx_info->entry);
	if (qp->tx_index == qp->remote_rx_info->entry) {
		CTR0(KTR_NTB, "TX: ring full");
		qp->tx_ring_full++;
		return (EAGAIN);
	}

	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
		if (qp->tx_handler != NULL)
			qp->tx_handler(qp, qp->cb_data, entry->buf,
			    EIO);
		else
			m_freem(entry->buf);

		entry->buf = NULL;
		ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
		CTR1(KTR_NTB,
		    "TX: frame too big. returning entry %p to tx_free_q",
		    entry);
		return (0);
	}
	CTR2(KTR_NTB, "TX: copying entry %p to index %u", entry, qp->tx_index);
	ntb_async_tx(qp, entry);

	qp->tx_index++;
	qp->tx_index %= qp->tx_max_entry;

	qp->tx_pkts++;

	return (0);
}
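
/*
 * Receive processing runs in the per-qp taskqueue thread: it drains
 * completed frames until the ring is empty, then re-reads the doorbell and
 * loops again if the peer rang it meanwhile, closing the race between the
 * last processed frame and the doorbell re-check.
 */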
static void
ntb_transport_rxc_db(void *arg, int pending __unused)
{
	struct ntb_transport_qp *qp = arg;
	int rc;

	CTR0(KTR_NTB, "RX: transport_rx");
again:
	while ((rc = ntb_process_rxc(qp)) == 0)
		;
	CTR1(KTR_NTB, "RX: process_rxc returned %d", rc);

	if ((ntb_db_read(qp->dev) & (1ull << qp->qp_num)) != 0) {
		/* If db is set, clear it and check queue once more. */
		ntb_db_clear(qp->dev, 1ull << qp->qp_num);
		goto again;
	}
}
static int
ntb_process_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_payload_header *hdr;
	struct ntb_queue_entry *entry;
	caddr_t offset;

	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
	hdr = (void *)(offset + qp->rx_max_frame -
	    sizeof(struct ntb_payload_header));

	CTR1(KTR_NTB, "RX: process_rxc rx_index = %u", qp->rx_index);
	if ((hdr->flags & NTBT_DESC_DONE_FLAG) == 0) {
		CTR0(KTR_NTB, "RX: hdr not done");
		qp->rx_ring_empty++;
		return (EAGAIN);
	}

	if ((hdr->flags & NTBT_LINK_DOWN_FLAG) != 0) {
		CTR0(KTR_NTB, "RX: link down");
		ntb_qp_link_down(qp);
		hdr->flags = 0;
		return (EAGAIN);
	}

	if (hdr->ver != (uint32_t)qp->rx_pkts) {
		CTR2(KTR_NTB,"RX: ver != rx_pkts (%x != %lx). "
		    "Returning entry to rx_pend_q", hdr->ver, qp->rx_pkts);
		qp->rx_err_ver++;
		return (EIO);
	}

	entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
	if (entry == NULL) {
		qp->rx_err_no_buf++;
		CTR0(KTR_NTB, "RX: No entries in rx_pend_q");
		return (EAGAIN);
	}
	callout_stop(&qp->rx_full);
	CTR1(KTR_NTB, "RX: rx entry %p from rx_pend_q", entry);

	entry->x_hdr = hdr;
	entry->index = qp->rx_index;

	if (hdr->len > entry->len) {
		CTR2(KTR_NTB, "RX: len too long. Wanted %ju got %ju",
		    (uintmax_t)hdr->len, (uintmax_t)entry->len);
		qp->rx_err_oflow++;
		entry->len = -EIO;
		entry->flags |= NTBT_DESC_DONE_FLAG;
		ntb_complete_rxc(qp);
	} else {
		qp->rx_bytes += hdr->len;
		qp->rx_pkts++;
		CTR1(KTR_NTB, "RX: received %ld rx_pkts", qp->rx_pkts);
		entry->len = hdr->len;
		ntb_memcpy_rx(qp, entry, offset);
	}

	qp->rx_index++;
	qp->rx_index %= qp->rx_max_entry;
	return (0);
}
static void
ntb_memcpy_rx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry,
    void *offset)
{
	struct ifnet *ifp = entry->cb_data;
	unsigned int len = entry->len;

	CTR2(KTR_NTB, "RX: copying %d bytes from offset %p", len, offset);

	entry->buf = (void *)m_devget(offset, len, 0, ifp, NULL);
	if (entry->buf == NULL)
		entry->len = -ENOMEM;

	/* Ensure that the data is globally visible before clearing the flag */
	wmb();

	CTR2(KTR_NTB, "RX: copied entry %p to mbuf %p.", entry, entry->buf);
	ntb_rx_copy_callback(qp, entry);
}
static inline void
ntb_rx_copy_callback(struct ntb_transport_qp *qp, void *data)
{
	struct ntb_queue_entry *entry;

	entry = data;
	entry->flags |= NTBT_DESC_DONE_FLAG;
	ntb_complete_rxc(qp);
}
static void
ntb_complete_rxc(struct ntb_transport_qp *qp)
{
	struct ntb_queue_entry *entry;
	struct mbuf *m;
	unsigned len;

	CTR0(KTR_NTB, "RX: rx_completion_task");

	mtx_lock_spin(&qp->ntb_rx_q_lock);

	while (!STAILQ_EMPTY(&qp->rx_post_q)) {
		entry = STAILQ_FIRST(&qp->rx_post_q);
		if ((entry->flags & NTBT_DESC_DONE_FLAG) == 0)
			break;

		entry->x_hdr->flags = 0;
		iowrite32(entry->index, &qp->rx_info->entry);

		STAILQ_REMOVE_HEAD(&qp->rx_post_q, entry);

		len = entry->len;
		m = entry->buf;

		/*
		 * Re-initialize queue_entry for reuse; rx_handler takes
		 * ownership of the mbuf.
		 */
		entry->buf = NULL;
		entry->len = transport_mtu;
		entry->cb_data = qp->cb_data;

		STAILQ_INSERT_TAIL(&qp->rx_pend_q, entry, entry);

		mtx_unlock_spin(&qp->ntb_rx_q_lock);

		CTR2(KTR_NTB, "RX: completing entry %p, mbuf %p", entry, m);
		if (qp->rx_handler != NULL && qp->client_ready)
			qp->rx_handler(qp, qp->cb_data, m, len);
		else
			m_freem(m);

		mtx_lock_spin(&qp->ntb_rx_q_lock);
	}

	mtx_unlock_spin(&qp->ntb_rx_q_lock);
}
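
/*
 * Doorbell fanout: each qp owns one doorbell bit. The vector's mask is
 * first narrowed to allocated qps; if more than one candidate bit remains,
 * it is narrowed again to the bits actually pending in hardware, so only
 * the qps that were rung have their rx task scheduled.
 */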
static void
ntb_transport_doorbell_callback(void *data, uint32_t vector)
{
	struct ntb_transport_ctx *nt = data;
	struct ntb_transport_qp *qp;
	uint64_t vec_mask;
	unsigned qp_num;

	vec_mask = ntb_db_vector_mask(nt->dev, vector);
	vec_mask &= nt->qp_bitmap;
	if ((vec_mask & (vec_mask - 1)) != 0)
		vec_mask &= ntb_db_read(nt->dev);
	while (vec_mask != 0) {
		qp_num = ffsll(vec_mask) - 1;

		qp = &nt->qp_vec[qp_num];
		if (qp->link_is_up)
			taskqueue_enqueue(qp->rxc_tq, &qp->rxc_db_work);

		vec_mask &= ~(1ull << qp_num);
	}
}
/* Link Event handler */
static void
ntb_transport_event_callback(void *data)
{
	struct ntb_transport_ctx *nt = data;

	if (ntb_link_is_up(nt->dev, NULL, NULL)) {
		ntb_printf(1, "HW link up\n");
		callout_reset(&nt->link_work, 0, ntb_transport_link_work, nt);
	} else {
		ntb_printf(1, "HW link down\n");
		taskqueue_enqueue(taskqueue_swi, &nt->link_cleanup);
	}
}
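
/*
 * Link bring-up handshake, in outline: publish our window sizes, window
 * count, qp count and protocol version into the peer's scratchpads, then
 * read back the peer's values; only if the version and the qp/mw counts
 * match exactly do we size the rx buffers and mark the transport link up.
 */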
static void
ntb_transport_link_work(void *arg)
{
	struct ntb_transport_ctx *nt = arg;
	device_t dev = nt->dev;
	struct ntb_transport_qp *qp;
	uint64_t val64, size;
	uint32_t val;
	unsigned i;
	int rc;

	/* send the local info, in the opposite order of the way we read it */
	for (i = 0; i < nt->mw_count; i++) {
		size = nt->mw_vec[i].phys_size;

		if (max_mw_size != 0 && size > max_mw_size)
			size = max_mw_size;

		ntb_peer_spad_write(dev, NTBT_MW0_SZ_HIGH + (i * 2),
		    size >> 32);
		ntb_peer_spad_write(dev, NTBT_MW0_SZ_LOW + (i * 2), size);
	}
	ntb_peer_spad_write(dev, NTBT_NUM_MWS, nt->mw_count);
	ntb_peer_spad_write(dev, NTBT_NUM_QPS, nt->qp_count);
	ntb_peer_spad_write(dev, NTBT_QP_LINKS, 0);
	ntb_peer_spad_write(dev, NTBT_VERSION, NTB_TRANSPORT_VERSION);

	/* Query the remote side for its info */
	ntb_spad_read(dev, NTBT_VERSION, &val);
	if (val != NTB_TRANSPORT_VERSION)
		goto out;

	ntb_spad_read(dev, NTBT_NUM_QPS, &val);
	if (val != nt->qp_count)
		goto out;

	ntb_spad_read(dev, NTBT_NUM_MWS, &val);
	if (val != nt->mw_count)
		goto out;

	for (i = 0; i < nt->mw_count; i++) {
		ntb_spad_read(dev, NTBT_MW0_SZ_HIGH + (i * 2), &val);
		val64 = (uint64_t)val << 32;

		ntb_spad_read(dev, NTBT_MW0_SZ_LOW + (i * 2), &val);
		val64 |= val;

		rc = ntb_set_mw(nt, i, val64);
		if (rc != 0)
			goto free_mws;
	}

	nt->link_is_up = true;
	ntb_printf(1, "transport link up\n");

	for (i = 0; i < nt->qp_count; i++) {
		qp = &nt->qp_vec[i];

		ntb_transport_setup_qp_mw(nt, i);

		if (qp->client_ready)
			callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp);
	}

	return;

free_mws:
	for (i = 0; i < nt->mw_count; i++)
		ntb_free_mw(nt, i);
out:
	if (ntb_link_is_up(dev, NULL, NULL))
		callout_reset(&nt->link_work,
		    NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_transport_link_work, nt);
}
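
/*
 * Sizing example (hypothetical numbers): with xlat_align_size = 4096, a
 * peer-advertised size of 0x10800 rounds up to xlat_size = 0x11000, and
 * contigmalloc() must return a buffer aligned to xlat_align and below
 * addr_limit so the NTB hardware can translate the peer's writes into it.
 */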
static int
ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, size_t size)
{
	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
	size_t xlat_size, buff_size;
	int rc;

	if (size == 0)
		return (EINVAL);

	xlat_size = roundup(size, mw->xlat_align_size);
	buff_size = xlat_size;

	/* No need to re-setup */
	if (mw->xlat_size == xlat_size)
		return (0);

	if (mw->buff_size != 0)
		ntb_free_mw(nt, num_mw);

	/* Alloc memory for receiving data. Must be aligned */
	mw->xlat_size = xlat_size;
	mw->buff_size = buff_size;

	mw->virt_addr = contigmalloc(mw->buff_size, M_NTB_T, M_ZERO, 0,
	    mw->addr_limit, mw->xlat_align, 0);
	if (mw->virt_addr == NULL) {
		ntb_printf(0, "Unable to allocate MW buffer of size %zu/%zu\n",
		    mw->buff_size, mw->xlat_size);
		mw->xlat_size = 0;
		mw->buff_size = 0;
		return (ENOMEM);
	}

	/* TODO: replace with bus_space_* functions */
	mw->dma_addr = vtophys(mw->virt_addr);

	/*
	 * Ensure that the allocation from contigmalloc is aligned as
	 * requested. XXX: This may not be needed -- brought in for parity
	 * with the Linux driver.
	 */
	if (mw->dma_addr % mw->xlat_align != 0) {
		ntb_printf(0,
		    "DMA memory 0x%jx not aligned to BAR size 0x%zx\n",
		    (uintmax_t)mw->dma_addr, size);
		ntb_free_mw(nt, num_mw);
		return (ENOMEM);
	}

	/* Notify HW the memory location of the receive buffer */
	rc = ntb_mw_set_trans(nt->dev, num_mw, mw->dma_addr, mw->xlat_size);
	if (rc) {
		ntb_printf(0, "Unable to set mw%d translation\n", num_mw);
		ntb_free_mw(nt, num_mw);
		return (rc);
	}

	return (0);
}
static void
ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
{
	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];

	if (mw->virt_addr == NULL)
		return;

	ntb_mw_clear_trans(nt->dev, num_mw);
	contigfree(mw->virt_addr, mw->xlat_size, M_NTB_T);
	mw->xlat_size = 0;
	mw->buff_size = 0;
	mw->virt_addr = NULL;
}
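
/*
 * The rx setup below mirrors the tx layout computed in
 * ntb_transport_init_queue(): the same per-qp slice of the window, a
 * trailing struct ntb_rx_info, and rx_max_frame/rx_max_entry derived the
 * same way, so both peers agree on where every frame and header lives.
 */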
static int
ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, unsigned int qp_num)
{
	struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
	struct ntb_transport_mw *mw;
	void *offset;
	ntb_q_idx_t i;
	size_t rx_size;
	unsigned num_qps_mw, mw_num, mw_count;

	mw_count = nt->mw_count;
	mw_num = QP_TO_MW(nt, qp_num);
	mw = &nt->mw_vec[mw_num];

	if (mw->virt_addr == NULL)
		return (ENOMEM);

	if (mw_num < nt->qp_count % mw_count)
		num_qps_mw = nt->qp_count / mw_count + 1;
	else
		num_qps_mw = nt->qp_count / mw_count;

	rx_size = mw->xlat_size / num_qps_mw;
	qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count);
	rx_size -= sizeof(struct ntb_rx_info);

	qp->remote_rx_info = (void*)(qp->rx_buff + rx_size);

	/* Due to house-keeping, there must be at least 2 buffs */
	qp->rx_max_frame = qmin(transport_mtu, rx_size / 2);
	qp->rx_max_entry = rx_size / qp->rx_max_frame;
	qp->rx_index = 0;

	qp->remote_rx_info->entry = qp->rx_max_entry - 1;

	/* Set up the hdr offsets with 0s */
	for (i = 0; i < qp->rx_max_entry; i++) {
		offset = (void *)(qp->rx_buff + qp->rx_max_frame * (i + 1) -
		    sizeof(struct ntb_payload_header));
		memset(offset, 0, sizeof(struct ntb_payload_header));
	}

	qp->rx_pkts = 0;
	qp->tx_pkts = 0;
	qp->tx_index = 0;

	return (0);
}
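
/*
 * Per-qp link bring-up: each side sets its bit in the NTBT_QP_LINKS
 * scratchpad and polls the peer's copy; the qp is declared up only once
 * the peer has set the matching bit, after which its doorbell is unmasked.
 */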
static void
ntb_qp_link_work(void *arg)
{
	struct ntb_transport_qp *qp = arg;
	device_t dev = qp->dev;
	struct ntb_transport_ctx *nt = qp->transport;
	int i;
	uint32_t val;

	/* Report queues that are up on our side */
	for (i = 0, val = 0; i < nt->qp_count; i++) {
		if (nt->qp_vec[i].client_ready)
			val |= (1 << i);
	}
	ntb_peer_spad_write(dev, NTBT_QP_LINKS, val);

	/* See if the remote side is up */
	ntb_spad_read(dev, NTBT_QP_LINKS, &val);
	if ((val & (1ull << qp->qp_num)) != 0) {
		ntb_printf(2, "qp %d link up\n", qp->qp_num);
		qp->link_is_up = true;

		if (qp->event_handler != NULL)
			qp->event_handler(qp->cb_data, NTB_LINK_UP);

		ntb_db_clear_mask(dev, 1ull << qp->qp_num);
	} else if (nt->link_is_up)
		callout_reset(&qp->link_work,
		    NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_qp_link_work, qp);
}
/* Link down event */
static void
ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
{
	struct ntb_transport_qp *qp;
	int i;

	/* Pass along the info to any clients */
	for (i = 0; i < nt->qp_count; i++) {
		if ((nt->qp_bitmap & (1 << i)) != 0) {
			qp = &nt->qp_vec[i];
			ntb_qp_link_cleanup(qp);
			callout_drain(&qp->link_work);
		}
	}

	if (!nt->link_is_up)
		callout_drain(&nt->link_work);

	/*
	 * The scratchpad registers keep the values if the remote side
	 * goes down, blast them now to give them a sane value the next
	 * time they are accessed
	 */
	ntb_spad_clear(nt->dev);
}
static void
ntb_transport_link_cleanup_work(void *arg, int pending __unused)
{

	ntb_transport_link_cleanup(arg);
}

static void
ntb_qp_link_down(struct ntb_transport_qp *qp)
{

	ntb_qp_link_cleanup(qp);
}
static void
ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
{

	qp->link_is_up = false;
	ntb_db_set_mask(qp->dev, 1ull << qp->qp_num);

	qp->tx_index = qp->rx_index = 0;
	qp->tx_bytes = qp->rx_bytes = 0;
	qp->tx_pkts = qp->rx_pkts = 0;

	qp->rx_ring_empty = 0;
	qp->tx_ring_full = 0;

	qp->rx_err_no_buf = qp->tx_err_no_buf = 0;
	qp->rx_err_oflow = qp->rx_err_ver = 0;
}

static void
ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
{

	callout_drain(&qp->link_work);
	ntb_qp_link_down_reset(qp);

	if (qp->event_handler != NULL)
		qp->event_handler(qp->cb_data, NTB_LINK_DOWN);
}
/* Link commanded down */
/**
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * transport queue specified. It is the client's responsibility to ensure all
 * entries on queue are purged or otherwise handled appropriately.
 */
void
ntb_transport_link_down(struct ntb_transport_qp *qp)
{
	struct ntb_transport_ctx *nt = qp->transport;
	int i;
	uint32_t val;

	qp->client_ready = false;
	for (i = 0, val = 0; i < nt->qp_count; i++) {
		if (nt->qp_vec[i].client_ready)
			val |= (1 << i);
	}
	ntb_peer_spad_write(qp->dev, NTBT_QP_LINKS, val);

	if (qp->link_is_up)
		ntb_send_link_down(qp);
	else
		callout_drain(&qp->link_work);
}
/**
 * ntb_transport_link_query - Query transport link state
 * @qp: NTB transport layer queue to be queried
 *
 * Query connectivity to the remote system of the NTB transport queue
 *
 * RETURNS: true for link up or false for link down
 */
bool
ntb_transport_link_query(struct ntb_transport_qp *qp)
{

	return (qp->link_is_up);
}
static void
ntb_send_link_down(struct ntb_transport_qp *qp)
{
	struct ntb_queue_entry *entry;
	int i, rc;

	if (!qp->link_is_up)
		return;

	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
		if (entry != NULL)
			break;
		pause("NTB Wait for link down", hz / 10);
	}

	if (entry == NULL)
		return;

	entry->cb_data = NULL;
	entry->buf = NULL;
	entry->len = 0;
	entry->flags = NTBT_LINK_DOWN_FLAG;

	mtx_lock(&qp->tx_lock);
	rc = ntb_process_tx(qp, entry);
	mtx_unlock(&qp->tx_lock);
	if (rc != 0)
		printf("ntb: Failed to send link down\n");

	ntb_qp_link_down_reset(qp);
}
/* List Management */

static void
ntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry,
    struct ntb_queue_list *list)
{

	mtx_lock_spin(lock);
	STAILQ_INSERT_TAIL(list, entry, entry);
	mtx_unlock_spin(lock);
}

static struct ntb_queue_entry *
ntb_list_rm(struct mtx *lock, struct ntb_queue_list *list)
{
	struct ntb_queue_entry *entry;

	mtx_lock_spin(lock);
	if (STAILQ_EMPTY(list)) {
		entry = NULL;
		goto out;
	}
	entry = STAILQ_FIRST(list);
	STAILQ_REMOVE_HEAD(list, entry);
out:
	mtx_unlock_spin(lock);

	return (entry);
}

static struct ntb_queue_entry *
ntb_list_mv(struct mtx *lock, struct ntb_queue_list *from,
    struct ntb_queue_list *to)
{
	struct ntb_queue_entry *entry;

	mtx_lock_spin(lock);
	if (STAILQ_EMPTY(from)) {
		entry = NULL;
		goto out;
	}
	entry = STAILQ_FIRST(from);
	STAILQ_REMOVE_HEAD(from, entry);
	STAILQ_INSERT_TAIL(to, entry, entry);
out:
	mtx_unlock_spin(lock);

	return (entry);
}
/**
 * ntb_transport_qp_num - Query the qp number
 * @qp: NTB transport layer queue to be queried
 *
 * Query qp number of the NTB transport queue
 *
 * RETURNS: a zero based number specifying the qp number
 */
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
{

	return (qp->qp_num);
}
/**
 * ntb_transport_max_size - Query the max payload size of a qp
 * @qp: NTB transport layer queue to be queried
 *
 * Query the maximum payload size permissible on the given qp
 *
 * RETURNS: the max payload size of a qp
 */
unsigned int
ntb_transport_max_size(struct ntb_transport_qp *qp)
{

	return (qp->tx_max_frame - sizeof(struct ntb_payload_header));
}
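
/*
 * Ring occupancy example: with tx_max_entry = 8, head (tx_index) = 5 and
 * tail (remote_rx_info->entry) = 2, there are 8 + 2 - 5 = 5 free slots;
 * one slot always stays unused so a full ring is distinguishable from an
 * empty one (ntb_process_tx treats head == tail as full).
 */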
unsigned int
ntb_transport_tx_free_entry(struct ntb_transport_qp *qp)
{
	unsigned int head = qp->tx_index;
	unsigned int tail = qp->remote_rx_info->entry;

	return (tail >= head ? tail - head : qp->tx_max_entry + tail - head);
}
static device_method_t ntb_transport_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ntb_transport_probe),
	DEVMETHOD(device_attach, ntb_transport_attach),
	DEVMETHOD(device_detach, ntb_transport_detach),
	DEVMETHOD_END
};

devclass_t ntb_transport_devclass;
static DEFINE_CLASS_0(ntb_transport, ntb_transport_driver,
    ntb_transport_methods, sizeof(struct ntb_transport_ctx));
DRIVER_MODULE(ntb_transport, ntb_hw, ntb_transport_driver,
    ntb_transport_devclass, NULL, NULL);
MODULE_DEPEND(ntb_transport, ntb, 1, 1, 1);
MODULE_VERSION(ntb_transport, 1);