 * Copyright (c) 2016-2017 Alexander Motin <mav@FreeBSD.org>
 * Copyright (C) 2013 Intel Corporation
 * Copyright (C) 2015 EMC Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * The Non-Transparent Bridge (NTB) is a device that allows you to connect
 * two or more systems using PCI-e links, providing remote memory access.
 *
 * This module contains a transport for sending and receiving messages by
 * writing to remote memory window(s) provided by the underlying NTB device.
 *
 * NOTE: Much of the code in this module is shared with Linux.  Any patches may
 * be picked up and redistributed in Linux with a dual GPL/BSD license.
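 *
 * Typical consumer flow, as a sketch (if_ntb is the in-tree example
 * consumer): attach to a child device of this transport, call
 * ntb_transport_create_queue() with rx/tx/event handlers, declare
 * readiness with ntb_transport_link_up(), and submit buffers with
 * ntb_transport_tx_enqueue(); completions and received frames are
 * delivered through the registered callbacks.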
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <machine/bus.h>

#include "ntb_transport.h"

#define KTR_NTB KTR_SPARE3
#define NTB_TRANSPORT_VERSION	4

static SYSCTL_NODE(_hw, OID_AUTO, ntb_transport,
    CTLFLAG_RW | CTLFLAG_MPSAFE, 0,

static unsigned g_ntb_transport_debug_level;
SYSCTL_UINT(_hw_ntb_transport, OID_AUTO, debug_level, CTLFLAG_RWTUN,
    &g_ntb_transport_debug_level, 0,
    "ntb_transport log level -- higher is more verbose");
#define ntb_printf(lvl, ...) do {			\
	if ((lvl) <= g_ntb_transport_debug_level) {	\
		printf(__VA_ARGS__);			\

static unsigned transport_mtu = 0x10000;

static uint64_t max_mw_size = 256*1024*1024;
SYSCTL_UQUAD(_hw_ntb_transport, OID_AUTO, max_mw_size, CTLFLAG_RDTUN, &max_mw_size, 0,
    "If enabled (non-zero), limit the size of large memory windows. "
    "Both sides of the NTB MUST set the same value here.");

static unsigned enable_xeon_watchdog;
SYSCTL_UINT(_hw_ntb_transport, OID_AUTO, enable_xeon_watchdog, CTLFLAG_RDTUN,
    &enable_xeon_watchdog, 0, "If non-zero, write a register every second to "
    "keep a watchdog from tearing down the NTB link");
STAILQ_HEAD(ntb_queue_list, ntb_queue_entry);

typedef uint32_t ntb_q_idx_t;

struct ntb_queue_entry {
	/* ntb_queue list reference */
	STAILQ_ENTRY(ntb_queue_entry) entry;

	/* info on data to be transferred */

	struct ntb_transport_qp *qp;
	struct ntb_payload_header *x_hdr;

struct ntb_transport_qp {
	struct ntb_transport_ctx *transport;

	volatile bool link_is_up;
	uint8_t qp_num;	/* Only 64 QPs are allowed.  0-63 */

	struct ntb_rx_info *rx_info;
	struct ntb_rx_info *remote_rx_info;

	void (*tx_handler)(struct ntb_transport_qp *qp, void *qp_data,
	    void *data, int len);
	struct ntb_queue_list tx_free_q;
	struct mtx ntb_tx_free_q_lock;

	bus_addr_t tx_mw_phys;
	ntb_q_idx_t tx_index;
	ntb_q_idx_t tx_max_entry;
	uint64_t tx_max_frame;

	void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
	    void *data, int len);
	struct ntb_queue_list rx_post_q;
	struct ntb_queue_list rx_pend_q;
	/* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
	struct mtx ntb_rx_q_lock;
	struct task rxc_db_work;
	struct taskqueue *rxc_tq;

	ntb_q_idx_t rx_index;
	ntb_q_idx_t rx_max_entry;
	uint64_t rx_max_frame;

	void (*event_handler)(void *data, enum ntb_link_event status);
	struct callout link_work;
	struct callout rx_full;

	uint64_t last_rx_no_buf;

	uint64_t rx_ring_empty;
	uint64_t rx_err_no_buf;
	uint64_t rx_err_oflow;

	uint64_t tx_ring_full;
	uint64_t tx_err_no_buf;

struct ntb_transport_mw {
	vm_paddr_t phys_addr;

	size_t xlat_align_size;
	bus_addr_t addr_limit;
	/* Tx buff is vbase / phys_addr / tx_size */

	/* Rx buff is virt_addr / dma_addr / rx_size */
	bus_dma_tag_t dma_tag;
	bus_dmamap_t dma_map;

	/* rx_size rounded up to the size-alignment requirements of the hardware. */
struct ntb_transport_child {

	struct ntb_transport_child *next;

struct ntb_transport_ctx {

	struct ntb_transport_child *child;
	struct ntb_transport_mw *mw_vec;
	struct ntb_transport_qp *qp_vec;

	volatile bool link_is_up;
	enum ntb_speed link_speed;
	enum ntb_width link_width;
	struct callout link_work;
	struct callout link_watchdog;
	struct task link_cleanup;

	NTBT_DESC_DONE_FLAG = 1 << 0,
	NTBT_LINK_DOWN_FLAG = 1 << 1,

struct ntb_payload_header {

 * The order of this enum is part of the remote protocol.  Do not
 * reorder without bumping protocol version (and it's probably best
 * to keep the protocol in lock-step with the Linux NTB driver).

 * N.B.: transport_link_work assumes MW1 enums = MW0 + 2.

 * Some NTB-using hardware has a watchdog to work around NTB hangs; if
 * a register or doorbell isn't written every few seconds, the link is
 * torn down.  Write an otherwise unused register every few seconds to
 * work around this watchdog.

	NTBT_WATCHDOG_SPAD = 15

 * Compact version of the scratchpad protocol, using half as many registers.

	NTBTC_PARAMS = 0,	/* NUM_QPS << 24 + NUM_MWS << 16 + VERSION */
	NTBTC_QP_LINKS,		/* QP links status */
	NTBTC_MW0_SZ,		/* MW size limited to 32 bits. */

#define QP_TO_MW(nt, qp)	((qp) % nt->mw_count)
#define NTB_QP_DEF_NUM_ENTRIES	100
#define NTB_LINK_DOWN_TIMEOUT	100
static int ntb_transport_probe(device_t dev);
static int ntb_transport_attach(device_t dev);
static int ntb_transport_detach(device_t dev);
static void ntb_transport_init_queue(struct ntb_transport_ctx *nt,
    unsigned int qp_num);
static int ntb_process_tx(struct ntb_transport_qp *qp,
    struct ntb_queue_entry *entry);
static void ntb_transport_rxc_db(void *arg, int pending);
static int ntb_process_rxc(struct ntb_transport_qp *qp);
static void ntb_memcpy_rx(struct ntb_transport_qp *qp,
    struct ntb_queue_entry *entry, void *offset);
static inline void ntb_rx_copy_callback(struct ntb_transport_qp *qp,
static void ntb_complete_rxc(struct ntb_transport_qp *qp);
static void ntb_transport_doorbell_callback(void *data, uint32_t vector);
static void ntb_transport_event_callback(void *data);
static void ntb_transport_link_work(void *arg);
static int ntb_set_mw(struct ntb_transport_ctx *, int num_mw, size_t size);
static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw);
static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
    unsigned int qp_num);
static void ntb_qp_link_work(void *arg);
static void ntb_transport_link_cleanup(struct ntb_transport_ctx *nt);
static void ntb_transport_link_cleanup_work(void *, int);
static void ntb_qp_link_down(struct ntb_transport_qp *qp);
static void ntb_qp_link_down_reset(struct ntb_transport_qp *qp);
static void ntb_qp_link_cleanup(struct ntb_transport_qp *qp);
static void ntb_send_link_down(struct ntb_transport_qp *qp);
static void ntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry,
    struct ntb_queue_list *list);
static struct ntb_queue_entry *ntb_list_rm(struct mtx *lock,
    struct ntb_queue_list *list);
static struct ntb_queue_entry *ntb_list_mv(struct mtx *lock,
    struct ntb_queue_list *from, struct ntb_queue_list *to);
static void xeon_link_watchdog_hb(void *);

static const struct ntb_ctx_ops ntb_transport_ops = {
	.link_event = ntb_transport_event_callback,
	.db_event = ntb_transport_doorbell_callback,

MALLOC_DEFINE(M_NTB_T, "ntb_transport", "ntb transport driver");

iowrite32(uint32_t val, void *addr)

	bus_space_write_4(X86_BUS_SPACE_MEM, 0/* HACK */, (uintptr_t)addr,

/* Transport Init and teardown */
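/*
 * Heartbeat for hardware with a link watchdog (see enable_xeon_watchdog):
 * write the reserved NTBT_WATCHDOG_SPAD scratchpad once a second so the
 * watchdog does not tear down the NTB link.
 */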
xeon_link_watchdog_hb(void *arg)

	struct ntb_transport_ctx *nt;

	ntb_spad_write(nt->dev, NTBT_WATCHDOG_SPAD, 0);
	callout_reset(&nt->link_watchdog, 1 * hz, xeon_link_watchdog_hb, nt);

ntb_transport_probe(device_t dev)

	device_set_desc(dev, "NTB Transport");
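/*
 * Attach: discover memory windows, scratchpads, and doorbells, choose the
 * regular or compact scratchpad protocol, preallocate receive buffers,
 * create the child consumer devices from the "config" hint, and enable
 * the NTB link.
 */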
ntb_transport_attach(device_t dev)

	struct ntb_transport_ctx *nt = device_get_softc(dev);
	struct ntb_transport_child **cpp = &nt->child;
	struct ntb_transport_child *nc;
	struct ntb_transport_mw *mw;
	int rc, i, db_count, spad_count, qp, qpu, qpo, qpt;
	char *n, *np, *c, *name;

	nt->mw_count = ntb_mw_count(dev);
	spad_count = ntb_spad_count(dev);
	db_bitmap = ntb_db_valid_mask(dev);
	db_count = flsll(db_bitmap);
	KASSERT(db_bitmap == (1 << db_count) - 1,
	    ("Doorbells are not sequential (%jx).\n", db_bitmap));

	if (nt->mw_count == 0) {
		device_printf(dev, "At least 1 memory window required.\n");

	nt->compact = (spad_count < 4 + 2 * nt->mw_count);
	snprintf(buf, sizeof(buf), "hint.%s.%d.compact", device_get_name(dev),
	    device_get_unit(dev));
	TUNABLE_INT_FETCH(buf, &nt->compact);

		if (spad_count < 3) {
			device_printf(dev, "At least 3 scratchpads required.\n");

		if (spad_count < 2 + nt->mw_count) {
			nt->mw_count = spad_count - 2;
			device_printf(dev, "Scratchpads only sufficient for %d "
			    "memory windows.\n", nt->mw_count);

		if (spad_count < 6) {
			device_printf(dev, "At least 6 scratchpads required.\n");

		if (spad_count < 4 + 2 * nt->mw_count) {
			nt->mw_count = (spad_count - 4) / 2;
			device_printf(dev, "Scratchpads only sufficient for %d "
			    "memory windows.\n", nt->mw_count);

	if (db_bitmap == 0) {
		device_printf(dev, "At least one doorbell required.\n");

	nt->mw_vec = malloc(nt->mw_count * sizeof(*nt->mw_vec), M_NTB_T,
	for (i = 0; i < nt->mw_count; i++) {

		rc = ntb_mw_get_range(dev, i, &mw->phys_addr, &mw->vbase,
		    &mw->phys_size, &mw->xlat_align, &mw->xlat_align_size,

		mw->tx_size = mw->phys_size;
		if (max_mw_size != 0 && mw->tx_size > max_mw_size) {
			device_printf(dev, "Memory window %d limited from "
			    "%ju to %ju\n", i, (uintmax_t)mw->tx_size,
			mw->tx_size = max_mw_size;

		if (nt->compact && mw->tx_size > UINT32_MAX) {
			device_printf(dev, "Memory window %d is too big "
			    "(%ju)\n", i, (uintmax_t)mw->tx_size);

		mw->virt_addr = NULL;

		rc = ntb_mw_set_wc(dev, i, VM_MEMATTR_WRITE_COMBINING);
			ntb_printf(0, "Unable to set mw%d caching\n", i);

		 * Try to preallocate receive memory early, since there may
		 * not be enough contiguous memory later.  It is quite likely
		 * that the NTB windows are symmetric and this allocation will
		 * remain, but even if not, we will just reallocate it later.
		ntb_set_mw(nt, i, mw->tx_size);

	qpo = imin(db_count, nt->mw_count);

	snprintf(buf, sizeof(buf), "hint.%s.%d.config", device_get_name(dev),
	    device_get_unit(dev));
	TUNABLE_STR_FETCH(buf, cfg, sizeof(cfg));

	while ((c = strsep(&n, ",")) != NULL) {
		name = strsep(&np, ":");
		if (name != NULL && name[0] == 0)
		qp = (np && np[0] != 0) ? strtol(np, NULL, 10) : qpo - qpu;

		if (qp > qpt - qpu) {
			device_printf(dev, "Not enough resources for config\n");

		nc = malloc(sizeof(*nc), M_DEVBUF, M_WAITOK | M_ZERO);

		nc->dev = device_add_child(dev, name, -1);
		if (nc->dev == NULL) {
			device_printf(dev, "Cannot add child.\n");

		device_set_ivars(nc->dev, nc);

		device_printf(dev, "%d \"%s\": queues %d",
			printf("-%d", qpu + qp - 1);

	nt->qp_vec = malloc(nt->qp_count * sizeof(*nt->qp_vec), M_NTB_T,

	for (i = 0; i < nt->qp_count; i++)
		ntb_transport_init_queue(nt, i);

	callout_init(&nt->link_work, 0);
	callout_init(&nt->link_watchdog, 0);
	TASK_INIT(&nt->link_cleanup, 0, ntb_transport_link_cleanup_work, nt);
	nt->link_is_up = false;

	rc = ntb_set_ctx(dev, nt, &ntb_transport_ops);

	ntb_link_enable(dev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);

	for (i = 0; i < nt->mw_count; i++) {
		rc = ntb_mw_set_trans(nt->dev, i, mw->dma_addr, mw->buff_size);
			ntb_printf(0, "load time mw%d xlat fails, rc %d\n", i, rc);

	if (enable_xeon_watchdog != 0)
		callout_reset(&nt->link_watchdog, 0, xeon_link_watchdog_hb, nt);

	bus_generic_attach(dev);

	free(nt->qp_vec, M_NTB_T);
	free(nt->mw_vec, M_NTB_T);
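/*
 * Detach: delete the child consumer devices, tear down the transport link,
 * disable the NTB link, and release all queue and memory window resources.
 */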
ntb_transport_detach(device_t dev)

	struct ntb_transport_ctx *nt = device_get_softc(dev);
	struct ntb_transport_child **cpp = &nt->child;
	struct ntb_transport_child *nc;

	while ((nc = *cpp) != NULL) {
		error = device_delete_child(dev, nc->dev);

	KASSERT(nt->qp_bitmap == 0,
	    ("Some queues not freed on detach (%jx)", nt->qp_bitmap));

	ntb_transport_link_cleanup(nt);
	taskqueue_drain(taskqueue_swi, &nt->link_cleanup);
	callout_drain(&nt->link_work);
	callout_drain(&nt->link_watchdog);

	ntb_link_disable(dev);

	for (i = 0; i < nt->mw_count; i++)

	free(nt->qp_vec, M_NTB_T);
	free(nt->mw_vec, M_NTB_T);

ntb_transport_print_child(device_t dev, device_t child)

	struct ntb_transport_child *nc = device_get_ivars(child);

	retval = bus_print_child_header(dev, child);
	printf(" queue %d", nc->qpoff);
		printf("-%d", nc->qpoff + nc->qpcnt - 1);
	retval += printf(" at consumer %d", nc->consumer);
	retval += bus_print_child_domain(dev, child);
	retval += bus_print_child_footer(dev, child);

ntb_transport_child_location_str(device_t dev, device_t child, char *buf,

	struct ntb_transport_child *nc = device_get_ivars(child);

	snprintf(buf, buflen, "consumer=%d", nc->consumer);

ntb_transport_queue_count(device_t dev)

	struct ntb_transport_child *nc = device_get_ivars(dev);
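/*
 * Carve this queue's transmit slice out of its memory window and set up
 * the TX ring bookkeeping, locks, and per-queue RX completion taskqueue.
 */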
ntb_transport_init_queue(struct ntb_transport_ctx *nt, unsigned int qp_num)

	struct ntb_transport_mw *mw;
	struct ntb_transport_qp *qp;
	unsigned num_qps_mw, mw_num, mw_count;

	mw_count = nt->mw_count;
	mw_num = QP_TO_MW(nt, qp_num);
	mw = &nt->mw_vec[mw_num];

	qp = &nt->qp_vec[qp_num];
	qp->client_ready = false;
	qp->event_handler = NULL;
	ntb_qp_link_down_reset(qp);

	if (mw_num < nt->qp_count % mw_count)
		num_qps_mw = nt->qp_count / mw_count + 1;
		num_qps_mw = nt->qp_count / mw_count;

	mw_base = mw->phys_addr;

	tx_size = mw->tx_size / num_qps_mw;
	qp_offset = tx_size * (qp_num / mw_count);

	qp->tx_mw = mw->vbase + qp_offset;
	KASSERT(qp->tx_mw != NULL, ("uh oh?"));

	/* XXX Assumes that a vm_paddr_t is equivalent to bus_addr_t */
	qp->tx_mw_phys = mw_base + qp_offset;
	KASSERT(qp->tx_mw_phys != 0, ("uh oh?"));

	tx_size -= sizeof(struct ntb_rx_info);
	qp->rx_info = (void *)(qp->tx_mw + tx_size);

	/* Due to housekeeping, there must be at least 2 buffers */
	qp->tx_max_frame = qmin(transport_mtu, tx_size / 2);
	qp->tx_max_entry = tx_size / qp->tx_max_frame;

	callout_init(&qp->link_work, 0);
	callout_init(&qp->rx_full, 1);

	mtx_init(&qp->ntb_rx_q_lock, "ntb rx q", NULL, MTX_SPIN);
	mtx_init(&qp->ntb_tx_free_q_lock, "ntb tx free q", NULL, MTX_SPIN);
	mtx_init(&qp->tx_lock, "ntb transport tx", NULL, MTX_DEF);
	TASK_INIT(&qp->rxc_db_work, 0, ntb_transport_rxc_db, qp);
	qp->rxc_tq = taskqueue_create("ntbt_rx", M_WAITOK,
	    taskqueue_thread_enqueue, &qp->rxc_tq);
	taskqueue_start_threads(&qp->rxc_tq, 1, PI_NET, "%s rx%d",
	    device_get_nameunit(nt->dev), qp_num);

	STAILQ_INIT(&qp->rx_post_q);
	STAILQ_INIT(&qp->rx_pend_q);
	STAILQ_INIT(&qp->tx_free_q);
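/*
 * Tear down a queue: quiesce its doorbell and taskqueue, drop the client
 * handlers, and free every entry still sitting on the RX/TX lists.
 */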
ntb_transport_free_queue(struct ntb_transport_qp *qp)

	struct ntb_transport_ctx *nt = qp->transport;
	struct ntb_queue_entry *entry;

	callout_drain(&qp->link_work);

	ntb_db_set_mask(qp->dev, 1ull << qp->qp_num);
	taskqueue_drain_all(qp->rxc_tq);
	taskqueue_free(qp->rxc_tq);

	qp->rx_handler = NULL;
	qp->tx_handler = NULL;
	qp->event_handler = NULL;

	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q)))
		free(entry, M_NTB_T);

	while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q)))
		free(entry, M_NTB_T);

	while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
		free(entry, M_NTB_T);

	nt->qp_bitmap &= ~(1 << qp->qp_num);
 * ntb_transport_create_queue - Create a new NTB transport layer queue
 * @rx_handler: receive callback function
 * @tx_handler: transmit callback function
 * @event_handler: event callback function
 *
 * Create a new NTB transport layer queue and provide the queue with a callback
 * routine for both transmit and receive.  The receive callback routine will be
 * used to pass up data when the transport has received it on the queue.  The
 * transmit callback routine will be called when the transport has completed the
 * transmission of the data on the queue and the data is ready to be freed.
 *
 * RETURNS: pointer to newly created ntb_queue, NULL on error.
struct ntb_transport_qp *
ntb_transport_create_queue(device_t dev, int q,
    const struct ntb_queue_handlers *handlers, void *data)

	struct ntb_transport_child *nc = device_get_ivars(dev);
	struct ntb_transport_ctx *nt = device_get_softc(device_get_parent(dev));
	struct ntb_queue_entry *entry;
	struct ntb_transport_qp *qp;

	if (q < 0 || q >= nc->qpcnt)

	qp = &nt->qp_vec[nc->qpoff + q];
	nt->qp_bitmap |= (1 << qp->qp_num);

	qp->rx_handler = handlers->rx_handler;
	qp->tx_handler = handlers->tx_handler;
	qp->event_handler = handlers->event_handler;

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = malloc(sizeof(*entry), M_NTB_T, M_WAITOK | M_ZERO);
		entry->cb_data = data;
		entry->len = transport_mtu;
		ntb_list_add(&qp->ntb_rx_q_lock, entry, &qp->rx_pend_q);

	for (i = 0; i < NTB_QP_DEF_NUM_ENTRIES; i++) {
		entry = malloc(sizeof(*entry), M_NTB_T, M_WAITOK | M_ZERO);
		ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);

	ntb_db_clear(dev, 1ull << qp->qp_num);

 * ntb_transport_link_up - Notify NTB transport of client readiness to use queue
 * @qp: NTB transport layer queue to be enabled
 *
 * Notify NTB transport layer of client readiness to use queue
ntb_transport_link_up(struct ntb_transport_qp *qp)

	struct ntb_transport_ctx *nt = qp->transport;

	qp->client_ready = true;

	ntb_printf(2, "qp %d client ready\n", qp->qp_num);

	callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp);

 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which an NTB
 * payload will be transmitted.  This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate ERRNO error value on error, or zero for success.
ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,

	struct ntb_queue_entry *entry;

	if (!qp->link_is_up || len == 0) {
		CTR0(KTR_NTB, "TX: link not up");

	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
		CTR0(KTR_NTB, "TX: could not get entry from tx_free_q");

	CTR1(KTR_NTB, "TX: got entry %p from tx_free_q", entry);

	mtx_lock(&qp->tx_lock);
	rc = ntb_process_tx(qp, entry);
	mtx_unlock(&qp->tx_lock);
		ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
		    "TX: process_tx failed. Returning entry %p to tx_free_q",
ntb_tx_copy_callback(void *data)

	struct ntb_queue_entry *entry = data;
	struct ntb_transport_qp *qp = entry->qp;
	struct ntb_payload_header *hdr = entry->x_hdr;

	iowrite32(entry->flags | NTBT_DESC_DONE_FLAG, &hdr->flags);
	CTR1(KTR_NTB, "TX: hdr %p set DESC_DONE", hdr);

	ntb_peer_db_set(qp->dev, 1ull << qp->qp_num);

	 * The entry length can only be zero if the packet is intended to be a
	 * "link down" or similar.  Since no payload is being sent in these
	 * cases, there is nothing to add to the completion queue.
	if (entry->len > 0) {
		qp->tx_bytes += entry->len;

		qp->tx_handler(qp, qp->cb_data, entry->buf,

	    "TX: entry %p sent. hdr->ver = %u, hdr->flags = 0x%x, Returning "
	    "to tx_free_q", entry, hdr->ver, hdr->flags);
	ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);

ntb_memcpy_tx(struct ntb_queue_entry *entry, void *offset)

	CTR2(KTR_NTB, "TX: copying %d bytes to offset %p", entry->len, offset);
	if (entry->buf != NULL) {
		m_copydata((struct mbuf *)entry->buf, 0, entry->len, offset);

		 * Ensure that the data is fully copied before setting the flags.

	ntb_tx_copy_callback(entry);
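/*
 * On-wire frame layout: each TX slot in the memory window holds the
 * payload at its base and a struct ntb_payload_header (len, ver, flags)
 * in its final bytes; the peer polls that trailing header for DESC_DONE.
 */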
ntb_async_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry)

	struct ntb_payload_header *hdr;

	offset = qp->tx_mw + qp->tx_max_frame * qp->tx_index;
	hdr = (struct ntb_payload_header *)((char *)offset + qp->tx_max_frame -
	    sizeof(struct ntb_payload_header));

	iowrite32(entry->len, &hdr->len);
	iowrite32(qp->tx_pkts, &hdr->ver);

	ntb_memcpy_tx(entry, offset);

ntb_process_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry)

	    "TX: process_tx: tx_pkts=%lu, tx_index=%u, remote entry=%u",
	    qp->tx_pkts, qp->tx_index, qp->remote_rx_info->entry);
	if (qp->tx_index == qp->remote_rx_info->entry) {
		CTR0(KTR_NTB, "TX: ring full");

	if (entry->len > qp->tx_max_frame - sizeof(struct ntb_payload_header)) {
		if (qp->tx_handler != NULL)
			qp->tx_handler(qp, qp->cb_data, entry->buf,

		ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
		    "TX: frame too big. returning entry %p to tx_free_q",

	CTR2(KTR_NTB, "TX: copying entry %p to index %u", entry, qp->tx_index);
	ntb_async_tx(qp, entry);

	qp->tx_index %= qp->tx_max_entry;
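/*
 * Per-queue RX taskqueue handler: drain descriptors until the ring is
 * empty, then re-check the doorbell before unmasking it so a doorbell
 * that raced with the final pass is not lost.
 */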
ntb_transport_rxc_db(void *arg, int pending __unused)

	struct ntb_transport_qp *qp = arg;
	uint64_t qp_mask = 1ull << qp->qp_num;

	CTR0(KTR_NTB, "RX: transport_rx");

	while ((rc = ntb_process_rxc(qp)) == 0)
	CTR1(KTR_NTB, "RX: process_rxc returned %d", rc);

	if ((ntb_db_read(qp->dev) & qp_mask) != 0) {
		/* If db is set, clear it and check queue once more. */
		ntb_db_clear(qp->dev, qp_mask);

	ntb_db_clear_mask(qp->dev, qp_mask);
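/*
 * Consume one RX descriptor: check the trailing header for DESC_DONE,
 * handle link-down frames and version mismatches, then hand the payload
 * off to ntb_memcpy_rx() and advance rx_index.
 */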
ntb_process_rxc(struct ntb_transport_qp *qp)

	struct ntb_payload_header *hdr;
	struct ntb_queue_entry *entry;

	offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
	hdr = (void *)(offset + qp->rx_max_frame -
	    sizeof(struct ntb_payload_header));

	CTR1(KTR_NTB, "RX: process_rxc rx_index = %u", qp->rx_index);
	if ((hdr->flags & NTBT_DESC_DONE_FLAG) == 0) {
		CTR0(KTR_NTB, "RX: hdr not done");

	if ((hdr->flags & NTBT_LINK_DOWN_FLAG) != 0) {
		CTR0(KTR_NTB, "RX: link down");
		ntb_qp_link_down(qp);

	if (hdr->ver != (uint32_t)qp->rx_pkts) {
		CTR2(KTR_NTB, "RX: ver != rx_pkts (%x != %lx). "
		    "Returning entry to rx_pend_q", hdr->ver, qp->rx_pkts);

	entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
		CTR0(KTR_NTB, "RX: No entries in rx_pend_q");

	callout_stop(&qp->rx_full);
	CTR1(KTR_NTB, "RX: rx entry %p from rx_pend_q", entry);

	entry->index = qp->rx_index;

	if (hdr->len > entry->len) {
		CTR2(KTR_NTB, "RX: len too long. Wanted %ju got %ju",
		    (uintmax_t)hdr->len, (uintmax_t)entry->len);

		entry->flags |= NTBT_DESC_DONE_FLAG;

		ntb_complete_rxc(qp);

	qp->rx_bytes += hdr->len;

	CTR1(KTR_NTB, "RX: received %ld rx_pkts", qp->rx_pkts);

	entry->len = hdr->len;

	ntb_memcpy_rx(qp, entry, offset);

	qp->rx_index %= qp->rx_max_entry;
ntb_memcpy_rx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry,

	struct ifnet *ifp = entry->cb_data;
	unsigned int len = entry->len;

	CTR2(KTR_NTB, "RX: copying %d bytes from offset %p", len, offset);

	entry->buf = (void *)m_devget(offset, len, 0, ifp, NULL);
	if (entry->buf == NULL)
		entry->len = -ENOMEM;

	/* Ensure that the data is globally visible before clearing the flag */

	CTR2(KTR_NTB, "RX: copied entry %p to mbuf %p.", entry, entry->buf);
	ntb_rx_copy_callback(qp, entry);

ntb_rx_copy_callback(struct ntb_transport_qp *qp, void *data)

	struct ntb_queue_entry *entry;

	entry->flags |= NTBT_DESC_DONE_FLAG;
	ntb_complete_rxc(qp);
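/*
 * Retire completed RX entries in ring order: clear the on-wire header,
 * return the slot credit to the peer via rx_info->entry, recycle the
 * entry onto rx_pend_q, and deliver the mbuf to the client handler.
 */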
ntb_complete_rxc(struct ntb_transport_qp *qp)

	struct ntb_queue_entry *entry;

	CTR0(KTR_NTB, "RX: rx_completion_task");

	mtx_lock_spin(&qp->ntb_rx_q_lock);

	while (!STAILQ_EMPTY(&qp->rx_post_q)) {
		entry = STAILQ_FIRST(&qp->rx_post_q);
		if ((entry->flags & NTBT_DESC_DONE_FLAG) == 0)

		entry->x_hdr->flags = 0;
		iowrite32(entry->index, &qp->rx_info->entry);

		STAILQ_REMOVE_HEAD(&qp->rx_post_q, entry);

		 * Re-initialize queue_entry for reuse; rx_handler takes
		 * ownership of the mbuf.
		entry->len = transport_mtu;
		entry->cb_data = qp->cb_data;

		STAILQ_INSERT_TAIL(&qp->rx_pend_q, entry, entry);

		mtx_unlock_spin(&qp->ntb_rx_q_lock);

		CTR2(KTR_NTB, "RX: completing entry %p, mbuf %p", entry, m);
		if (qp->rx_handler != NULL && qp->client_ready)
			qp->rx_handler(qp, qp->cb_data, m, len);

		mtx_lock_spin(&qp->ntb_rx_q_lock);

	mtx_unlock_spin(&qp->ntb_rx_q_lock);
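/*
 * Doorbell interrupt handler: mask and clear the doorbell bits of every
 * armed queue on this vector, then kick each queue's RX taskqueue; the
 * taskqueue unmasks the doorbell again once the ring is drained.
 */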
ntb_transport_doorbell_callback(void *data, uint32_t vector)

	struct ntb_transport_ctx *nt = data;
	struct ntb_transport_qp *qp;

	vec_mask = ntb_db_vector_mask(nt->dev, vector);
	vec_mask &= nt->qp_bitmap;
	if ((vec_mask & (vec_mask - 1)) != 0)
		vec_mask &= ntb_db_read(nt->dev);
	if (vec_mask != 0) {
		ntb_db_set_mask(nt->dev, vec_mask);
		ntb_db_clear(nt->dev, vec_mask);

	while (vec_mask != 0) {
		qp_num = ffsll(vec_mask) - 1;

		qp = &nt->qp_vec[qp_num];
		taskqueue_enqueue(qp->rxc_tq, &qp->rxc_db_work);

		vec_mask &= ~(1ull << qp_num);

/* Link Event handler */
ntb_transport_event_callback(void *data)

	struct ntb_transport_ctx *nt = data;

	if (ntb_link_is_up(nt->dev, &nt->link_speed, &nt->link_width)) {
		ntb_printf(1, "HW link up\n");
		callout_reset(&nt->link_work, 0, ntb_transport_link_work, nt);
		ntb_printf(1, "HW link down\n");
		taskqueue_enqueue(taskqueue_swi, &nt->link_cleanup);
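/*
 * Scratchpad handshake, run whenever the hardware link comes up: publish
 * our protocol version, queue/window counts, and MW sizes to the peer,
 * read back the peer's values, and only declare the transport link up
 * once both sides agree.
 */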
ntb_transport_link_work(void *arg)

	struct ntb_transport_ctx *nt = arg;
	struct ntb_transport_mw *mw;
	device_t dev = nt->dev;
	struct ntb_transport_qp *qp;
	uint64_t val64, size;

	/* send the local info, in the opposite order of the way we read it */

	for (i = 0; i < nt->mw_count; i++) {
		size = nt->mw_vec[i].tx_size;
		KASSERT(size <= UINT32_MAX, ("size too big (%jx)", size));
		ntb_peer_spad_write(dev, NTBTC_MW0_SZ + i, size);
	ntb_peer_spad_write(dev, NTBTC_QP_LINKS, 0);
	ntb_peer_spad_write(dev, NTBTC_PARAMS,
	    (nt->qp_count << 24) | (nt->mw_count << 16) |
	    NTB_TRANSPORT_VERSION);

	for (i = 0; i < nt->mw_count; i++) {
		size = nt->mw_vec[i].tx_size;
		ntb_peer_spad_write(dev, NTBT_MW0_SZ_HIGH + (i * 2),
		ntb_peer_spad_write(dev, NTBT_MW0_SZ_LOW + (i * 2), size);
	ntb_peer_spad_write(dev, NTBT_NUM_MWS, nt->mw_count);
	ntb_peer_spad_write(dev, NTBT_NUM_QPS, nt->qp_count);
	ntb_peer_spad_write(dev, NTBT_QP_LINKS, 0);
	ntb_peer_spad_write(dev, NTBT_VERSION, NTB_TRANSPORT_VERSION);

	/* Query the remote side for its info */

	ntb_spad_read(dev, NTBTC_PARAMS, &val);
	if (val != ((nt->qp_count << 24) | (nt->mw_count << 16) |
	    NTB_TRANSPORT_VERSION))

	ntb_spad_read(dev, NTBT_VERSION, &val);
	if (val != NTB_TRANSPORT_VERSION)

	ntb_spad_read(dev, NTBT_NUM_QPS, &val);
	if (val != nt->qp_count)

	ntb_spad_read(dev, NTBT_NUM_MWS, &val);
	if (val != nt->mw_count)

	for (i = 0; i < nt->mw_count; i++) {
		ntb_spad_read(dev, NTBTC_MW0_SZ + i, &val);

		ntb_spad_read(dev, NTBT_MW0_SZ_HIGH + (i * 2), &val);
		val64 = (uint64_t)val << 32;

		ntb_spad_read(dev, NTBT_MW0_SZ_LOW + (i * 2), &val);

		mw = &nt->mw_vec[i];
		mw->rx_size = val64;
		val64 = roundup(val64, mw->xlat_align_size);
		if (mw->buff_size != val64) {
			rc = ntb_set_mw(nt, i, val64);
				ntb_printf(0, "link up set mw%d fails, rc %d\n",

		/* Notify HW of the memory location of the receive buffer */
		rc = ntb_mw_set_trans(nt->dev, i, mw->dma_addr,
			ntb_printf(0, "link up mw%d xlat fails, rc %d\n",

	nt->link_is_up = true;
	ntb_printf(1, "transport link up\n");

	for (i = 0; i < nt->qp_count; i++) {
		qp = &nt->qp_vec[i];

		ntb_transport_setup_qp_mw(nt, i);

		if (qp->client_ready)
			callout_reset(&qp->link_work, 0, ntb_qp_link_work, qp);

	for (i = 0; i < nt->mw_count; i++)

	if (ntb_link_is_up(dev, &nt->link_speed, &nt->link_width))
		callout_reset(&nt->link_work,
		    NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_transport_link_work, nt);
struct ntb_load_cb_args {

ntb_load_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)

	struct ntb_load_cb_args *cba = (struct ntb_load_cb_args *)xsc;

	if (!(cba->error = error))
		cba->addr = segs[0].ds_addr;
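/*
 * (Re)allocate the local receive buffer for a memory window: round the
 * requested size up to the hardware alignment, then create a busdma tag,
 * allocate, and load the buffer so the peer can be pointed at mw->dma_addr.
 */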
ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, size_t size)

	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
	struct ntb_load_cb_args cba;

	buff_size = roundup(size, mw->xlat_align_size);

	/* No need to re-setup */
	if (mw->buff_size == buff_size)

	if (mw->buff_size != 0)
		ntb_free_mw(nt, num_mw);

	/* Alloc memory for receiving data.  Must be aligned */
	mw->buff_size = buff_size;

	if (bus_dma_tag_create(bus_get_dma_tag(nt->dev), mw->xlat_align, 0,
	    mw->addr_limit, BUS_SPACE_MAXADDR,
	    NULL, NULL, mw->buff_size, 1, mw->buff_size,
	    0, NULL, NULL, &mw->dma_tag)) {
		ntb_printf(0, "Unable to create MW tag of size %zu\n",

	if (bus_dmamem_alloc(mw->dma_tag, (void **)&mw->virt_addr,
	    BUS_DMA_WAITOK | BUS_DMA_ZERO, &mw->dma_map)) {
		bus_dma_tag_destroy(mw->dma_tag);
		ntb_printf(0, "Unable to allocate MW buffer of size %zu\n",

	if (bus_dmamap_load(mw->dma_tag, mw->dma_map, mw->virt_addr,
	    mw->buff_size, ntb_load_cb, &cba, BUS_DMA_NOWAIT) || cba.error) {
		bus_dmamem_free(mw->dma_tag, mw->virt_addr, mw->dma_map);
		bus_dma_tag_destroy(mw->dma_tag);
		ntb_printf(0, "Unable to load MW buffer of size %zu\n",

	mw->dma_addr = cba.addr;
ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)

	struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];

	if (mw->virt_addr == NULL)

	ntb_mw_clear_trans(nt->dev, num_mw);
	bus_dmamap_unload(mw->dma_tag, mw->dma_map);
	bus_dmamem_free(mw->dma_tag, mw->virt_addr, mw->dma_map);
	bus_dma_tag_destroy(mw->dma_tag);

	mw->virt_addr = NULL;
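/*
 * Receive-side counterpart of ntb_transport_init_queue(): carve this
 * queue's slice out of the locally allocated buffer, size the RX ring,
 * and zero the per-slot payload headers.
 */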
ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, unsigned int qp_num)

	struct ntb_transport_qp *qp = &nt->qp_vec[qp_num];
	struct ntb_transport_mw *mw;
	unsigned num_qps_mw, mw_num, mw_count;

	mw_count = nt->mw_count;
	mw_num = QP_TO_MW(nt, qp_num);
	mw = &nt->mw_vec[mw_num];

	if (mw->virt_addr == NULL)

	if (mw_num < nt->qp_count % mw_count)
		num_qps_mw = nt->qp_count / mw_count + 1;
		num_qps_mw = nt->qp_count / mw_count;

	rx_size = mw->rx_size / num_qps_mw;
	qp->rx_buff = mw->virt_addr + rx_size * (qp_num / mw_count);
	rx_size -= sizeof(struct ntb_rx_info);

	qp->remote_rx_info = (void *)(qp->rx_buff + rx_size);

	/* Due to housekeeping, there must be at least 2 buffers */
	qp->rx_max_frame = qmin(transport_mtu, rx_size / 2);
	qp->rx_max_entry = rx_size / qp->rx_max_frame;

	qp->remote_rx_info->entry = qp->rx_max_entry - 1;

	/* Set up the hdr offsets with 0s */
	for (i = 0; i < qp->rx_max_entry; i++) {
		offset = (void *)(qp->rx_buff + qp->rx_max_frame * (i + 1) -
		    sizeof(struct ntb_payload_header));
		memset(offset, 0, sizeof(struct ntb_payload_header));
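/*
 * Per-queue link negotiation: advertise which local queues are
 * client-ready via the QP_LINKS scratchpad, poll for the peer's matching
 * bit, and retry while the transport-level link remains up.
 */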
ntb_qp_link_work(void *arg)

	struct ntb_transport_qp *qp = arg;
	device_t dev = qp->dev;
	struct ntb_transport_ctx *nt = qp->transport;

	/* Report queues that are up on our side */
	for (i = 0, val = 0; i < nt->qp_count; i++) {
		if (nt->qp_vec[i].client_ready)

	ntb_peer_spad_write(dev, NTBT_QP_LINKS, val);

	/* See if the remote side is up */
	ntb_spad_read(dev, NTBT_QP_LINKS, &val);
	if ((val & (1ull << qp->qp_num)) != 0) {
		ntb_printf(2, "qp %d link up\n", qp->qp_num);
		qp->link_is_up = true;

		if (qp->event_handler != NULL)
			qp->event_handler(qp->cb_data, NTB_LINK_UP);

		ntb_db_clear_mask(dev, 1ull << qp->qp_num);
	} else if (nt->link_is_up)
		callout_reset(&qp->link_work,
		    NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_qp_link_work, qp);
/* Link down event */
ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)

	struct ntb_transport_qp *qp;

	callout_drain(&nt->link_work);

	/* Pass along the info to any clients */
	for (i = 0; i < nt->qp_count; i++) {
		if ((nt->qp_bitmap & (1 << i)) != 0) {
			qp = &nt->qp_vec[i];
			ntb_qp_link_cleanup(qp);
			callout_drain(&qp->link_work);

	 * The scratchpad registers keep their values if the remote side
	 * goes down; blast them now to give them a sane value the next
	 * time they are accessed.
	ntb_spad_clear(nt->dev);

ntb_transport_link_cleanup_work(void *arg, int pending __unused)

	ntb_transport_link_cleanup(arg);

ntb_qp_link_down(struct ntb_transport_qp *qp)

	ntb_qp_link_cleanup(qp);

ntb_qp_link_down_reset(struct ntb_transport_qp *qp)

	qp->link_is_up = false;
	ntb_db_set_mask(qp->dev, 1ull << qp->qp_num);

	qp->tx_index = qp->rx_index = 0;
	qp->tx_bytes = qp->rx_bytes = 0;
	qp->tx_pkts = qp->rx_pkts = 0;

	qp->rx_ring_empty = 0;
	qp->tx_ring_full = 0;

	qp->rx_err_no_buf = qp->tx_err_no_buf = 0;
	qp->rx_err_oflow = qp->rx_err_ver = 0;

ntb_qp_link_cleanup(struct ntb_transport_qp *qp)

	callout_drain(&qp->link_work);
	ntb_qp_link_down_reset(qp);

	if (qp->event_handler != NULL)
		qp->event_handler(qp->cb_data, NTB_LINK_DOWN);
/* Link commanded down */
 * ntb_transport_link_down - Notify NTB transport to no longer enqueue data
 * @qp: NTB transport layer queue to be disabled
 *
 * Notify NTB transport layer of client's desire to no longer receive data on
 * transport queue specified.  It is the client's responsibility to ensure all
 * entries on the queue are purged or otherwise handled appropriately.
ntb_transport_link_down(struct ntb_transport_qp *qp)

	struct ntb_transport_ctx *nt = qp->transport;

	qp->client_ready = false;
	for (i = 0, val = 0; i < nt->qp_count; i++) {
		if (nt->qp_vec[i].client_ready)

	ntb_peer_spad_write(qp->dev, NTBT_QP_LINKS, val);

	ntb_send_link_down(qp);

	callout_drain(&qp->link_work);

 * ntb_transport_link_query - Query transport link state
 * @qp: NTB transport layer queue to be queried
 *
 * Query connectivity to the remote system of the NTB transport queue
 *
 * RETURNS: true for link up or false for link down
ntb_transport_link_query(struct ntb_transport_qp *qp)

	return (qp->link_is_up);
 * ntb_transport_link_speed - Query transport link speed
 * @qp: NTB transport layer queue to be queried
 *
 * Query connection speed to the remote system of the NTB transport queue
 *
 * RETURNS: link speed in bits per second
ntb_transport_link_speed(struct ntb_transport_qp *qp)

	struct ntb_transport_ctx *nt = qp->transport;

	if (!nt->link_is_up)
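	/*
	 * Per-lane data rate: the raw PCIe signaling rate scaled by the
	 * line-encoding overhead (8b/10b for Gen1/2, 128b/130b for Gen3/4).
	 */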
	switch (nt->link_speed) {
	case NTB_SPEED_GEN1:
		rate = 2500000000 * 8 / 10;
	case NTB_SPEED_GEN2:
		rate = 5000000000 * 8 / 10;
	case NTB_SPEED_GEN3:
		rate = 8000000000 * 128 / 130;
	case NTB_SPEED_GEN4:
		rate = 16000000000 * 128 / 130;

	if (nt->link_width <= 0)

	return (rate * nt->link_width);
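/*
 * Send a zero-length frame flagged NTBT_LINK_DOWN_FLAG so the peer can
 * tear down its side of the queue, waiting briefly for a free TX entry.
 */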
ntb_send_link_down(struct ntb_transport_qp *qp)

	struct ntb_queue_entry *entry;

	if (!qp->link_is_up)

	for (i = 0; i < NTB_LINK_DOWN_TIMEOUT; i++) {
		entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
		pause("NTB Wait for link down", hz / 10);

	entry->cb_data = NULL;
	entry->flags = NTBT_LINK_DOWN_FLAG;

	mtx_lock(&qp->tx_lock);
	rc = ntb_process_tx(qp, entry);
	mtx_unlock(&qp->tx_lock);
		printf("ntb: Failed to send link down\n");

	ntb_qp_link_down_reset(qp);
/* List Management */

ntb_list_add(struct mtx *lock, struct ntb_queue_entry *entry,
    struct ntb_queue_list *list)

	mtx_lock_spin(lock);
	STAILQ_INSERT_TAIL(list, entry, entry);
	mtx_unlock_spin(lock);

static struct ntb_queue_entry *
ntb_list_rm(struct mtx *lock, struct ntb_queue_list *list)

	struct ntb_queue_entry *entry;

	mtx_lock_spin(lock);
	if (STAILQ_EMPTY(list)) {

	entry = STAILQ_FIRST(list);
	STAILQ_REMOVE_HEAD(list, entry);

	mtx_unlock_spin(lock);

static struct ntb_queue_entry *
ntb_list_mv(struct mtx *lock, struct ntb_queue_list *from,
    struct ntb_queue_list *to)

	struct ntb_queue_entry *entry;

	mtx_lock_spin(lock);
	if (STAILQ_EMPTY(from)) {

	entry = STAILQ_FIRST(from);
	STAILQ_REMOVE_HEAD(from, entry);
	STAILQ_INSERT_TAIL(to, entry, entry);

	mtx_unlock_spin(lock);
 * ntb_transport_qp_num - Query the qp number
 * @qp: NTB transport layer queue to be queried
 *
 * Query qp number of the NTB transport queue
 *
 * RETURNS: a zero based number specifying the qp number
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)

	return (qp->qp_num);

 * ntb_transport_max_size - Query the max payload size of a qp
 * @qp: NTB transport layer queue to be queried
 *
 * Query the maximum payload size permissible on the given qp
 *
 * RETURNS: the max payload size of a qp
ntb_transport_max_size(struct ntb_transport_qp *qp)

	return (qp->tx_max_frame - sizeof(struct ntb_payload_header));
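/*
 * Number of free TX descriptors: the distance from tx_index (producer)
 * back to the peer-reported consumer index, modulo the ring size.
 */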
ntb_transport_tx_free_entry(struct ntb_transport_qp *qp)

	unsigned int head = qp->tx_index;
	unsigned int tail = qp->remote_rx_info->entry;

	return (tail >= head ? tail - head : qp->tx_max_entry + tail - head);
static device_method_t ntb_transport_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ntb_transport_probe),
	DEVMETHOD(device_attach, ntb_transport_attach),
	DEVMETHOD(device_detach, ntb_transport_detach),

	DEVMETHOD(bus_child_location_str, ntb_transport_child_location_str),
	DEVMETHOD(bus_print_child, ntb_transport_print_child),

devclass_t ntb_transport_devclass;
static DEFINE_CLASS_0(ntb_transport, ntb_transport_driver,
    ntb_transport_methods, sizeof(struct ntb_transport_ctx));
DRIVER_MODULE(ntb_transport, ntb_hw, ntb_transport_driver,
    ntb_transport_devclass, NULL, NULL);
MODULE_DEPEND(ntb_transport, ntb, 1, 1, 1);
MODULE_VERSION(ntb_transport, 1);