/*
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_CM

#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp6.h>

#include <rdma/ib_cm.h>
#include <rdma/ib_cache.h>
#include <linux/delay.h>

int ipoib_max_conn_qp = 128;

module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444);
MODULE_PARM_DESC(max_nonsrq_conn_qp,
		 "Max number of connected-mode QPs per interface "
		 "(applied only if shared receive queue is not available)");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
MODULE_PARM_DESC(cm_data_debug_level,
		 "Enable data path debug tracing for connected mode if > 0");
#endif

#define IPOIB_CM_IETF_ID 0x1000000000000000ULL

#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
#define IPOIB_CM_RX_TIMEOUT     (2 * 256 * HZ)
#define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)
static struct ib_qp_attr ipoib_cm_err_attr = {
	.qp_state = IB_QPS_ERR
};

#define IPOIB_CM_RX_DRAIN_WRID 0xffffffff

static struct ib_send_wr ipoib_cm_rx_drain_wr = {
	.wr_id = IPOIB_CM_RX_DRAIN_WRID,
	.opcode = IB_WR_SEND,
};
static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event);

static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, struct ipoib_cm_rx_buf *rx_req)
{
	ipoib_dma_unmap_rx(priv, (struct ipoib_rx_buf *)rx_req);
}
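
/*
 * Repost a receive buffer on the shared receive queue.  One scatter/gather
 * entry is built per mbuf in the chain; on failure the buffer is unmapped
 * and freed so the ring slot is left empty.
 */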
static int ipoib_cm_post_receive_srq(struct ipoib_dev_priv *priv, int id)
{
	struct ib_recv_wr *bad_wr;
	struct ipoib_rx_buf *rx_req;
	struct mbuf *m;
	int ret;
	int i;

	rx_req = (struct ipoib_rx_buf *)&priv->cm.srq_ring[id];
	for (m = rx_req->mb, i = 0; m != NULL; m = m->m_next, i++) {
		priv->cm.rx_sge[i].addr = rx_req->mapping[i];
		priv->cm.rx_sge[i].length = m->m_len;
	}

	priv->cm.rx_wr.num_sge = i;
	priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

	ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
		ipoib_dma_unmap_rx(priv, rx_req);
		m_freem(priv->cm.srq_ring[id].mb);
		priv->cm.srq_ring[id].mb = NULL;
	}

	return ret;
}
static int ipoib_cm_post_receive_nonsrq(struct ipoib_dev_priv *priv,
					struct ipoib_cm_rx *rx,
					struct ib_recv_wr *wr,
					struct ib_sge *sge, int id)
{
	struct ipoib_rx_buf *rx_req;
	struct ib_recv_wr *bad_wr;
	struct mbuf *m;
	int ret;
	int i;

	rx_req = (struct ipoib_rx_buf *)&rx->rx_ring[id];
	for (m = rx_req->mb, i = 0; m != NULL; m = m->m_next, i++) {
		sge[i].addr = rx_req->mapping[i];
		sge[i].length = m->m_len;
	}

	wr->num_sge = i;
	wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

	ret = ib_post_recv(rx->qp, wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
		ipoib_dma_unmap_rx(priv, rx_req);
		m_freem(rx->rx_ring[id].mb);
		rx->rx_ring[id].mb = NULL;
	}

	return ret;
}
static struct mbuf *
ipoib_cm_alloc_rx_mb(struct ipoib_dev_priv *priv, struct ipoib_cm_rx_buf *rx_req)
{
	return ipoib_alloc_map_mb(priv, (struct ipoib_rx_buf *)rx_req,
	    priv->cm.max_cm_mtu);
}
static void ipoib_cm_free_rx_ring(struct ipoib_dev_priv *priv,
				  struct ipoib_cm_rx_buf *rx_ring)
{
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (rx_ring[i].mb) {
			ipoib_cm_dma_unmap_rx(priv, &rx_ring[i]);
			m_freem(rx_ring[i].mb);
		}

	kfree(rx_ring);
}
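
/*
 * Post a single drain WR on one of the QPs that were moved to the error
 * state.  Its "flush error" completion (tagged IPOIB_CM_RX_DRAIN_WRID)
 * arrives on the shared receive CQ behind all previously posted receives,
 * so seeing it means the flush list can be handed over to the drain list.
 */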
static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
{
	struct ib_send_wr *bad_wr;
	struct ipoib_cm_rx *p;

	/* We only reserved 1 extra slot in CQ for drain WRs, so
	 * make sure we have at most 1 outstanding WR. */
	if (list_empty(&priv->cm.rx_flush_list) ||
	    !list_empty(&priv->cm.rx_drain_list))
		return;

	/*
	 * QPs on flush list are error state.  This way, a "flush
	 * error" WC will be immediately generated for each WR we post.
	 */
	p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
	if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
		ipoib_warn(priv, "failed to post drain wr\n");

	list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
}
static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
{
	struct ipoib_cm_rx *p = ctx;
	struct ipoib_dev_priv *priv = p->priv;
	unsigned long flags;

	if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
		return;

	spin_lock_irqsave(&priv->lock, flags);
	list_move(&p->list, &priv->cm.rx_flush_list);
	p->state = IPOIB_CM_RX_FLUSH;
	ipoib_cm_start_rx_drain(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}
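
/*
 * Create the RC QP for a passive (receive-side) connection.  The send side
 * is sized for a single WR only, which is used for the drain WR above.
 */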
static struct ib_qp *ipoib_cm_create_rx_qp(struct ipoib_dev_priv *priv,
					   struct ipoib_cm_rx *p)
{
	struct ib_qp_init_attr attr = {
		.event_handler = ipoib_cm_rx_event_handler,
		.send_cq = priv->recv_cq, /* For drain WR */
		.recv_cq = priv->recv_cq,
		.srq = priv->cm.srq,
		.cap.max_send_wr = 1, /* For drain WR */
		.cap.max_send_sge = 1,
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
		.qp_context = p,
	};

	if (!ipoib_cm_has_srq(priv)) {
		attr.cap.max_recv_wr = ipoib_recvq_size;
		attr.cap.max_recv_sge = priv->cm.num_frags;
	}

	return ib_create_qp(priv->pd, &attr);
}
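
/*
 * Walk a passive QP through INIT -> RTR -> RTS using the attributes the CM
 * derives from the connection request.  Failures after RTR are tolerated;
 * see the firmware work-around comment below.
 */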
static int ipoib_cm_modify_rx_qp(struct ipoib_dev_priv *priv,
				 struct ib_cm_id *cm_id, struct ib_qp *qp,
				 unsigned psn)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
		return ret;
	}
	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}
	qp_attr.rq_psn = psn;
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	/*
	 * Current Mellanox HCA firmware won't generate completions
	 * with error for drain WRs unless the QP has been moved to
	 * RTS first. This work-around leaves a window where a QP has
	 * moved to error asynchronously, but this will eventually get
	 * fixed in firmware, so let's not error out if modify QP
	 * fails.
	 */
	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return 0;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return 0;
	}

	return 0;
}
static void ipoib_cm_init_rx_wr(struct ipoib_dev_priv *priv,
				struct ib_recv_wr *wr,
				struct ib_sge *sge)
{
	int i;

	for (i = 0; i < IPOIB_CM_RX_SG; i++)
		sge[i].lkey = priv->mr->lkey;

	wr->next = NULL;
	wr->sg_list = sge;
	wr->num_sge = priv->cm.num_frags;
}
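
/*
 * Allocate and fill a per-connection receive ring for devices without SRQ
 * support.  The number of such connections is bounded by ipoib_max_conn_qp.
 */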
static int ipoib_cm_nonsrq_init_rx(struct ipoib_dev_priv *priv,
		struct ib_cm_id *cm_id, struct ipoib_cm_rx *rx)
{
	struct {
		struct ib_recv_wr wr;
		struct ib_sge sge[IPOIB_CM_RX_SG];
	} *t;
	int ret;
	int i;

	rx->rx_ring = kzalloc(ipoib_recvq_size * sizeof *rx->rx_ring, GFP_KERNEL);
	if (!rx->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
		       priv->ca->name, ipoib_recvq_size);
		return -ENOMEM;
	}

	memset(rx->rx_ring, 0, ipoib_recvq_size * sizeof *rx->rx_ring);

	t = kmalloc(sizeof *t, GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_free;
	}

	ipoib_cm_init_rx_wr(priv, &t->wr, t->sge);

	spin_lock_irq(&priv->lock);

	if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
		spin_unlock_irq(&priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);
		ret = -EINVAL;
		goto err_free;
	} else
		++priv->cm.nonsrq_conn_qp;

	spin_unlock_irq(&priv->lock);

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_cm_alloc_rx_mb(priv, &rx->rx_ring[i])) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			ret = -ENOMEM;
			goto err_count;
		}
		ret = ipoib_cm_post_receive_nonsrq(priv, rx, &t->wr, t->sge, i);
		if (ret) {
			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq "
				   "failed for buf %d\n", i);
			ret = -EIO;
			goto err_count;
		}
	}

	rx->recv_count = ipoib_recvq_size;

	kfree(t);

	return 0;

err_count:
	spin_lock_irq(&priv->lock);
	--priv->cm.nonsrq_conn_qp;
	spin_unlock_irq(&priv->lock);

err_free:
	kfree(t);
	ipoib_cm_free_rx_ring(priv, rx->rx_ring);

	return ret;
}
static int ipoib_cm_send_rep(struct ipoib_dev_priv *priv, struct ib_cm_id *cm_id,
			     struct ib_qp *qp, struct ib_cm_req_event_param *req,
			     unsigned psn)
{
	struct ipoib_cm_data data = {};
	struct ib_cm_rep_param rep = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(priv->cm.max_cm_mtu);

	rep.private_data = &data;
	rep.private_data_len = sizeof data;
	rep.flow_control = 0;
	rep.rnr_retry_count = req->rnr_retry_count;
	rep.srq = ipoib_cm_has_srq(priv);
	rep.qp_num = qp->qp_num;
	rep.starting_psn = psn;
	return ib_send_cm_rep(cm_id, &rep);
}
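
/*
 * Handle an incoming connection request: create and bring up the passive
 * QP, arm the stale-connection timer, and answer with a REP carrying our
 * datagram QPN and the largest connected-mode MTU we accept.
 */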
static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct ipoib_dev_priv *priv = cm_id->context;
	struct ipoib_cm_rx *p;
	unsigned psn;
	int ret;

	ipoib_dbg(priv, "REQ arrived\n");
	p = kzalloc(sizeof *p, GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	p->priv = priv;
	p->id = cm_id;
	cm_id->context = p;
	p->state = IPOIB_CM_RX_LIVE;
	p->jiffies = jiffies;
	INIT_LIST_HEAD(&p->list);

	p->qp = ipoib_cm_create_rx_qp(priv, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		goto err_qp;
	}

	psn = random() & 0xffffff;
	ret = ipoib_cm_modify_rx_qp(priv, cm_id, p->qp, psn);
	if (ret)
		goto err_modify;

	if (!ipoib_cm_has_srq(priv)) {
		ret = ipoib_cm_nonsrq_init_rx(priv, cm_id, p);
		if (ret)
			goto err_modify;
	}

	spin_lock_irq(&priv->lock);
	queue_delayed_work(ipoib_workqueue,
			   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	/* Add this entry to passive ids list head, but do not re-add it
	 * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
	p->jiffies = jiffies;
	if (p->state == IPOIB_CM_RX_LIVE)
		list_move(&p->list, &priv->cm.passive_ids);
	spin_unlock_irq(&priv->lock);

	ret = ipoib_cm_send_rep(priv, cm_id, p->qp, &event->param.req_rcvd, psn);
	if (ret) {
		ipoib_warn(priv, "failed to send REP: %d\n", ret);
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
	}
	return 0;

err_modify:
	ib_destroy_qp(p->qp);
err_qp:
	kfree(p);
	return ret;
}
static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_rx *p;
	struct ipoib_dev_priv *priv;

	switch (event->event) {
	case IB_CM_REQ_RECEIVED:
		return ipoib_cm_req_handler(cm_id, event);
	case IB_CM_DREQ_RECEIVED:
		p = cm_id->context;
		ib_send_cm_drep(cm_id, NULL, 0);
		/* Fall through */
	case IB_CM_REJ_RECEIVED:
		p = cm_id->context;
		priv = p->priv;
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
		/* Fall through */
	default:
		return 0;
	}
}
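
/*
 * Receive completion handler for connected mode.  Drain WRIDs are handled
 * specially; for normal completions a replacement mbuf is allocated before
 * the received one is passed up, and the ring slot is reposted at the end.
 */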
void ipoib_cm_handle_rx_wc(struct ipoib_dev_priv *priv, struct ib_wc *wc)
{
	struct ipoib_cm_rx_buf saverx;
	struct ipoib_cm_rx_buf *rx_ring;
	unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
	struct ifnet *dev = priv->dev;
	struct mbuf *mb, *newmb;
	struct ipoib_cm_rx *p;
	int has_srq;
	u_short proto;

	ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {
			spin_lock(&priv->lock);
			list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
			ipoib_cm_start_rx_drain(priv);
			if (priv->cm.id != NULL)
				queue_work(ipoib_workqueue,
					   &priv->cm.rx_reap_task);
			spin_unlock(&priv->lock);
		} else
			ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
				   wr_id, ipoib_recvq_size);
		return;
	}

	p = wc->qp->qp_context;

	has_srq = ipoib_cm_has_srq(priv);
	rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;

	mb = rx_ring[wr_id].mb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		ipoib_dbg(priv, "cm recv error "
			   "(status=%d, wrid=%d vend_err %x)\n",
			   wc->status, wr_id, wc->vendor_err);
		++dev->if_ierrors;
		if (has_srq)
			goto repost;
		else {
			if (!--p->recv_count) {
				spin_lock(&priv->lock);
				list_move(&p->list, &priv->cm.rx_reap_list);
				queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
				spin_unlock(&priv->lock);
			}
			return;
		}
	}

	if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
		if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
			p->jiffies = jiffies;
			/* Move this entry to list head, but do not re-add it
			 * if it has been moved out of list. */
			if (p->state == IPOIB_CM_RX_LIVE)
				list_move(&p->list, &priv->cm.passive_ids);
		}
	}

	memcpy(&saverx, &rx_ring[wr_id], sizeof(saverx));
	newmb = ipoib_cm_alloc_rx_mb(priv, &rx_ring[wr_id]);
	if (unlikely(!newmb)) {
		/*
		 * If we can't allocate a new RX buffer, dump
		 * this packet and reuse the old buffer.
		 */
		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
		++dev->if_iqdrops;
		memcpy(&rx_ring[wr_id], &saverx, sizeof(saverx));
		goto repost;
	}

	ipoib_cm_dma_unmap_rx(priv, &saverx);

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	ipoib_dma_mb(priv, mb, wc->byte_len);

	++dev->if_ipackets;
	dev->if_ibytes += mb->m_pkthdr.len;

	mb->m_pkthdr.rcvif = dev;
	proto = *mtod(mb, uint16_t *);
	m_adj(mb, IPOIB_ENCAP_LEN);

	IPOIB_MTAP_PROTO(dev, mb, proto);
	ipoib_demux(dev, mb, ntohs(proto));

repost:
	if (has_srq) {
		if (unlikely(ipoib_cm_post_receive_srq(priv, wr_id)))
			ipoib_warn(priv, "ipoib_cm_post_receive_srq failed "
				   "for buf %d\n", wr_id);
	} else {
		if (unlikely(ipoib_cm_post_receive_nonsrq(priv, p,
							  &priv->cm.rx_wr,
							  priv->cm.rx_sge,
							  wr_id))) {
			--p->recv_count;
			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed "
				   "for buf %d\n", wr_id);
		}
	}
}
static inline int post_send(struct ipoib_dev_priv *priv,
			    struct ipoib_cm_tx *tx,
			    struct ipoib_cm_tx_buf *tx_req,
			    unsigned int wr_id)
{
	struct ib_send_wr *bad_wr;
	struct mbuf *mb = tx_req->mb;
	u64 *mapping = tx_req->mapping;
	struct mbuf *m;
	int i;

	for (m = mb, i = 0; m != NULL; m = m->m_next, i++) {
		priv->tx_sge[i].addr = mapping[i];
		priv->tx_sge[i].length = m->m_len;
	}
	priv->tx_wr.num_sge = i;
	priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM;
	priv->tx_wr.opcode = IB_WR_SEND;

	return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
}
void ipoib_cm_send(struct ipoib_dev_priv *priv, struct mbuf *mb, struct ipoib_cm_tx *tx)
{
	struct ipoib_cm_tx_buf *tx_req;
	struct ifnet *dev = priv->dev;

	if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
		while (ipoib_poll_tx(priv)); /* nothing */

	m_adj(mb, sizeof(struct ipoib_pseudoheader));
	if (unlikely(mb->m_pkthdr.len > tx->mtu)) {
		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
			   mb->m_pkthdr.len, tx->mtu);
		++dev->if_oerrors;
		ipoib_cm_mb_too_long(priv, mb, IPOIB_CM_MTU(tx->mtu));
		return;
	}

	ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
		       tx->tx_head, mb->m_pkthdr.len, tx->qp->qp_num);

	/*
	 * We put the mb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
	tx_req->mb = mb;
	if (unlikely(ipoib_dma_map_tx(priv->ca, (struct ipoib_tx_buf *)tx_req,
				      priv->cm.num_frags))) {
		++dev->if_oerrors;
		if (tx_req->mb)
			m_freem(tx_req->mb);
		return;
	}

	if (unlikely(post_send(priv, tx, tx_req, tx->tx_head & (ipoib_sendq_size - 1)))) {
		ipoib_warn(priv, "post_send failed\n");
		++dev->if_oerrors;
		ipoib_dma_unmap_tx(priv->ca, (struct ipoib_tx_buf *)tx_req);
		m_freem(mb);
	} else {
		++tx->tx_head;

		if (++priv->tx_outstanding == ipoib_sendq_size) {
			ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
				  tx->qp->qp_num);
			if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
				ipoib_warn(priv, "request notify on send CQ failed\n");
			dev->if_drv_flags |= IFF_DRV_OACTIVE;
		}
	}
}
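
/*
 * Send completion handler for connected mode: unmap and free the mbuf,
 * reopen the interface queue if it was stopped, and tear the connection
 * down on any error other than a flush.
 */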
void ipoib_cm_handle_tx_wc(struct ipoib_dev_priv *priv, struct ib_wc *wc)
{
	struct ipoib_cm_tx *tx = wc->qp->qp_context;
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
	struct ifnet *dev = priv->dev;
	struct ipoib_cm_tx_buf *tx_req;

	ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &tx->tx_ring[wr_id];

	ipoib_dma_unmap_tx(priv->ca, (struct ipoib_tx_buf *)tx_req);

	/* FIXME: is this right? Shouldn't we only increment on success? */
	++dev->if_opackets;
	dev->if_obytes += tx_req->mb->m_pkthdr.len;

	m_freem(tx_req->mb);

	++tx->tx_tail;
	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    (dev->if_drv_flags & IFF_DRV_OACTIVE) != 0 &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		dev->if_drv_flags &= ~IFF_DRV_OACTIVE;

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR) {
		struct ipoib_path *path;

		ipoib_dbg(priv, "failed cm send event "
			   "(status=%d, wrid=%d vend_err %x)\n",
			   wc->status, wr_id, wc->vendor_err);

		path = tx->path;
		if (path) {
			path->cm = NULL;
			rb_erase(&path->rb_node, &priv->path_tree);
			list_del(&path->list);
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
	}
}
int ipoib_cm_dev_open(struct ipoib_dev_priv *priv)
{
	int ret;

	if (!IPOIB_CM_SUPPORTED(IF_LLADDR(priv->dev)))
		return 0;

	priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, priv);
	if (IS_ERR(priv->cm.id)) {
		printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
		ret = PTR_ERR(priv->cm.id);
		goto err_cm;
	}

	ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
			   0, NULL);
	if (ret) {
		printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
		       IPOIB_CM_IETF_ID | priv->qp->qp_num);
		goto err_listen;
	}

	return 0;

err_listen:
	ib_destroy_cm_id(priv->cm.id);
err_cm:
	priv->cm.id = NULL;
	return ret;
}
static void ipoib_cm_free_rx_reap_list(struct ipoib_dev_priv *priv)
{
	struct ipoib_cm_rx *rx, *n;
	LIST_HEAD(list);

	spin_lock_irq(&priv->lock);
	list_splice_init(&priv->cm.rx_reap_list, &list);
	spin_unlock_irq(&priv->lock);

	list_for_each_entry_safe(rx, n, &list, list) {
		ib_destroy_cm_id(rx->id);
		ib_destroy_qp(rx->qp);
		if (!ipoib_cm_has_srq(priv)) {
			ipoib_cm_free_rx_ring(priv, rx->rx_ring);
			spin_lock_irq(&priv->lock);
			--priv->cm.nonsrq_conn_qp;
			spin_unlock_irq(&priv->lock);
		}
		kfree(rx);
	}
}
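
/*
 * Tear down connected mode: stop listening, move every passive QP to the
 * error state and wait (bounded by a 5 second timeout) for the drain
 * protocol to reap them.
 */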
void ipoib_cm_dev_stop(struct ipoib_dev_priv *priv)
{
	struct ipoib_cm_rx *p;
	unsigned long begin;
	int ret;

	if (!IPOIB_CM_SUPPORTED(IF_LLADDR(priv->dev)) || !priv->cm.id)
		return;

	ib_destroy_cm_id(priv->cm.id);
	priv->cm.id = NULL;

	cancel_work_sync(&priv->cm.rx_reap_task);

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	/* Wait for all RX to be drained */
	begin = jiffies;

	while (!list_empty(&priv->cm.rx_error_list) ||
	       !list_empty(&priv->cm.rx_flush_list) ||
	       !list_empty(&priv->cm.rx_drain_list)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "RX drain timing out\n");

			/*
			 * assume the HW is wedged and just free up everything.
			 */
			list_splice_init(&priv->cm.rx_flush_list,
					 &priv->cm.rx_reap_list);
			list_splice_init(&priv->cm.rx_error_list,
					 &priv->cm.rx_reap_list);
			list_splice_init(&priv->cm.rx_drain_list,
					 &priv->cm.rx_reap_list);
			break;
		}
		spin_unlock_irq(&priv->lock);
		msleep(1);
		ipoib_drain_cq(priv);
		spin_lock_irq(&priv->lock);
	}

	spin_unlock_irq(&priv->lock);

	ipoib_cm_free_rx_reap_list(priv);

	cancel_delayed_work(&priv->cm.stale_task);
}
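
/*
 * Active-side REP handler: bring our QP to RTS, flush any packets that were
 * queued on the path while the connection was being set up, and confirm
 * with an RTU.
 */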
static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct ipoib_cm_tx *p = cm_id->context;
	struct ipoib_dev_priv *priv = p->priv;
	struct ipoib_cm_data *data = event->private_data;
	struct ifqueue mbqueue;
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	struct mbuf *mb;

	ipoib_dbg(priv, "cm rep handler\n");
	p->mtu = be32_to_cpu(data->mtu);

	if (p->mtu <= IPOIB_ENCAP_LEN) {
		ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
			   p->mtu, IPOIB_ENCAP_LEN);
		return -EINVAL;
	}

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}

	qp_attr.rq_psn = 0 /* FIXME */;
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return ret;
	}

	bzero(&mbqueue, sizeof(mbqueue));

	spin_lock_irq(&priv->lock);
	set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
	if (p->path)
		for (;;) {
			_IF_DEQUEUE(&p->path->queue, mb);
			if (mb == NULL)
				break;
			_IF_ENQUEUE(&mbqueue, mb);
		}
	spin_unlock_irq(&priv->lock);

	for (;;) {
		struct ifnet *dev = p->priv->dev;
		_IF_DEQUEUE(&mbqueue, mb);
		if (mb == NULL)
			break;
		mb->m_pkthdr.rcvif = dev;
		if (dev->if_transmit(dev, mb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}

	ret = ib_send_cm_rtu(cm_id, NULL, 0);
	if (ret) {
		ipoib_warn(priv, "failed to send RTU: %d\n", ret);
		return ret;
	}
	return 0;
}
static struct ib_qp *ipoib_cm_create_tx_qp(struct ipoib_dev_priv *priv,
					   struct ipoib_cm_tx *tx)
{
	struct ib_qp_init_attr attr = {
		.send_cq = priv->send_cq,
		.recv_cq = priv->recv_cq,
		.srq = priv->cm.srq,
		.cap.max_send_wr = ipoib_sendq_size,
		.cap.max_send_sge = priv->cm.num_frags,
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
		.qp_context = tx
	};

	return ib_create_qp(priv->pd, &attr);
}
static int ipoib_cm_send_req(struct ipoib_dev_priv *priv,
			     struct ib_cm_id *id, struct ib_qp *qp,
			     u32 qpn,
			     struct ib_sa_path_rec *pathrec)
{
	struct ipoib_cm_data data = {};
	struct ib_cm_req_param req = {};

	ipoib_dbg(priv, "cm send req\n");

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(priv->cm.max_cm_mtu);

	req.primary_path = pathrec;
	req.alternate_path = NULL;
	req.service_id = cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
	req.qp_num = qp->qp_num;
	req.qp_type = qp->qp_type;
	req.private_data = &data;
	req.private_data_len = sizeof data;
	req.flow_control = 0;

	req.starting_psn = 0; /* FIXME */

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req.responder_resources = 4;
	req.remote_cm_response_timeout = 20;
	req.local_cm_response_timeout = 20;
	req.retry_count = 0; /* RFC draft warns against retries */
	req.rnr_retry_count = 0; /* RFC draft warns against retries */
	req.max_cm_retries = 15;
	req.srq = ipoib_cm_has_srq(priv);
	return ib_send_cm_req(id, &req);
}
static int ipoib_cm_modify_tx_init(struct ipoib_dev_priv *priv,
				   struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
	if (ret) {
		ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
	qp_attr.port_num = priv->port;
	qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
		return ret;
	}
	return 0;
}
static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
			    struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = p->priv;
	int ret;

	p->tx_ring = kzalloc(ipoib_sendq_size * sizeof *p->tx_ring, GFP_KERNEL);
	if (!p->tx_ring) {
		ipoib_warn(priv, "failed to allocate tx ring\n");
		ret = -ENOMEM;
		goto err_tx;
	}
	memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);

	p->qp = ipoib_cm_create_tx_qp(p->priv, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
		goto err_qp;
	}

	p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
	if (IS_ERR(p->id)) {
		ret = PTR_ERR(p->id);
		ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
		goto err_id;
	}

	ret = ipoib_cm_modify_tx_init(p->priv, p->id, p->qp);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret);
		goto err_modify;
	}

	ret = ipoib_cm_send_req(p->priv, p->id, p->qp, qpn, pathrec);
	if (ret) {
		ipoib_warn(priv, "failed to send cm req: %d\n", ret);
		goto err_send_cm;
	}

	ipoib_dbg(priv, "Request connection 0x%x for gid %pI6 qpn 0x%x\n",
		  p->qp->qp_num, pathrec->dgid.raw, qpn);

	return 0;

err_send_cm:
err_modify:
	ib_destroy_cm_id(p->id);
err_id:
	p->id = NULL;
	ib_destroy_qp(p->qp);
err_qp:
	p->qp = NULL;
err_tx:
	return ret;
}
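
/*
 * Destroy an active-side connection, waiting up to 5 seconds for
 * outstanding sends to complete before reclaiming the ring.
 */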
static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
	struct ipoib_dev_priv *priv = p->priv;
	struct ifnet *dev = priv->dev;
	struct ipoib_cm_tx_buf *tx_req;
	unsigned long begin;

	ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
		  p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);

	if (p->path)
		ipoib_path_free(priv, p->path);

	if (p->id)
		ib_destroy_cm_id(p->id);

	if (p->tx_ring) {
		/* Wait for all sends to complete */
		begin = jiffies;
		while ((int) p->tx_tail - (int) p->tx_head < 0) {
			if (time_after(jiffies, begin + 5 * HZ)) {
				ipoib_warn(priv, "timing out; %d sends not completed\n",
					   p->tx_head - p->tx_tail);
				goto timeout;
			}

			msleep(1);
		}
	}

timeout:

	while ((int) p->tx_tail - (int) p->tx_head < 0) {
		tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
		ipoib_dma_unmap_tx(priv->ca, (struct ipoib_tx_buf *)tx_req);
		m_freem(tx_req->mb);
		++p->tx_tail;
		if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
		    (dev->if_drv_flags & IFF_DRV_OACTIVE) != 0 &&
		    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
			dev->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}

	if (p->qp)
		ib_destroy_qp(p->qp);

	kfree(p->tx_ring);
	kfree(p);
}
static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_tx *tx = cm_id->context;
	struct ipoib_dev_priv *priv = tx->priv;
	struct ipoib_path *path;
	unsigned long flags;
	int ret;

	switch (event->event) {
	case IB_CM_DREQ_RECEIVED:
		ipoib_dbg(priv, "DREQ received.\n");
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	case IB_CM_REP_RECEIVED:
		ipoib_dbg(priv, "REP received.\n");
		ret = ipoib_cm_rep_handler(cm_id, event);
		if (ret)
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		break;
	case IB_CM_REQ_ERROR:
	case IB_CM_REJ_RECEIVED:
	case IB_CM_TIMEWAIT_EXIT:
		ipoib_dbg(priv, "CM error %d.\n", event->event);
		spin_lock_irqsave(&priv->lock, flags);
		path = tx->path;

		if (path) {
			path->cm = NULL;
			tx->path = NULL;
			rb_erase(&path->rb_node, &priv->path_tree);
			list_del(&path->list);
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		spin_unlock_irqrestore(&priv->lock, flags);
		if (path)
			ipoib_path_free(tx->priv, path);
		break;
	default:
		break;
	}

	return 0;
}
struct ipoib_cm_tx *ipoib_cm_create_tx(struct ipoib_dev_priv *priv,
				       struct ipoib_path *path)
{
	struct ipoib_cm_tx *tx;

	tx = kzalloc(sizeof *tx, GFP_ATOMIC);
	if (!tx)
		return NULL;

	ipoib_dbg(priv, "Creating cm tx\n");
	path->cm = tx;
	tx->path = path;
	tx->priv = priv;
	list_add(&tx->list, &priv->cm.start_list);
	set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
	queue_work(ipoib_workqueue, &priv->cm.start_task);
	return tx;
}
void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = tx->priv;
	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
		spin_lock(&priv->lock);
		list_move(&tx->list, &priv->cm.reap_list);
		spin_unlock(&priv->lock);
		queue_work(ipoib_workqueue, &priv->cm.reap_task);
		ipoib_dbg(priv, "Reap connection for gid %pI6\n",
			  tx->path->pathrec.dgid.raw);
		tx->path = NULL;
	}
}
static void ipoib_cm_tx_start(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.start_task);
	struct ipoib_path *path;
	struct ipoib_cm_tx *p;
	unsigned long flags;
	int ret;

	struct ib_sa_path_rec pathrec;
	u32 qpn;

	ipoib_dbg(priv, "cm start task\n");
	spin_lock_irqsave(&priv->lock, flags);

	while (!list_empty(&priv->cm.start_list)) {
		p = list_entry(priv->cm.start_list.next, typeof(*p), list);
		list_del_init(&p->list);
		path = p->path;
		qpn = IPOIB_QPN(path->hwaddr);
		memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);

		spin_unlock_irqrestore(&priv->lock, flags);

		ret = ipoib_cm_tx_init(p, qpn, &pathrec);

		spin_lock_irqsave(&priv->lock, flags);

		if (ret) {
			path = p->path;
			if (path) {
				path->cm = NULL;
				rb_erase(&path->rb_node, &priv->path_tree);
				list_del(&path->list);
				ipoib_path_free(priv, path);
			}
			list_del(&p->list);
			kfree(p);
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}
static void ipoib_cm_tx_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.reap_task);
	struct ipoib_cm_tx *p;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	while (!list_empty(&priv->cm.reap_list)) {
		p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
		list_del(&p->list);
		spin_unlock_irqrestore(&priv->lock, flags);
		ipoib_cm_tx_destroy(p);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}
static void ipoib_cm_mb_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.mb_task);
	struct mbuf *mb;
	unsigned long flags;
	unsigned mtu = priv->mcast_mtu;
	uint16_t proto;

	spin_lock_irqsave(&priv->lock, flags);

	for (;;) {
		IF_DEQUEUE(&priv->cm.mb_queue, mb);
		if (mb == NULL)
			break;
		spin_unlock_irqrestore(&priv->lock, flags);

		proto = htons(*mtod(mb, uint16_t *));
		m_adj(mb, IPOIB_ENCAP_LEN);
		if (proto == ETHERTYPE_IP)
			icmp_error(mb, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0, mtu);
		else if (proto == ETHERTYPE_IPV6)
			icmp6_error(mb, ICMP6_PACKET_TOO_BIG, 0, mtu);
		else
			m_freem(mb);

		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}
void
ipoib_cm_mb_too_long(struct ipoib_dev_priv *priv, struct mbuf *mb, unsigned int mtu)
{
	int e = priv->cm.mb_queue.ifq_len;

	IF_ENQUEUE(&priv->cm.mb_queue, mb);
	if (e == 0)
		queue_work(ipoib_workqueue, &priv->cm.mb_task);
}
static void ipoib_cm_rx_reap(struct work_struct *work)
{
	ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
						cm.rx_reap_task));
}
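
/*
 * Periodic task that moves passive connections which have been idle longer
 * than IPOIB_CM_RX_TIMEOUT to the error list so they get reaped.
 */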
static void ipoib_cm_stale_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.stale_task.work);
	struct ipoib_cm_rx *p;
	int ret;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		/* List is sorted by LRU, start from tail,
		 * stop when we see a recently used entry */
		p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
		if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
			break;
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	if (!list_empty(&priv->cm.passive_ids))
		queue_delayed_work(ipoib_workqueue,
				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	spin_unlock_irq(&priv->lock);
}
static void ipoib_cm_create_srq(struct ipoib_dev_priv *priv, int max_sge)
{
	struct ib_srq_init_attr srq_init_attr = {
		.attr = {
			.max_wr  = ipoib_recvq_size,
			.max_sge = max_sge
		}
	};

	priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
	if (IS_ERR(priv->cm.srq)) {
		if (PTR_ERR(priv->cm.srq) != -ENOSYS)
			printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
			       priv->ca->name, PTR_ERR(priv->cm.srq));
		priv->cm.srq = NULL;
		return;
	}

	priv->cm.srq_ring = kzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring, GFP_KERNEL);
	if (!priv->cm.srq_ring) {
		printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
		       priv->ca->name, ipoib_recvq_size);
		ib_destroy_srq(priv->cm.srq);
		priv->cm.srq = NULL;
		return;
	}

	memset(priv->cm.srq_ring, 0, ipoib_recvq_size * sizeof *priv->cm.srq_ring);
}
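
/*
 * Per-device connected mode initialization: set up lists and work items,
 * create the SRQ when the HCA supports one and prefill its receive ring,
 * and advertise connected mode in the interface hardware address.
 */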
int ipoib_cm_dev_init(struct ipoib_dev_priv *priv)
{
	struct ifnet *dev = priv->dev;
	int i, ret;
	struct ib_device_attr attr;

	INIT_LIST_HEAD(&priv->cm.passive_ids);
	INIT_LIST_HEAD(&priv->cm.reap_list);
	INIT_LIST_HEAD(&priv->cm.start_list);
	INIT_LIST_HEAD(&priv->cm.rx_error_list);
	INIT_LIST_HEAD(&priv->cm.rx_flush_list);
	INIT_LIST_HEAD(&priv->cm.rx_drain_list);
	INIT_LIST_HEAD(&priv->cm.rx_reap_list);
	INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
	INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
	INIT_WORK(&priv->cm.mb_task, ipoib_cm_mb_reap);
	INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
	INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);

	bzero(&priv->cm.mb_queue, sizeof(priv->cm.mb_queue));
	mtx_init(&priv->cm.mb_queue.ifq_mtx,
	    dev->if_xname, "if send queue", MTX_DEF);

	ret = ib_query_device(priv->ca, &attr);
	if (ret) {
		printk(KERN_WARNING "ib_query_device() failed with %d\n", ret);
		return ret;
	}

	ipoib_dbg(priv, "max_srq_sge=%d\n", attr.max_srq_sge);

	attr.max_srq_sge = min_t(int, IPOIB_CM_RX_SG, attr.max_srq_sge);
	ipoib_cm_create_srq(priv, attr.max_srq_sge);
	if (ipoib_cm_has_srq(priv)) {
		priv->cm.max_cm_mtu = attr.max_srq_sge * MJUMPAGESIZE;
		priv->cm.num_frags = attr.max_srq_sge;
		ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
			  priv->cm.max_cm_mtu, priv->cm.num_frags);
	} else {
		priv->cm.max_cm_mtu = IPOIB_CM_MAX_MTU;
		priv->cm.num_frags = IPOIB_CM_RX_SG;
	}

	ipoib_cm_init_rx_wr(priv, &priv->cm.rx_wr, priv->cm.rx_sge);

	if (ipoib_cm_has_srq(priv)) {
		for (i = 0; i < ipoib_recvq_size; ++i) {
			if (!ipoib_cm_alloc_rx_mb(priv, &priv->cm.srq_ring[i])) {
				ipoib_warn(priv, "failed to allocate "
					   "receive buffer %d\n", i);
				ipoib_cm_dev_cleanup(priv);
				return -ENOMEM;
			}

			if (ipoib_cm_post_receive_srq(priv, i)) {
				ipoib_warn(priv, "ipoib_cm_post_receive_srq "
					   "failed for buf %d\n", i);
				ipoib_cm_dev_cleanup(priv);
				return -EIO;
			}
		}
	}

	IF_LLADDR(priv->dev)[0] = IPOIB_FLAGS_RC;
	return 0;
}
void ipoib_cm_dev_cleanup(struct ipoib_dev_priv *priv)
{
	int ret;

	if (!priv->cm.srq)
		return;

	ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");

	ret = ib_destroy_srq(priv->cm.srq);
	if (ret)
		ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);

	priv->cm.srq = NULL;
	if (!priv->cm.srq_ring)
		return;

	ipoib_cm_free_rx_ring(priv, priv->cm.srq_ring);
	priv->cm.srq_ring = NULL;

	mtx_destroy(&priv->cm.mb_queue.ifq_mtx);
}
#endif /* CONFIG_INFINIBAND_IPOIB_CM */