/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_CM

#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp6.h>

#include <rdma/ib_cm.h>
#include <rdma/ib_cache.h>
#include <linux/delay.h>
int ipoib_max_conn_qp = 128;

module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444);
MODULE_PARM_DESC(max_nonsrq_conn_qp,
        "Max number of connected-mode QPs per interface "
        "(applied only if shared receive queue is not available)");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
MODULE_PARM_DESC(cm_data_debug_level,
        "Enable data path debug tracing for connected mode if > 0");
#endif
#define IPOIB_CM_IETF_ID 0x1000000000000000ULL

#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
#define IPOIB_CM_RX_TIMEOUT (2 * 256 * HZ)
#define IPOIB_CM_RX_DELAY (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)

static struct ib_qp_attr ipoib_cm_err_attr = {
        .qp_state = IB_QPS_ERR
};

#define IPOIB_CM_RX_DRAIN_WRID 0xffffffff

static struct ib_send_wr ipoib_cm_rx_drain_wr = {
        .wr_id = IPOIB_CM_RX_DRAIN_WRID,
};
static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
                               struct ib_cm_event *event);

static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, struct ipoib_cm_rx_buf *rx_req)
        ipoib_dma_unmap_rx(priv, (struct ipoib_rx_buf *)rx_req);
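
/*
 * Repost a receive WR on the shared receive queue: rebuild the SGE list
 * from the mbuf chain in slot 'id' and hand it to ib_post_srq_recv();
 * on failure the buffer is unmapped and freed.
 */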
static int ipoib_cm_post_receive_srq(struct ipoib_dev_priv *priv, int id)
        struct ib_recv_wr *bad_wr;
        struct ipoib_rx_buf *rx_req;

        rx_req = (struct ipoib_rx_buf *)&priv->cm.srq_ring[id];
        for (m = rx_req->mb, i = 0; m != NULL; m = m->m_next, i++) {
                priv->cm.rx_sge[i].addr = rx_req->mapping[i];
                priv->cm.rx_sge[i].length = m->m_len;

        priv->cm.rx_wr.num_sge = i;
        priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

        ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
        ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
        ipoib_dma_unmap_rx(priv, rx_req);
        m_freem(priv->cm.srq_ring[id].mb);
        priv->cm.srq_ring[id].mb = NULL;
static int ipoib_cm_post_receive_nonsrq(struct ipoib_dev_priv *priv,
                                        struct ipoib_cm_rx *rx,
                                        struct ib_recv_wr *wr,
                                        struct ib_sge *sge, int id)
        struct ipoib_rx_buf *rx_req;
        struct ib_recv_wr *bad_wr;

        rx_req = (struct ipoib_rx_buf *)&rx->rx_ring[id];
        for (m = rx_req->mb, i = 0; m != NULL; m = m->m_next, i++) {
                sge[i].addr = rx_req->mapping[i];
                sge[i].length = m->m_len;

        wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

        ret = ib_post_recv(rx->qp, wr, &bad_wr);
        ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
        ipoib_dma_unmap_rx(priv, rx_req);
        m_freem(rx->rx_ring[id].mb);
        rx->rx_ring[id].mb = NULL;
ipoib_cm_alloc_rx_mb(struct ipoib_dev_priv *priv, struct ipoib_cm_rx_buf *rx_req)
        return ipoib_alloc_map_mb(priv, (struct ipoib_rx_buf *)rx_req,
            priv->cm.max_cm_mtu);

static void ipoib_cm_free_rx_ring(struct ipoib_dev_priv *priv,
                                  struct ipoib_cm_rx_buf *rx_ring)
        for (i = 0; i < ipoib_recvq_size; ++i)
                ipoib_cm_dma_unmap_rx(priv, &rx_ring[i]);
                m_freem(rx_ring[i].mb);
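
/*
 * Post a single drain WR on a QP that has been flushed; its completion marks
 * the point at which all previously posted receive WRs have been flushed.
 * Only one drain WR is kept outstanding because the CQ reserves a single
 * extra slot for it.
 */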
static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
        struct ib_send_wr *bad_wr;
        struct ipoib_cm_rx *p;

        /* We only reserved 1 extra slot in CQ for drain WRs, so
         * make sure we have at most 1 outstanding WR. */
        if (list_empty(&priv->cm.rx_flush_list) ||
            !list_empty(&priv->cm.rx_drain_list))
                return;

        /*
         * QPs on flush list are in error state. This way, a "flush
         * error" WC will be immediately generated for each WR we post.
         */
        p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
        if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
                ipoib_warn(priv, "failed to post drain wr\n");

        list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
        struct ipoib_cm_rx *p = ctx;
        struct ipoib_dev_priv *priv = p->priv;

        if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
                return;

        spin_lock_irqsave(&priv->lock, flags);
        list_move(&p->list, &priv->cm.rx_flush_list);
        p->state = IPOIB_CM_RX_FLUSH;
        ipoib_cm_start_rx_drain(priv);
        spin_unlock_irqrestore(&priv->lock, flags);
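
/*
 * Create the passive-side RC QP.  The send queue is sized for the drain WR
 * only; receive resources are only requested when no SRQ is available.
 */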
static struct ib_qp *ipoib_cm_create_rx_qp(struct ipoib_dev_priv *priv,
                                           struct ipoib_cm_rx *p)
        struct ib_qp_init_attr attr = {
                .event_handler = ipoib_cm_rx_event_handler,
                .send_cq = priv->recv_cq, /* For drain WR */
                .recv_cq = priv->recv_cq,
                .cap.max_send_wr = 1, /* For drain WR */
                .cap.max_send_sge = 1,
                .sq_sig_type = IB_SIGNAL_ALL_WR,
                .qp_type = IB_QPT_RC,
        };

        if (!ipoib_cm_has_srq(priv)) {
                attr.cap.max_recv_wr = ipoib_recvq_size;
                attr.cap.max_recv_sge = priv->cm.num_frags;
        }

        return ib_create_qp(priv->pd, &attr);
static int ipoib_cm_modify_rx_qp(struct ipoib_dev_priv *priv,
                                 struct ib_cm_id *cm_id, struct ib_qp *qp,
                                 unsigned psn)
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        qp_attr.qp_state = IB_QPS_INIT;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
        qp_attr.qp_state = IB_QPS_RTR;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
        qp_attr.rq_psn = psn;
        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);

        /*
         * Current Mellanox HCA firmware won't generate completions
         * with error for drain WRs unless the QP has been moved to
         * RTS first. This work-around leaves a window where a QP has
         * moved to error asynchronously, but this will eventually get
         * fixed in firmware, so let's not error out if modify QP
         * returns an error.
         */
        qp_attr.qp_state = IB_QPS_RTS;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
static void ipoib_cm_init_rx_wr(struct ipoib_dev_priv *priv,
                                struct ib_recv_wr *wr,
                                struct ib_sge *sge)
        for (i = 0; i < IPOIB_CM_RX_SG; i++)
                sge[i].lkey = priv->pd->local_dma_lkey;
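
/*
 * Without an SRQ each connected-mode RX QP gets its own receive ring.  The
 * number of such QPs is bounded by the max_nonsrq_conn_qp module parameter;
 * excess connection requests are rejected with IB_CM_REJ_NO_QP.
 */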
static int ipoib_cm_nonsrq_init_rx(struct ipoib_dev_priv *priv,
                                   struct ib_cm_id *cm_id, struct ipoib_cm_rx *rx)
        struct ib_recv_wr wr;
        struct ib_sge sge[IPOIB_CM_RX_SG];

        rx->rx_ring = kzalloc(ipoib_recvq_size * sizeof *rx->rx_ring, GFP_KERNEL);
        printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
               priv->ca->name, ipoib_recvq_size);

        memset(rx->rx_ring, 0, ipoib_recvq_size * sizeof *rx->rx_ring);

        t = kmalloc(sizeof *t, GFP_KERNEL);

        ipoib_cm_init_rx_wr(priv, &t->wr, t->sge);

        spin_lock_irq(&priv->lock);

        if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
                spin_unlock_irq(&priv->lock);
                ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);

        ++priv->cm.nonsrq_conn_qp;

        spin_unlock_irq(&priv->lock);

        for (i = 0; i < ipoib_recvq_size; ++i) {
                if (!ipoib_cm_alloc_rx_mb(priv, &rx->rx_ring[i])) {
                        ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
                ret = ipoib_cm_post_receive_nonsrq(priv, rx, &t->wr, t->sge, i);
                ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq "
                           "failed for buf %d\n", i);

        rx->recv_count = ipoib_recvq_size;

        spin_lock_irq(&priv->lock);
        --priv->cm.nonsrq_conn_qp;
        spin_unlock_irq(&priv->lock);

        ipoib_cm_free_rx_ring(priv, rx->rx_ring);
static int ipoib_cm_send_rep(struct ipoib_dev_priv *priv, struct ib_cm_id *cm_id,
                             struct ib_qp *qp, struct ib_cm_req_event_param *req,
                             unsigned psn)
        struct ipoib_cm_data data = {};
        struct ib_cm_rep_param rep = {};

        data.qpn = cpu_to_be32(priv->qp->qp_num);
        data.mtu = cpu_to_be32(priv->cm.max_cm_mtu);

        rep.private_data = &data;
        rep.private_data_len = sizeof data;
        rep.flow_control = 0;
        rep.rnr_retry_count = req->rnr_retry_count;
        rep.srq = ipoib_cm_has_srq(priv);
        rep.qp_num = qp->qp_num;
        rep.starting_psn = psn;
        return ib_send_cm_rep(cm_id, &rep);
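
/*
 * Passive-side REQ handler: create an RC QP, move it through INIT/RTR/RTS,
 * set up a non-SRQ receive ring if needed, arm the stale-connection timer
 * and answer with a REP.
 */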
static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
        struct ipoib_dev_priv *priv = cm_id->context;
        struct ipoib_cm_rx *p;

        ipoib_dbg(priv, "REQ arrived\n");
        p = kzalloc(sizeof *p, GFP_KERNEL);

        p->state = IPOIB_CM_RX_LIVE;
        p->jiffies = jiffies;
        INIT_LIST_HEAD(&p->list);

        p->qp = ipoib_cm_create_rx_qp(priv, p);
        ret = PTR_ERR(p->qp);

        psn = random() & 0xffffff;
        ret = ipoib_cm_modify_rx_qp(priv, cm_id, p->qp, psn);

        if (!ipoib_cm_has_srq(priv)) {
                ret = ipoib_cm_nonsrq_init_rx(priv, cm_id, p);

        spin_lock_irq(&priv->lock);
        queue_delayed_work(ipoib_workqueue,
                           &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
        /* Add this entry to passive ids list head, but do not re-add it
         * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
        p->jiffies = jiffies;
        if (p->state == IPOIB_CM_RX_LIVE)
                list_move(&p->list, &priv->cm.passive_ids);
        spin_unlock_irq(&priv->lock);

        ret = ipoib_cm_send_rep(priv, cm_id, p->qp, &event->param.req_rcvd, psn);
        ipoib_warn(priv, "failed to send REP: %d\n", ret);
        if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
                ipoib_warn(priv, "unable to move qp to error state\n");

        ib_destroy_qp(p->qp);
static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
                               struct ib_cm_event *event)
        struct ipoib_cm_rx *p;
        struct ipoib_dev_priv *priv;

        switch (event->event) {
        case IB_CM_REQ_RECEIVED:
                return ipoib_cm_req_handler(cm_id, event);
        case IB_CM_DREQ_RECEIVED:
                ib_send_cm_drep(cm_id, NULL, 0);
        case IB_CM_REJ_RECEIVED:
                if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
                        ipoib_warn(priv, "unable to move qp to error state\n");
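
/*
 * Connected-mode receive completion: swap a fresh mbuf into the completed
 * slot, pass the received chain up the network stack and repost the slot on
 * either the SRQ or the per-connection ring.
 */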
void ipoib_cm_handle_rx_wc(struct ipoib_dev_priv *priv, struct ib_wc *wc)
        struct ipoib_cm_rx_buf saverx;
        struct ipoib_cm_rx_buf *rx_ring;
        unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
        struct ifnet *dev = priv->dev;
        struct mbuf *mb, *newmb;
        struct ipoib_cm_rx *p;

        CURVNET_SET_QUIET(dev->if_vnet);

        ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",

        if (unlikely(wr_id >= ipoib_recvq_size)) {
                if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {
                        spin_lock(&priv->lock);
                        list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
                        ipoib_cm_start_rx_drain(priv);
                        if (priv->cm.id != NULL)
                                queue_work(ipoib_workqueue,
                                           &priv->cm.rx_reap_task);
                        spin_unlock(&priv->lock);
                ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
                           wr_id, ipoib_recvq_size);

        p = wc->qp->qp_context;

        has_srq = ipoib_cm_has_srq(priv);
        rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;

        mb = rx_ring[wr_id].mb;

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                ipoib_dbg(priv, "cm recv error "
                          "(status=%d, wrid=%d vend_err %x)\n",
                          wc->status, wr_id, wc->vendor_err);
                if_inc_counter(dev, IFCOUNTER_IERRORS, 1);
                if (!--p->recv_count) {
                        spin_lock(&priv->lock);
                        list_move(&p->list, &priv->cm.rx_reap_list);
                        queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
                        spin_unlock(&priv->lock);

        if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
                if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
                        p->jiffies = jiffies;
                        /* Move this entry to list head, but do not re-add it
                         * if it has been moved out of list. */
                        if (p->state == IPOIB_CM_RX_LIVE)
                                list_move(&p->list, &priv->cm.passive_ids);

        memcpy(&saverx, &rx_ring[wr_id], sizeof(saverx));
        newmb = ipoib_cm_alloc_rx_mb(priv, &rx_ring[wr_id]);
        if (unlikely(!newmb)) {
                /*
                 * If we can't allocate a new RX buffer, dump
                 * this packet and reuse the old buffer.
                 */
                ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
                if_inc_counter(dev, IFCOUNTER_IERRORS, 1);
                memcpy(&rx_ring[wr_id], &saverx, sizeof(saverx));

        ipoib_cm_dma_unmap_rx(priv, &saverx);

        ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
                       wc->byte_len, wc->slid);

        ipoib_dma_mb(priv, mb, wc->byte_len);

        if_inc_counter(dev, IFCOUNTER_IPACKETS, 1);
        if_inc_counter(dev, IFCOUNTER_IBYTES, mb->m_pkthdr.len);

        mb->m_pkthdr.rcvif = dev;
        proto = *mtod(mb, uint16_t *);
        m_adj(mb, IPOIB_ENCAP_LEN);

        IPOIB_MTAP_PROTO(dev, mb, proto);
        ipoib_demux(dev, mb, ntohs(proto));

        if (unlikely(ipoib_cm_post_receive_srq(priv, wr_id)))
                ipoib_warn(priv, "ipoib_cm_post_receive_srq failed "
                           "for buf %d\n", wr_id);
        if (unlikely(ipoib_cm_post_receive_nonsrq(priv, p,
                ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed "
                           "for buf %d\n", wr_id);
static inline int post_send(struct ipoib_dev_priv *priv,
                            struct ipoib_cm_tx *tx,
                            struct ipoib_cm_tx_buf *tx_req,
                            unsigned int wr_id)
        struct ib_send_wr *bad_wr;
        struct mbuf *mb = tx_req->mb;
        u64 *mapping = tx_req->mapping;

        for (m = mb, i = 0; m != NULL; m = m->m_next, i++) {
                priv->tx_sge[i].addr = mapping[i];
                priv->tx_sge[i].length = m->m_len;

        priv->tx_wr.wr.num_sge = i;
        priv->tx_wr.wr.wr_id = wr_id | IPOIB_OP_CM;
        priv->tx_wr.wr.opcode = IB_WR_SEND;

        return ib_post_send(tx->qp, &priv->tx_wr.wr, &bad_wr);
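
/*
 * Transmit one mbuf over a connected-mode QP.  Packets longer than the
 * negotiated MTU are dropped and reported through ipoib_cm_mb_too_long().
 */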
void ipoib_cm_send(struct ipoib_dev_priv *priv, struct mbuf *mb, struct ipoib_cm_tx *tx)
        struct ipoib_cm_tx_buf *tx_req;
        struct ifnet *dev = priv->dev;

        if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
                while (ipoib_poll_tx(priv)); /* nothing */

        m_adj(mb, sizeof(struct ipoib_pseudoheader));
        if (unlikely(mb->m_pkthdr.len > tx->mtu)) {
                ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
                           mb->m_pkthdr.len, tx->mtu);
                if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
                ipoib_cm_mb_too_long(priv, mb, IPOIB_CM_MTU(tx->mtu));

        ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
                       tx->tx_head, mb->m_pkthdr.len, tx->qp->qp_num);

        /*
         * We put the mb into the tx_ring _before_ we call post_send()
         * because it's entirely possible that the completion handler will
         * run before we execute anything after the post_send(). That
         * means we have to make sure everything is properly recorded and
         * our state is consistent before we call post_send().
         */
        tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];

        if (unlikely(ipoib_dma_map_tx(priv->ca, (struct ipoib_tx_buf *)tx_req,
                                      priv->cm.num_frags))) {
                if_inc_counter(dev, IFCOUNTER_OERRORS, 1);

        if (unlikely(post_send(priv, tx, tx_req, tx->tx_head & (ipoib_sendq_size - 1)))) {
                ipoib_warn(priv, "post_send failed\n");
                if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
                ipoib_dma_unmap_tx(priv->ca, (struct ipoib_tx_buf *)tx_req);

        if (++priv->tx_outstanding == ipoib_sendq_size) {
                ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
                if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
                        ipoib_warn(priv, "request notify on send CQ failed\n");
                dev->if_drv_flags |= IFF_DRV_OACTIVE;
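
/*
 * Connected-mode send completion: unmap and free the mbuf, reopen the
 * interface queue once the ring drains below half, and tear the connection
 * down on a non-flush error.
 */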
void ipoib_cm_handle_tx_wc(struct ipoib_dev_priv *priv, struct ib_wc *wc)
        struct ipoib_cm_tx *tx = wc->qp->qp_context;
        unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
        struct ifnet *dev = priv->dev;
        struct ipoib_cm_tx_buf *tx_req;

        ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",

        if (unlikely(wr_id >= ipoib_sendq_size)) {
                ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
                           wr_id, ipoib_sendq_size);

        tx_req = &tx->tx_ring[wr_id];

        ipoib_dma_unmap_tx(priv->ca, (struct ipoib_tx_buf *)tx_req);

        /* FIXME: is this right? Shouldn't we only increment on success? */
        if_inc_counter(dev, IFCOUNTER_OPACKETS, 1);

        if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
            (dev->if_drv_flags & IFF_DRV_OACTIVE) != 0 &&
            test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                dev->if_drv_flags &= ~IFF_DRV_OACTIVE;

        if (wc->status != IB_WC_SUCCESS &&
            wc->status != IB_WC_WR_FLUSH_ERR) {
                struct ipoib_path *path;

                ipoib_dbg(priv, "failed cm send event "
                          "(status=%d, wrid=%d vend_err %x)\n",
                          wc->status, wr_id, wc->vendor_err);

                rb_erase(&path->rb_node, &priv->path_tree);
                list_del(&path->list);

                if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                        list_move(&tx->list, &priv->cm.reap_list);
                        queue_work(ipoib_workqueue, &priv->cm.reap_task);

                clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
int ipoib_cm_dev_open(struct ipoib_dev_priv *priv)
        if (!IPOIB_CM_SUPPORTED(IF_LLADDR(priv->dev)))
                return 0;

        priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, priv);
        if (IS_ERR(priv->cm.id)) {
                printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
                ret = PTR_ERR(priv->cm.id);

        ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num), 0);
        printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
               IPOIB_CM_IETF_ID | priv->qp->qp_num);

        ib_destroy_cm_id(priv->cm.id);
static void ipoib_cm_free_rx_reap_list(struct ipoib_dev_priv *priv)
        struct ipoib_cm_rx *rx, *n;

        spin_lock_irq(&priv->lock);
        list_splice_init(&priv->cm.rx_reap_list, &list);
        spin_unlock_irq(&priv->lock);

        list_for_each_entry_safe(rx, n, &list, list) {
                ib_destroy_cm_id(rx->id);
                ib_destroy_qp(rx->qp);
                if (!ipoib_cm_has_srq(priv)) {
                        ipoib_cm_free_rx_ring(priv, rx->rx_ring);
                        spin_lock_irq(&priv->lock);
                        --priv->cm.nonsrq_conn_qp;
                        spin_unlock_irq(&priv->lock);
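
/*
 * Shut connected mode down: destroy the listening CM ID, push all passive
 * connections into the error state and wait (up to five seconds) for their
 * receive queues to drain before reaping them.
 */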
void ipoib_cm_dev_stop(struct ipoib_dev_priv *priv)
        struct ipoib_cm_rx *p;

        if (!IPOIB_CM_SUPPORTED(IF_LLADDR(priv->dev)) || !priv->cm.id)
                return;

        ib_destroy_cm_id(priv->cm.id);

        cancel_work_sync(&priv->cm.rx_reap_task);

        spin_lock_irq(&priv->lock);
        while (!list_empty(&priv->cm.passive_ids)) {
                p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
                list_move(&p->list, &priv->cm.rx_error_list);
                p->state = IPOIB_CM_RX_ERROR;
                spin_unlock_irq(&priv->lock);
                ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
                ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
                spin_lock_irq(&priv->lock);

        /* Wait for all RX to be drained */

        while (!list_empty(&priv->cm.rx_error_list) ||
               !list_empty(&priv->cm.rx_flush_list) ||
               !list_empty(&priv->cm.rx_drain_list)) {
                if (time_after(jiffies, begin + 5 * HZ)) {
                        ipoib_warn(priv, "RX drain timing out\n");

                        /*
                         * assume the HW is wedged and just free up everything.
                         */
                        list_splice_init(&priv->cm.rx_flush_list,
                                         &priv->cm.rx_reap_list);
                        list_splice_init(&priv->cm.rx_error_list,
                                         &priv->cm.rx_reap_list);
                        list_splice_init(&priv->cm.rx_drain_list,
                                         &priv->cm.rx_reap_list);

                spin_unlock_irq(&priv->lock);
                ipoib_drain_cq(priv);
                spin_lock_irq(&priv->lock);

        spin_unlock_irq(&priv->lock);

        ipoib_cm_free_rx_reap_list(priv);

        cancel_delayed_work_sync(&priv->cm.stale_task);
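
/*
 * Active-side REP handler: validate the advertised MTU, move the TX QP to
 * RTR and then RTS, flush any mbufs that were queued on the path while the
 * connection was being set up, and send an RTU.
 */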
static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
        struct ipoib_cm_tx *p = cm_id->context;
        struct ipoib_dev_priv *priv = p->priv;
        struct ipoib_cm_data *data = event->private_data;
        struct ifqueue mbqueue;
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        ipoib_dbg(priv, "cm rep handler\n");
        p->mtu = be32_to_cpu(data->mtu);

        if (p->mtu <= IPOIB_ENCAP_LEN) {
                ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
                           p->mtu, IPOIB_ENCAP_LEN);

        qp_attr.qp_state = IB_QPS_RTR;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);

        qp_attr.rq_psn = 0 /* FIXME */;
        ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
        ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);

        qp_attr.qp_state = IB_QPS_RTS;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
        ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
        ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);

        bzero(&mbqueue, sizeof(mbqueue));

        spin_lock_irq(&priv->lock);
        set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
        _IF_DEQUEUE(&p->path->queue, mb);
        _IF_ENQUEUE(&mbqueue, mb);
        spin_unlock_irq(&priv->lock);

        struct ifnet *dev = p->priv->dev;
        _IF_DEQUEUE(&mbqueue, mb);
        mb->m_pkthdr.rcvif = dev;
        if (dev->if_transmit(dev, mb))
                ipoib_warn(priv, "dev_queue_xmit failed "
                           "to requeue packet\n");

        ret = ib_send_cm_rtu(cm_id, NULL, 0);
        ipoib_warn(priv, "failed to send RTU: %d\n", ret);
static struct ib_qp *ipoib_cm_create_tx_qp(struct ipoib_dev_priv *priv,
                                           struct ipoib_cm_tx *tx)
        struct ib_qp_init_attr attr = {
                .send_cq = priv->send_cq,
                .recv_cq = priv->recv_cq,
                .cap.max_send_wr = ipoib_sendq_size,
                .cap.max_send_sge = priv->cm.num_frags,
                .sq_sig_type = IB_SIGNAL_ALL_WR,
                .qp_type = IB_QPT_RC,
        };

        return ib_create_qp(priv->pd, &attr);
static int ipoib_cm_send_req(struct ipoib_dev_priv *priv,
                             struct ib_cm_id *id, struct ib_qp *qp,
                             u32 qpn,
                             struct ib_sa_path_rec *pathrec)
        struct ipoib_cm_data data = {};
        struct ib_cm_req_param req = {};

        ipoib_dbg(priv, "cm send req\n");

        data.qpn = cpu_to_be32(priv->qp->qp_num);
        data.mtu = cpu_to_be32(priv->cm.max_cm_mtu);

        req.primary_path = pathrec;
        req.alternate_path = NULL;
        req.service_id = cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
        req.qp_num = qp->qp_num;
        req.qp_type = qp->qp_type;
        req.private_data = &data;
        req.private_data_len = sizeof data;
        req.flow_control = 0;

        req.starting_psn = 0; /* FIXME */

        /*
         * Pick some arbitrary defaults here; we could make these
         * module parameters if anyone cared about setting them.
         */
        req.responder_resources = 4;
        req.remote_cm_response_timeout = 20;
        req.local_cm_response_timeout = 20;
        req.retry_count = 0; /* RFC draft warns against retries */
        req.rnr_retry_count = 0; /* RFC draft warns against retries */
        req.max_cm_retries = 15;
        req.srq = ipoib_cm_has_srq(priv);
        return ib_send_cm_req(id, &req);
static int ipoib_cm_modify_tx_init(struct ipoib_dev_priv *priv,
                                   struct ib_cm_id *cm_id, struct ib_qp *qp)
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;
        ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
        ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);

        qp_attr.qp_state = IB_QPS_INIT;
        qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
        qp_attr.port_num = priv->port;
        qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;

        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
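
/*
 * Set up an active-side connection: allocate the TX ring, create the RC QP
 * and CM ID, move the QP to INIT and send the CM REQ.
 */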
static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
                            struct ib_sa_path_rec *pathrec)
        struct ipoib_dev_priv *priv = p->priv;

        p->tx_ring = kzalloc(ipoib_sendq_size * sizeof *p->tx_ring, GFP_KERNEL);
        ipoib_warn(priv, "failed to allocate tx ring\n");

        memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);

        p->qp = ipoib_cm_create_tx_qp(p->priv, p);
        if (IS_ERR(p->qp)) {
                ret = PTR_ERR(p->qp);
                ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);

        p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
        if (IS_ERR(p->id)) {
                ret = PTR_ERR(p->id);
                ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);

        ret = ipoib_cm_modify_tx_init(p->priv, p->id, p->qp);
        ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret);

        ret = ipoib_cm_send_req(p->priv, p->id, p->qp, qpn, pathrec);
        ipoib_warn(priv, "failed to send cm req: %d\n", ret);

        ipoib_dbg(priv, "Request connection 0x%x for gid %pI6 qpn 0x%x\n",
                  p->qp->qp_num, pathrec->dgid.raw, qpn);

        ib_destroy_cm_id(p->id);

        ib_destroy_qp(p->qp);
static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
        struct ipoib_dev_priv *priv = p->priv;
        struct ifnet *dev = priv->dev;
        struct ipoib_cm_tx_buf *tx_req;
        unsigned long begin;

        ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
                  p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);

        ipoib_path_free(priv, p->path);

        ib_destroy_cm_id(p->id);

        /* Wait for all sends to complete */
        while ((int) p->tx_tail - (int) p->tx_head < 0) {
                if (time_after(jiffies, begin + 5 * HZ)) {
                        ipoib_warn(priv, "timing out; %d sends not completed\n",
                                   p->tx_head - p->tx_tail);

        while ((int) p->tx_tail - (int) p->tx_head < 0) {
                tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
                ipoib_dma_unmap_tx(priv->ca, (struct ipoib_tx_buf *)tx_req);
                m_freem(tx_req->mb);

                if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
                    (dev->if_drv_flags & IFF_DRV_OACTIVE) != 0 &&
                    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                        dev->if_drv_flags &= ~IFF_DRV_OACTIVE;

        ib_destroy_qp(p->qp);
static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
                               struct ib_cm_event *event)
        struct ipoib_cm_tx *tx = cm_id->context;
        struct ipoib_dev_priv *priv = tx->priv;
        struct ipoib_path *path;
        unsigned long flags;

        switch (event->event) {
        case IB_CM_DREQ_RECEIVED:
                ipoib_dbg(priv, "DREQ received.\n");
                ib_send_cm_drep(cm_id, NULL, 0);
        case IB_CM_REP_RECEIVED:
                ipoib_dbg(priv, "REP received.\n");
                ret = ipoib_cm_rep_handler(cm_id, event);
                ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
        case IB_CM_REQ_ERROR:
        case IB_CM_REJ_RECEIVED:
        case IB_CM_TIMEWAIT_EXIT:
                ipoib_dbg(priv, "CM error %d.\n", event->event);
                spin_lock_irqsave(&priv->lock, flags);

                rb_erase(&path->rb_node, &priv->path_tree);
                list_del(&path->list);

                if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                        list_move(&tx->list, &priv->cm.reap_list);
                        queue_work(ipoib_workqueue, &priv->cm.reap_task);

                spin_unlock_irqrestore(&priv->lock, flags);

                ipoib_path_free(tx->priv, path);
struct ipoib_cm_tx *ipoib_cm_create_tx(struct ipoib_dev_priv *priv,
                                       struct ipoib_path *path)
        struct ipoib_cm_tx *tx;

        tx = kzalloc(sizeof *tx, GFP_ATOMIC);

        ipoib_dbg(priv, "Creating cm tx\n");

        list_add(&tx->list, &priv->cm.start_list);
        set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
        queue_work(ipoib_workqueue, &priv->cm.start_task);

void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
        struct ipoib_dev_priv *priv = tx->priv;
        if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                spin_lock(&priv->lock);
                list_move(&tx->list, &priv->cm.reap_list);
                spin_unlock(&priv->lock);
                queue_work(ipoib_workqueue, &priv->cm.reap_task);
                ipoib_dbg(priv, "Reap connection for gid %pI6\n",
                          tx->path->pathrec.dgid.raw);
static void ipoib_cm_tx_start(struct work_struct *work)
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   cm.start_task);
        struct ipoib_path *path;
        struct ipoib_cm_tx *p;
        unsigned long flags;

        struct ib_sa_path_rec pathrec;

        ipoib_dbg(priv, "cm start task\n");
        spin_lock_irqsave(&priv->lock, flags);

        while (!list_empty(&priv->cm.start_list)) {
                p = list_entry(priv->cm.start_list.next, typeof(*p), list);
                list_del_init(&p->list);

                qpn = IPOIB_QPN(path->hwaddr);
                memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);

                spin_unlock_irqrestore(&priv->lock, flags);

                ret = ipoib_cm_tx_init(p, qpn, &pathrec);

                spin_lock_irqsave(&priv->lock, flags);

                rb_erase(&path->rb_node, &priv->path_tree);
                list_del(&path->list);
                ipoib_path_free(priv, path);

        spin_unlock_irqrestore(&priv->lock, flags);
static void ipoib_cm_tx_reap(struct work_struct *work)
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   cm.reap_task);
        struct ipoib_cm_tx *p;
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);

        while (!list_empty(&priv->cm.reap_list)) {
                p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
                spin_unlock_irqrestore(&priv->lock, flags);
                ipoib_cm_tx_destroy(p);
                spin_lock_irqsave(&priv->lock, flags);

        spin_unlock_irqrestore(&priv->lock, flags);
static void ipoib_cm_mb_reap(struct work_struct *work)
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   cm.mb_task);
        unsigned long flags;
#if defined(INET) || defined(INET6)
        unsigned mtu = priv->mcast_mtu;

        spin_lock_irqsave(&priv->lock, flags);

        IF_DEQUEUE(&priv->cm.mb_queue, mb);

        spin_unlock_irqrestore(&priv->lock, flags);

        proto = htons(*mtod(mb, uint16_t *));
        m_adj(mb, IPOIB_ENCAP_LEN);

        icmp_error(mb, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0, mtu);

        case ETHERTYPE_IPV6:
                icmp6_error(mb, ICMP6_PACKET_TOO_BIG, 0, mtu);

        spin_lock_irqsave(&priv->lock, flags);

        spin_unlock_irqrestore(&priv->lock, flags);
ipoib_cm_mb_too_long(struct ipoib_dev_priv *priv, struct mbuf *mb, unsigned int mtu)
        int e = priv->cm.mb_queue.ifq_len;

        IF_ENQUEUE(&priv->cm.mb_queue, mb);

        queue_work(ipoib_workqueue, &priv->cm.mb_task);

static void ipoib_cm_rx_reap(struct work_struct *work)
        ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
                                                cm.rx_reap_task));
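
/*
 * Periodic scan of the passive connection list (kept in LRU order): QPs that
 * have seen no traffic within IPOIB_CM_RX_TIMEOUT are moved to the error list
 * so their resources can be reaped.
 */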
static void ipoib_cm_stale_task(struct work_struct *work)
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   cm.stale_task.work);
        struct ipoib_cm_rx *p;

        spin_lock_irq(&priv->lock);
        while (!list_empty(&priv->cm.passive_ids)) {
                /* List is sorted by LRU, start from tail,
                 * stop when we see a recently used entry */
                p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
                if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
                        break;
                list_move(&p->list, &priv->cm.rx_error_list);
                p->state = IPOIB_CM_RX_ERROR;
                spin_unlock_irq(&priv->lock);
                ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
                ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
                spin_lock_irq(&priv->lock);

        if (!list_empty(&priv->cm.passive_ids))
                queue_delayed_work(ipoib_workqueue,
                                   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
        spin_unlock_irq(&priv->lock);
static void ipoib_cm_create_srq(struct ipoib_dev_priv *priv, int max_sge)
        struct ib_srq_init_attr srq_init_attr = {
                .max_wr = ipoib_recvq_size,

        priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
        if (IS_ERR(priv->cm.srq)) {
                if (PTR_ERR(priv->cm.srq) != -ENOSYS)
                        printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
                               priv->ca->name, PTR_ERR(priv->cm.srq));
                priv->cm.srq = NULL;

        priv->cm.srq_ring = kzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring, GFP_KERNEL);
        if (!priv->cm.srq_ring) {
                printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
                       priv->ca->name, ipoib_recvq_size);
                ib_destroy_srq(priv->cm.srq);
                priv->cm.srq = NULL;

        memset(priv->cm.srq_ring, 0, ipoib_recvq_size * sizeof *priv->cm.srq_ring);
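
/*
 * Per-device connected-mode init: set up the lists and work tasks, create an
 * SRQ when the HCA supports one (sizing max_cm_mtu from the available SGEs)
 * and pre-post the SRQ receive ring.
 */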
int ipoib_cm_dev_init(struct ipoib_dev_priv *priv)
        struct ifnet *dev = priv->dev;

        INIT_LIST_HEAD(&priv->cm.passive_ids);
        INIT_LIST_HEAD(&priv->cm.reap_list);
        INIT_LIST_HEAD(&priv->cm.start_list);
        INIT_LIST_HEAD(&priv->cm.rx_error_list);
        INIT_LIST_HEAD(&priv->cm.rx_flush_list);
        INIT_LIST_HEAD(&priv->cm.rx_drain_list);
        INIT_LIST_HEAD(&priv->cm.rx_reap_list);
        INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
        INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
        INIT_WORK(&priv->cm.mb_task, ipoib_cm_mb_reap);
        INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
        INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);

        bzero(&priv->cm.mb_queue, sizeof(priv->cm.mb_queue));
        mtx_init(&priv->cm.mb_queue.ifq_mtx,
                 dev->if_xname, "if send queue", MTX_DEF);

        max_srq_sge = priv->ca->attrs.max_srq_sge;

        ipoib_dbg(priv, "max_srq_sge=%d\n", max_srq_sge);

        max_srq_sge = min_t(int, IPOIB_CM_RX_SG, max_srq_sge);
        ipoib_cm_create_srq(priv, max_srq_sge);
        if (ipoib_cm_has_srq(priv)) {
                priv->cm.max_cm_mtu = max_srq_sge * MJUMPAGESIZE;
                priv->cm.num_frags = max_srq_sge;
                ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
                          priv->cm.max_cm_mtu, priv->cm.num_frags);

        priv->cm.max_cm_mtu = IPOIB_CM_MAX_MTU;
        priv->cm.num_frags = IPOIB_CM_RX_SG;

        ipoib_cm_init_rx_wr(priv, &priv->cm.rx_wr, priv->cm.rx_sge);

        if (ipoib_cm_has_srq(priv)) {
                for (i = 0; i < ipoib_recvq_size; ++i) {
                        if (!ipoib_cm_alloc_rx_mb(priv, &priv->cm.srq_ring[i])) {
                                ipoib_warn(priv, "failed to allocate "
                                           "receive buffer %d\n", i);
                                ipoib_cm_dev_cleanup(priv);

                        if (ipoib_cm_post_receive_srq(priv, i)) {
                                ipoib_warn(priv, "ipoib_cm_post_receive_srq "
                                           "failed for buf %d\n", i);
                                ipoib_cm_dev_cleanup(priv);

        IF_LLADDR(priv->dev)[0] = IPOIB_FLAGS_RC;
void ipoib_cm_dev_cleanup(struct ipoib_dev_priv *priv)
        ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");

        ret = ib_destroy_srq(priv->cm.srq);
        ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);

        priv->cm.srq = NULL;
        if (!priv->cm.srq_ring)
                return;

        ipoib_cm_free_rx_ring(priv, priv->cm.srq_ring);
        priv->cm.srq_ring = NULL;

        mtx_destroy(&priv->cm.mb_queue.ifq_mtx);

#endif /* CONFIG_INFINIBAND_IPOIB_CM */