/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_CM

#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp6.h>

#include <rdma/ib_cm.h>
#include <rdma/ib_cache.h>
#include <linux/delay.h>
int ipoib_max_conn_qp = 128;

module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444);
MODULE_PARM_DESC(max_nonsrq_conn_qp,
		 "Max number of connected-mode QPs per interface "
		 "(applied only if shared receive queue is not available)");
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
MODULE_PARM_DESC(cm_data_debug_level,
		 "Enable data path debug tracing for connected mode if > 0");
#endif
#define IPOIB_CM_IETF_ID 0x1000000000000000ULL
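/*
 * The connected-mode CM service ID is the IETF-assigned IPoIB prefix
 * above with the interface's own UD QPN folded into the low bits;
 * ipoib_cm_dev_open() listens on it and ipoib_cm_send_req() dials the
 * peer's, i.e. both sides compute:
 *
 *	service_id = cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
 */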
#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
#define IPOIB_CM_RX_TIMEOUT     (2 * 256 * HZ)
#define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)
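/*
 * Passive connections are kept on an LRU list keyed by p->jiffies.  To
 * keep the receive fast path cheap, the timestamp is only refreshed
 * for completions whose ring index has the IPOIB_CM_RX_UPDATE_MASK
 * bits clear (roughly one WR in four); the stale task then retires
 * connections that have been idle longer than IPOIB_CM_RX_TIMEOUT.
 */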
static struct ib_qp_attr ipoib_cm_err_attr = {
	.qp_state = IB_QPS_ERR
};

#define IPOIB_CM_RX_DRAIN_WRID 0xffffffff
static struct ib_send_wr ipoib_cm_rx_drain_wr = {
	.wr_id = IPOIB_CM_RX_DRAIN_WRID,
	.opcode = IB_WR_SEND,
};
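/*
 * Drain trick: this send WR is only ever posted to an RX QP that has
 * already been moved to the error state, so the HCA completes it
 * immediately with a flush error on priv->recv_cq (the RX QPs use the
 * receive CQ for their one-slot send queue as well).  Seeing
 * IPOIB_CM_RX_DRAIN_WRID in the completion handler therefore signals
 * that every receive WR posted ahead of it has already been reaped.
 */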
static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event);

static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, struct ipoib_cm_rx_buf *rx_req)
{

	ipoib_dma_unmap_rx(priv, (struct ipoib_rx_buf *)rx_req);
}
static int ipoib_cm_post_receive_srq(struct ipoib_dev_priv *priv, int id)
{
	struct ib_recv_wr *bad_wr;
	struct ipoib_rx_buf *rx_req;
	struct mbuf *m;
	int ret;
	int i;

	rx_req = (struct ipoib_rx_buf *)&priv->cm.srq_ring[id];
	for (m = rx_req->mb, i = 0; m != NULL; m = m->m_next, i++) {
		priv->cm.rx_sge[i].addr = rx_req->mapping[i];
		priv->cm.rx_sge[i].length = m->m_len;
	}

	priv->cm.rx_wr.num_sge = i;
	priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

	ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
		ipoib_dma_unmap_rx(priv, rx_req);
		m_freem(priv->cm.srq_ring[id].mb);
		priv->cm.srq_ring[id].mb = NULL;
	}

	return ret;
}
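/*
 * Two receive flavors coexist: with an SRQ all connections share
 * priv->cm.srq_ring and buffers are replenished through
 * ib_post_srq_recv() above, while without one each ipoib_cm_rx owns a
 * private rx_ring and reposts to its own RC QP below, which is why
 * the number of non-SRQ connections is capped by max_nonsrq_conn_qp.
 */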
static int ipoib_cm_post_receive_nonsrq(struct ipoib_dev_priv *priv,
					struct ipoib_cm_rx *rx,
					struct ib_recv_wr *wr,
					struct ib_sge *sge, int id)
{
	struct ipoib_rx_buf *rx_req;
	struct ib_recv_wr *bad_wr;
	struct mbuf *m;
	int ret;
	int i;

	rx_req = (struct ipoib_rx_buf *)&rx->rx_ring[id];
	for (m = rx_req->mb, i = 0; m != NULL; m = m->m_next, i++) {
		sge[i].addr = rx_req->mapping[i];
		sge[i].length = m->m_len;
	}

	wr->num_sge = i;
	wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

	ret = ib_post_recv(rx->qp, wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
		ipoib_dma_unmap_rx(priv, rx_req);
		m_freem(rx->rx_ring[id].mb);
		rx->rx_ring[id].mb = NULL;
	}

	return ret;
}
static struct mbuf *
ipoib_cm_alloc_rx_mb(struct ipoib_dev_priv *priv, struct ipoib_cm_rx_buf *rx_req)
{
	return ipoib_alloc_map_mb(priv, (struct ipoib_rx_buf *)rx_req,
	    priv->cm.max_cm_mtu);
}
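/*
 * ipoib_alloc_map_mb() hands back an mbuf chain large enough to hold
 * max_cm_mtu bytes and records one bus address per segment in
 * rx_req->mapping[]; the posting routines above walk that chain to
 * build the matching scatter list, one ib_sge per mbuf.
 */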
static void ipoib_cm_free_rx_ring(struct ipoib_dev_priv *priv,
				  struct ipoib_cm_rx_buf *rx_ring)
{
	int i;

	for (i = 0; i < ipoib_recvq_size; ++i)
		if (rx_ring[i].mb) {
			ipoib_cm_dma_unmap_rx(priv, &rx_ring[i]);
			m_freem(rx_ring[i].mb);
		}

	kfree(rx_ring);
}
static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
{
	struct ib_send_wr *bad_wr;
	struct ipoib_cm_rx *p;

	/* We only reserved 1 extra slot in CQ for drain WRs, so
	 * make sure we have at most 1 outstanding WR. */
	if (list_empty(&priv->cm.rx_flush_list) ||
	    !list_empty(&priv->cm.rx_drain_list))
		return;

	/*
	 * QPs on flush list are error state.  This way, a "flush
	 * error" WC will be immediately generated for each WR we post.
	 */
	p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
	if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
		ipoib_warn(priv, "failed to post drain wr\n");

	list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
}
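/*
 * The QP event handler below is the entry point into the drain state
 * machine: IB_EVENT_QP_LAST_WQE_REACHED fires once a QP in the error
 * state has consumed its final receive WR, at which point the
 * connection moves from the passive list to the flush list and a
 * drain WR is posted if none is already outstanding.
 */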
static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
{
	struct ipoib_cm_rx *p = ctx;
	struct ipoib_dev_priv *priv = p->priv;
	unsigned long flags;

	if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
		return;

	spin_lock_irqsave(&priv->lock, flags);
	list_move(&p->list, &priv->cm.rx_flush_list);
	p->state = IPOIB_CM_RX_FLUSH;
	ipoib_cm_start_rx_drain(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}
static struct ib_qp *ipoib_cm_create_rx_qp(struct ipoib_dev_priv *priv,
					   struct ipoib_cm_rx *p)
{
	struct ib_qp_init_attr attr = {
		.event_handler = ipoib_cm_rx_event_handler,
		.send_cq = priv->recv_cq, /* For drain WR */
		.recv_cq = priv->recv_cq,
		.srq = priv->cm.srq,
		.cap.max_send_wr = 1, /* For drain WR */
		.cap.max_send_sge = 1,
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
		.qp_context = p,
	};

	if (!ipoib_cm_has_srq(priv)) {
		attr.cap.max_recv_wr  = ipoib_recvq_size;
		attr.cap.max_recv_sge = priv->cm.num_frags;
	}

	return ib_create_qp(priv->pd, &attr);
}
static int ipoib_cm_modify_rx_qp(struct ipoib_dev_priv *priv,
				 struct ib_cm_id *cm_id, struct ib_qp *qp,
				 unsigned psn)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
		return ret;
	}
	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}
	qp_attr.rq_psn = psn;
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	/*
	 * Current Mellanox HCA firmware won't generate completions
	 * with error for drain WRs unless the QP has been moved to
	 * RTS first. This work-around leaves a window where a QP has
	 * moved to error asynchronously, but this will eventually get
	 * fixed in firmware, so let's not error out if modify QP
	 * fails.
	 */
	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return 0;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return 0;
	}

	return 0;
}
static void ipoib_cm_init_rx_wr(struct ipoib_dev_priv *priv,
				struct ib_recv_wr *wr,
				struct ib_sge *sge)
{
	int i;

	for (i = 0; i < IPOIB_CM_RX_SG; i++)
		sge[i].lkey = priv->pd->local_dma_lkey;

	wr->next = NULL;
	wr->sg_list = sge;
	wr->num_sge = 1;
}
static int ipoib_cm_nonsrq_init_rx(struct ipoib_dev_priv *priv,
				   struct ib_cm_id *cm_id, struct ipoib_cm_rx *rx)
{
	struct {
		struct ib_recv_wr wr;
		struct ib_sge sge[IPOIB_CM_RX_SG];
	} *t;
	int ret;
	int i;

	rx->rx_ring = kzalloc(ipoib_recvq_size * sizeof *rx->rx_ring, GFP_KERNEL);
	if (!rx->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
		       priv->ca->name, ipoib_recvq_size);
		return -ENOMEM;
	}

	memset(rx->rx_ring, 0, ipoib_recvq_size * sizeof *rx->rx_ring);

	t = kmalloc(sizeof *t, GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_free;
	}

	ipoib_cm_init_rx_wr(priv, &t->wr, t->sge);

	spin_lock_irq(&priv->lock);

	if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
		spin_unlock_irq(&priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);
		ret = -EINVAL;
		goto err_free;
	} else
		++priv->cm.nonsrq_conn_qp;

	spin_unlock_irq(&priv->lock);

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_cm_alloc_rx_mb(priv, &rx->rx_ring[i])) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			ret = -ENOMEM;
			goto err_count;
		}
		ret = ipoib_cm_post_receive_nonsrq(priv, rx, &t->wr, t->sge, i);
		if (ret) {
			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq "
				   "failed for buf %d\n", i);
			ret = -EIO;
			goto err_count;
		}
	}

	rx->recv_count = ipoib_recvq_size;

	kfree(t);

	return 0;

err_count:
	spin_lock_irq(&priv->lock);
	--priv->cm.nonsrq_conn_qp;
	spin_unlock_irq(&priv->lock);

err_free:
	kfree(t);
	ipoib_cm_free_rx_ring(priv, rx->rx_ring);

	return ret;
}
static int ipoib_cm_send_rep(struct ipoib_dev_priv *priv, struct ib_cm_id *cm_id,
			     struct ib_qp *qp, struct ib_cm_req_event_param *req,
			     unsigned psn)
{
	struct ipoib_cm_data data = {};
	struct ib_cm_rep_param rep = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(priv->cm.max_cm_mtu);

	rep.private_data = &data;
	rep.private_data_len = sizeof data;
	rep.flow_control = 0;
	rep.rnr_retry_count = req->rnr_retry_count;
	rep.srq = ipoib_cm_has_srq(priv);
	rep.qp_num = qp->qp_num;
	rep.starting_psn = psn;
	return ib_send_cm_rep(cm_id, &rep);
}
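/*
 * Passive side of connection setup: an incoming REQ makes the handler
 * below spin up a dedicated RC receive QP, walk it to RTR/RTS with a
 * random starting PSN, and answer with a REP that carries our own UD
 * QPN and connected-mode MTU in the private data built above.
 */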
static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct ipoib_dev_priv *priv = cm_id->context;
	struct ipoib_cm_rx *p;
	unsigned psn;
	int ret;

	ipoib_dbg(priv, "REQ arrived\n");
	p = kzalloc(sizeof *p, GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	p->priv = priv;
	p->id = cm_id;
	cm_id->context = p;
	p->state = IPOIB_CM_RX_LIVE;
	p->jiffies = jiffies;
	INIT_LIST_HEAD(&p->list);

	p->qp = ipoib_cm_create_rx_qp(priv, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		goto err_qp;
	}

	psn = random() & 0xffffff;
	ret = ipoib_cm_modify_rx_qp(priv, cm_id, p->qp, psn);
	if (ret)
		goto err_modify;

	if (!ipoib_cm_has_srq(priv)) {
		ret = ipoib_cm_nonsrq_init_rx(priv, cm_id, p);
		if (ret)
			goto err_modify;
	}

	spin_lock_irq(&priv->lock);
	queue_delayed_work(ipoib_workqueue,
			   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	/* Add this entry to passive ids list head, but do not re-add it
	 * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
	p->jiffies = jiffies;
	if (p->state == IPOIB_CM_RX_LIVE)
		list_move(&p->list, &priv->cm.passive_ids);
	spin_unlock_irq(&priv->lock);

	ret = ipoib_cm_send_rep(priv, cm_id, p->qp, &event->param.req_rcvd, psn);
	if (ret) {
		ipoib_warn(priv, "failed to send REP: %d\n", ret);
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
	}
	return 0;

err_modify:
	ib_destroy_qp(p->qp);
err_qp:
	kfree(p);
	return ret;
}
static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_rx *p;
	struct ipoib_dev_priv *priv;

	switch (event->event) {
	case IB_CM_REQ_RECEIVED:
		return ipoib_cm_req_handler(cm_id, event);
	case IB_CM_DREQ_RECEIVED:
		p = cm_id->context;
		ib_send_cm_drep(cm_id, NULL, 0);
		/* Fall through */
	case IB_CM_REJ_RECEIVED:
		p = cm_id->context;
		priv = p->priv;
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
		/* Fall through */
	default:
		return 0;
	}
}
void ipoib_cm_handle_rx_wc(struct ipoib_dev_priv *priv, struct ib_wc *wc)
{
	struct ipoib_cm_rx_buf saverx;
	struct ipoib_cm_rx_buf *rx_ring;
	unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
	struct ifnet *dev = priv->dev;
	struct mbuf *mb, *newmb;
	struct ipoib_cm_rx *p;
	int has_srq;
	uint16_t proto;

	CURVNET_SET_QUIET(dev->if_vnet);

	ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {
			spin_lock(&priv->lock);
			list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
			ipoib_cm_start_rx_drain(priv);
			if (priv->cm.id != NULL)
				queue_work(ipoib_workqueue,
					   &priv->cm.rx_reap_task);
			spin_unlock(&priv->lock);
		} else
			ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
				   wr_id, ipoib_recvq_size);
		goto done;
	}

	p = wc->qp->qp_context;

	has_srq = ipoib_cm_has_srq(priv);
	rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;

	mb = rx_ring[wr_id].mb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		ipoib_dbg(priv, "cm recv error "
			   "(status=%d, wrid=%d vend_err %x)\n",
			   wc->status, wr_id, wc->vendor_err);
		if_inc_counter(dev, IFCOUNTER_IERRORS, 1);
		if (has_srq)
			goto repost;
		else {
			if (!--p->recv_count) {
				spin_lock(&priv->lock);
				list_move(&p->list, &priv->cm.rx_reap_list);
				queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
				spin_unlock(&priv->lock);
			}
			goto done;
		}
	}

	if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
		if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
			p->jiffies = jiffies;
			/* Move this entry to list head, but do not re-add it
			 * if it has been moved out of list. */
			if (p->state == IPOIB_CM_RX_LIVE)
				list_move(&p->list, &priv->cm.passive_ids);
		}
	}

	memcpy(&saverx, &rx_ring[wr_id], sizeof(saverx));
	newmb = ipoib_cm_alloc_rx_mb(priv, &rx_ring[wr_id]);
	if (unlikely(!newmb)) {
		/*
		 * If we can't allocate a new RX buffer, dump
		 * this packet and reuse the old buffer.
		 */
		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
		if_inc_counter(dev, IFCOUNTER_IERRORS, 1);
		memcpy(&rx_ring[wr_id], &saverx, sizeof(saverx));
		goto repost;
	}

	ipoib_cm_dma_unmap_rx(priv, &saverx);

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	ipoib_dma_mb(priv, mb, wc->byte_len);

	if_inc_counter(dev, IFCOUNTER_IPACKETS, 1);
	if_inc_counter(dev, IFCOUNTER_IBYTES, mb->m_pkthdr.len);

	mb->m_pkthdr.rcvif = dev;
	proto = *mtod(mb, uint16_t *);
	m_adj(mb, IPOIB_ENCAP_LEN);

	IPOIB_MTAP_PROTO(dev, mb, proto);
	ipoib_demux(dev, mb, ntohs(proto));

repost:
	if (has_srq) {
		if (unlikely(ipoib_cm_post_receive_srq(priv, wr_id)))
			ipoib_warn(priv, "ipoib_cm_post_receive_srq failed "
				   "for buf %d\n", wr_id);
	} else {
		if (unlikely(ipoib_cm_post_receive_nonsrq(priv, p,
							  &priv->cm.rx_wr,
							  priv->cm.rx_sge,
							  wr_id))) {
			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed "
				   "for buf %d\n", wr_id);
			--p->recv_count;
		}
	}
done:
	CURVNET_RESTORE();
}
static inline int post_send(struct ipoib_dev_priv *priv,
			    struct ipoib_cm_tx *tx,
			    struct ipoib_cm_tx_buf *tx_req,
			    unsigned int wr_id)
{
	struct ib_send_wr *bad_wr;
	struct mbuf *mb = tx_req->mb;
	u64 *mapping = tx_req->mapping;
	struct mbuf *m;
	int i;

	for (m = mb, i = 0; m != NULL; m = m->m_next, i++) {
		priv->tx_sge[i].addr = mapping[i];
		priv->tx_sge[i].length = m->m_len;
	}
	priv->tx_wr.wr.num_sge = i;
	priv->tx_wr.wr.wr_id = wr_id | IPOIB_OP_CM;
	priv->tx_wr.wr.opcode = IB_WR_SEND;

	return ib_post_send(tx->qp, &priv->tx_wr.wr, &bad_wr);
}
void ipoib_cm_send(struct ipoib_dev_priv *priv, struct mbuf *mb, struct ipoib_cm_tx *tx)
{
	struct ipoib_cm_tx_buf *tx_req;
	struct ifnet *dev = priv->dev;

	if (unlikely(priv->tx_outstanding > MAX_SEND_CQE)) {
		while (ipoib_poll_tx(priv, false))
			; /* nothing */
	}

	m_adj(mb, sizeof(struct ipoib_pseudoheader));
	if (unlikely(mb->m_pkthdr.len > tx->mtu)) {
		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
			   mb->m_pkthdr.len, tx->mtu);
		if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
		ipoib_cm_mb_too_long(priv, mb, IPOIB_CM_MTU(tx->mtu));
		return;
	}

	ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
		       tx->tx_head, mb->m_pkthdr.len, tx->qp->qp_num);

	/*
	 * We put the mb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
	tx_req->mb = mb;
	if (unlikely(ipoib_dma_map_tx(priv->ca, (struct ipoib_tx_buf *)tx_req,
	    priv->cm.num_frags))) {
		if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
		if (tx_req->mb)
			m_freem(tx_req->mb);
		return;
	}

	if (unlikely(post_send(priv, tx, tx_req, tx->tx_head & (ipoib_sendq_size - 1)))) {
		ipoib_warn(priv, "post_send failed\n");
		if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
		ipoib_dma_unmap_tx(priv->ca, (struct ipoib_tx_buf *)tx_req);
		m_freem(mb);
	} else {
		++tx->tx_head;

		if (++priv->tx_outstanding == ipoib_sendq_size) {
			ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
				  tx->qp->qp_num);
			if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
				ipoib_warn(priv, "request notify on send CQ failed\n");
			dev->if_drv_flags |= IFF_DRV_OACTIVE;
		}
	}
}
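/*
 * Send completions below retire ring slots in order: tx_tail chases
 * tx_head, tx_outstanding tracks WRs still owned by the HCA, and the
 * interface queue is reopened once it drains to half of
 * ipoib_sendq_size.  Hard (non-flush) errors also tear the connection
 * down via the reap task.
 */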
void ipoib_cm_handle_tx_wc(struct ipoib_dev_priv *priv, struct ib_wc *wc)
{
	struct ipoib_cm_tx *tx = wc->qp->qp_context;
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
	struct ifnet *dev = priv->dev;
	struct ipoib_cm_tx_buf *tx_req;

	ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &tx->tx_ring[wr_id];

	ipoib_dma_unmap_tx(priv->ca, (struct ipoib_tx_buf *)tx_req);

	/* FIXME: is this right? Shouldn't we only increment on success? */
	if_inc_counter(dev, IFCOUNTER_OPACKETS, 1);

	m_freem(tx_req->mb);

	++tx->tx_tail;
	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    (dev->if_drv_flags & IFF_DRV_OACTIVE) != 0 &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		dev->if_drv_flags &= ~IFF_DRV_OACTIVE;

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR) {
		struct ipoib_path *path;

		ipoib_dbg(priv, "failed cm send event "
			   "(status=%d, wrid=%d vend_err %x)\n",
			   wc->status, wr_id, wc->vendor_err);

		spin_lock(&priv->lock);
		path = tx->path;
		if (path) {
			path->cm = NULL;
			rb_erase(&path->rb_node, &priv->path_tree);
			list_del(&path->list);
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
		spin_unlock(&priv->lock);
	}
}
int ipoib_cm_dev_open(struct ipoib_dev_priv *priv)
{
	int ret;

	if (!IPOIB_CM_SUPPORTED(IF_LLADDR(priv->dev)))
		return 0;

	priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, priv);
	if (IS_ERR(priv->cm.id)) {
		printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
		ret = PTR_ERR(priv->cm.id);
		goto err_cm;
	}

	ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num), 0);
	if (ret) {
		printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
		       IPOIB_CM_IETF_ID | priv->qp->qp_num);
		goto err_listen;
	}

	return 0;

err_listen:
	ib_destroy_cm_id(priv->cm.id);
err_cm:
	priv->cm.id = NULL;
	return ret;
}
static void ipoib_cm_free_rx_reap_list(struct ipoib_dev_priv *priv)
{
	struct ipoib_cm_rx *rx, *n;
	LIST_HEAD(list);

	spin_lock_irq(&priv->lock);
	list_splice_init(&priv->cm.rx_reap_list, &list);
	spin_unlock_irq(&priv->lock);

	list_for_each_entry_safe(rx, n, &list, list) {
		ib_destroy_cm_id(rx->id);
		ib_destroy_qp(rx->qp);
		if (!ipoib_cm_has_srq(priv)) {
			ipoib_cm_free_rx_ring(priv, rx->rx_ring);
			spin_lock_irq(&priv->lock);
			--priv->cm.nonsrq_conn_qp;
			spin_unlock_irq(&priv->lock);
		}
		kfree(rx);
	}
}
void ipoib_cm_dev_stop(struct ipoib_dev_priv *priv)
{
	struct ipoib_cm_rx *p;
	unsigned long begin;
	int ret;

	if (!IPOIB_CM_SUPPORTED(IF_LLADDR(priv->dev)) || !priv->cm.id)
		return;

	ib_destroy_cm_id(priv->cm.id);
	priv->cm.id = NULL;

	cancel_work_sync(&priv->cm.rx_reap_task);

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	/* Wait for all RX to be drained */
	begin = jiffies;

	while (!list_empty(&priv->cm.rx_error_list) ||
	       !list_empty(&priv->cm.rx_flush_list) ||
	       !list_empty(&priv->cm.rx_drain_list)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "RX drain timing out\n");

			/*
			 * assume the HW is wedged and just free up everything.
			 */
			list_splice_init(&priv->cm.rx_flush_list,
					 &priv->cm.rx_reap_list);
			list_splice_init(&priv->cm.rx_error_list,
					 &priv->cm.rx_reap_list);
			list_splice_init(&priv->cm.rx_drain_list,
					 &priv->cm.rx_reap_list);
			break;
		}
		spin_unlock_irq(&priv->lock);
		msleep(1);
		ipoib_drain_cq(priv);
		spin_lock_irq(&priv->lock);
	}

	spin_unlock_irq(&priv->lock);

	ipoib_cm_free_rx_reap_list(priv);

	cancel_delayed_work_sync(&priv->cm.stale_task);
}
static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct ipoib_cm_tx *p = cm_id->context;
	struct ipoib_dev_priv *priv = p->priv;
	struct ipoib_cm_data *data = event->private_data;
	struct ifqueue mbqueue;
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	struct mbuf *mb;

	ipoib_dbg(priv, "cm rep handler\n");
	p->mtu = be32_to_cpu(data->mtu);

	if (p->mtu <= IPOIB_ENCAP_LEN) {
		ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
			   p->mtu, IPOIB_ENCAP_LEN);
		return -EINVAL;
	}

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}

	qp_attr.rq_psn = 0 /* FIXME */;
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return ret;
	}

	bzero(&mbqueue, sizeof(mbqueue));

	spin_lock_irq(&priv->lock);
	set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
	if (p->path)
		for (;;) {
			_IF_DEQUEUE(&p->path->queue, mb);
			if (mb == NULL)
				break;
			_IF_ENQUEUE(&mbqueue, mb);
		}
	spin_unlock_irq(&priv->lock);

	for (;;) {
		struct ifnet *dev = p->priv->dev;
		_IF_DEQUEUE(&mbqueue, mb);
		if (mb == NULL)
			break;
		mb->m_pkthdr.rcvif = dev;
		if (dev->if_transmit(dev, mb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}

	ret = ib_send_cm_rtu(cm_id, NULL, 0);
	if (ret) {
		ipoib_warn(priv, "failed to send RTU: %d\n", ret);
		return ret;
	}
	return 0;
}
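/*
 * Active side of connection setup, driven by the functions below: a
 * per-destination RC QP and CM ID are created, the QP is moved to
 * INIT, and a REQ is sent.  When the peer's REP arrives,
 * ipoib_cm_rep_handler() above finishes the RTR/RTS transitions and
 * retransmits anything that was queued on the path while the
 * connection was still forming.
 */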
static struct ib_qp *ipoib_cm_create_tx_qp(struct ipoib_dev_priv *priv,
					   struct ipoib_cm_tx *tx)
{
	struct ib_qp_init_attr attr = {
		.send_cq = priv->send_cq,
		.recv_cq = priv->recv_cq,
		.srq = priv->cm.srq,
		.cap.max_send_wr = ipoib_sendq_size,
		.cap.max_send_sge = priv->cm.num_frags,
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
		.qp_context = tx
	};

	return ib_create_qp(priv->pd, &attr);
}
static int ipoib_cm_send_req(struct ipoib_dev_priv *priv,
			     struct ib_cm_id *id, struct ib_qp *qp,
			     u32 qpn,
			     struct ib_sa_path_rec *pathrec)
{
	struct ipoib_cm_data data = {};
	struct ib_cm_req_param req = {};

	ipoib_dbg(priv, "cm send req\n");

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(priv->cm.max_cm_mtu);

	req.primary_path		= pathrec;
	req.alternate_path		= NULL;
	req.service_id			= cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
	req.qp_num			= qp->qp_num;
	req.qp_type			= qp->qp_type;
	req.private_data		= &data;
	req.private_data_len		= sizeof data;
	req.flow_control		= 0;

	req.starting_psn		= 0; /* FIXME */

	/*
	 * Pick some arbitrary defaults here; we could make these
	 * module parameters if anyone cared about setting them.
	 */
	req.responder_resources		= 4;
	req.remote_cm_response_timeout	= 20;
	req.local_cm_response_timeout	= 20;
	req.retry_count			= 0; /* RFC draft warns against retries */
	req.rnr_retry_count		= 0; /* RFC draft warns against retries */
	req.max_cm_retries		= 15;
	req.srq				= ipoib_cm_has_srq(priv);
	return ib_send_cm_req(id, &req);
}
static int ipoib_cm_modify_tx_init(struct ipoib_dev_priv *priv,
				   struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
	if (ret) {
		ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
	qp_attr.port_num = priv->port;
	qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
		return ret;
	}
	return 0;
}
static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
			    struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = p->priv;
	int ret;

	p->tx_ring = kzalloc(ipoib_sendq_size * sizeof *p->tx_ring, GFP_KERNEL);
	if (!p->tx_ring) {
		ipoib_warn(priv, "failed to allocate tx ring\n");
		ret = -ENOMEM;
		goto err_tx;
	}
	memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);

	p->qp = ipoib_cm_create_tx_qp(p->priv, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
		goto err_qp;
	}

	p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
	if (IS_ERR(p->id)) {
		ret = PTR_ERR(p->id);
		ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
		goto err_id;
	}

	ret = ipoib_cm_modify_tx_init(p->priv, p->id, p->qp);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret);
		goto err_modify;
	}

	ret = ipoib_cm_send_req(p->priv, p->id, p->qp, qpn, pathrec);
	if (ret) {
		ipoib_warn(priv, "failed to send cm req: %d\n", ret);
		goto err_send_cm;
	}

	ipoib_dbg(priv, "Request connection 0x%x for gid %pI6 qpn 0x%x\n",
		  p->qp->qp_num, pathrec->dgid.raw, qpn);

	return 0;

err_send_cm:
err_modify:
	ib_destroy_cm_id(p->id);
err_id:
	p->id = NULL;
	ib_destroy_qp(p->qp);
err_qp:
	p->qp = NULL;
err_tx:
	return ret;
}
static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
	struct ipoib_dev_priv *priv = p->priv;
	struct ifnet *dev = priv->dev;
	struct ipoib_cm_tx_buf *tx_req;
	unsigned long begin;

	ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
		  p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);

	if (p->path)
		ipoib_path_free(priv, p->path);

	if (p->id)
		ib_destroy_cm_id(p->id);

	if (p->tx_ring) {
		/* Wait for all sends to complete */
		begin = jiffies;
		while ((int) p->tx_tail - (int) p->tx_head < 0) {
			if (time_after(jiffies, begin + 5 * HZ)) {
				ipoib_warn(priv, "timing out; %d sends not completed\n",
					   p->tx_head - p->tx_tail);
				goto timeout;
			}

			msleep(1);
		}
	}

timeout:

	while ((int) p->tx_tail - (int) p->tx_head < 0) {
		tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
		ipoib_dma_unmap_tx(priv->ca, (struct ipoib_tx_buf *)tx_req);
		m_freem(tx_req->mb);
		++p->tx_tail;
		if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
		    (dev->if_drv_flags & IFF_DRV_OACTIVE) != 0 &&
		    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
			dev->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}

	if (p->qp)
		ib_destroy_qp(p->qp);

	kfree(p->tx_ring);
	kfree(p);
}
static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_tx *tx = cm_id->context;
	struct ipoib_dev_priv *priv = tx->priv;
	struct ipoib_path *path;
	unsigned long flags;
	int ret;

	switch (event->event) {
	case IB_CM_DREQ_RECEIVED:
		ipoib_dbg(priv, "DREQ received.\n");
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	case IB_CM_REP_RECEIVED:
		ipoib_dbg(priv, "REP received.\n");
		ret = ipoib_cm_rep_handler(cm_id, event);
		if (ret)
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		break;
	case IB_CM_REQ_ERROR:
	case IB_CM_REJ_RECEIVED:
	case IB_CM_TIMEWAIT_EXIT:
		ipoib_dbg(priv, "CM error %d.\n", event->event);
		spin_lock_irqsave(&priv->lock, flags);
		path = tx->path;

		if (path) {
			path->cm = NULL;
			tx->path = NULL;
			rb_erase(&path->rb_node, &priv->path_tree);
			list_del(&path->list);
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		spin_unlock_irqrestore(&priv->lock, flags);
		if (path)
			ipoib_path_free(tx->priv, path);
		break;
	default:
		break;
	}

	return 0;
}
struct ipoib_cm_tx *ipoib_cm_create_tx(struct ipoib_dev_priv *priv,
				       struct ipoib_path *path)
{
	struct ipoib_cm_tx *tx;

	tx = kzalloc(sizeof *tx, GFP_ATOMIC);
	if (!tx)
		return NULL;

	ipoib_dbg(priv, "Creating cm tx\n");
	path->cm = tx;
	tx->path = path;
	tx->priv = priv;
	list_add(&tx->list, &priv->cm.start_list);
	set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
	queue_work(ipoib_workqueue, &priv->cm.start_task);
	return tx;
}
void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = tx->priv;

	if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
		spin_lock(&priv->lock);
		list_move(&tx->list, &priv->cm.reap_list);
		spin_unlock(&priv->lock);
		queue_work(ipoib_workqueue, &priv->cm.reap_task);
		ipoib_dbg(priv, "Reap connection for gid %pI6\n",
			  tx->path->pathrec.dgid.raw);
		tx->path = NULL;
	}
}
static void ipoib_cm_tx_start(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.start_task);
	struct ipoib_path *path;
	struct ipoib_cm_tx *p;
	unsigned long flags;
	int ret;

	struct ib_sa_path_rec pathrec;
	u32 qpn;

	ipoib_dbg(priv, "cm start task\n");
	spin_lock_irqsave(&priv->lock, flags);

	while (!list_empty(&priv->cm.start_list)) {
		p = list_entry(priv->cm.start_list.next, typeof(*p), list);
		list_del_init(&p->list);
		path = p->path;
		qpn = IPOIB_QPN(path->hwaddr);
		memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);

		spin_unlock_irqrestore(&priv->lock, flags);

		ret = ipoib_cm_tx_init(p, qpn, &pathrec);

		spin_lock_irqsave(&priv->lock, flags);

		if (ret) {
			path = p->path;
			if (path) {
				path->cm = NULL;
				rb_erase(&path->rb_node, &priv->path_tree);
				list_del(&path->list);
				ipoib_path_free(priv, path);
			}
			list_del(&p->list);
			kfree(p);
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}
static void ipoib_cm_tx_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.reap_task);
	struct ipoib_cm_tx *p;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	while (!list_empty(&priv->cm.reap_list)) {
		p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
		list_del(&p->list);
		spin_unlock_irqrestore(&priv->lock, flags);
		ipoib_cm_tx_destroy(p);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}
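/*
 * Path-MTU feedback: packets that exceed the connection MTU are
 * parked on cm.mb_queue by ipoib_cm_mb_too_long() and the task below
 * answers them from workqueue context with ICMP "fragmentation
 * needed" or ICMPv6 "packet too big", mirroring what a router on the
 * path would send.
 */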
static void ipoib_cm_mb_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.mb_task);
	struct mbuf *mb;
	unsigned long flags;
#if defined(INET) || defined(INET6)
	unsigned mtu = priv->mcast_mtu;
#endif
	uint16_t proto;

	spin_lock_irqsave(&priv->lock, flags);

	for (;;) {
		IF_DEQUEUE(&priv->cm.mb_queue, mb);
		if (mb == NULL)
			break;
		spin_unlock_irqrestore(&priv->lock, flags);

		proto = htons(*mtod(mb, uint16_t *));
		m_adj(mb, IPOIB_ENCAP_LEN);
		switch (proto) {
#if defined(INET)
		case ETHERTYPE_IP:
			icmp_error(mb, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0, mtu);
			break;
#endif
#if defined(INET6)
		case ETHERTYPE_IPV6:
			icmp6_error(mb, ICMP6_PACKET_TOO_BIG, 0, mtu);
			break;
#endif
		default:
			m_freem(mb);
		}

		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}
void
ipoib_cm_mb_too_long(struct ipoib_dev_priv *priv, struct mbuf *mb, unsigned int mtu)
{
	int e = priv->cm.mb_queue.ifq_len;

	IF_ENQUEUE(&priv->cm.mb_queue, mb);
	if (e == 0)
		queue_work(ipoib_workqueue, &priv->cm.mb_task);
}
static void ipoib_cm_rx_reap(struct work_struct *work)
{
	ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
						cm.rx_reap_task));
}
static void ipoib_cm_stale_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.stale_task.work);
	struct ipoib_cm_rx *p;
	int ret;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		/* List is sorted by LRU, start from tail,
		 * stop when we see a recently used entry */
		p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
		if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
			break;
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	if (!list_empty(&priv->cm.passive_ids))
		queue_delayed_work(ipoib_workqueue,
				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	spin_unlock_irq(&priv->lock);
}
static void ipoib_cm_create_srq(struct ipoib_dev_priv *priv, int max_sge)
{
	struct ib_srq_init_attr srq_init_attr = {
		.attr = {
			.max_wr  = ipoib_recvq_size,
			.max_sge = max_sge
		}
	};

	priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
	if (IS_ERR(priv->cm.srq)) {
		if (PTR_ERR(priv->cm.srq) != -ENOSYS)
			printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
			       priv->ca->name, PTR_ERR(priv->cm.srq));
		priv->cm.srq = NULL;
		return;
	}

	priv->cm.srq_ring = kzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring, GFP_KERNEL);
	if (!priv->cm.srq_ring) {
		printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
		       priv->ca->name, ipoib_recvq_size);
		ib_destroy_srq(priv->cm.srq);
		priv->cm.srq = NULL;
		return;
	}

	memset(priv->cm.srq_ring, 0, ipoib_recvq_size * sizeof *priv->cm.srq_ring);
}
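/*
 * With an SRQ, the largest connected-mode MTU we advertise is bounded
 * by the scatter list the SRQ supports: ipoib_cm_dev_init() below
 * clamps the device's max_srq_sge to IPOIB_CM_RX_SG and then sizes
 * max_cm_mtu as max_srq_sge * MJUMPAGESIZE, one page-sized mbuf per
 * SG entry; without an SRQ it falls back to IPOIB_CM_MAX_MTU.
 */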
int ipoib_cm_dev_init(struct ipoib_dev_priv *priv)
{
	struct ifnet *dev = priv->dev;
	int max_srq_sge, i;

	INIT_LIST_HEAD(&priv->cm.passive_ids);
	INIT_LIST_HEAD(&priv->cm.reap_list);
	INIT_LIST_HEAD(&priv->cm.start_list);
	INIT_LIST_HEAD(&priv->cm.rx_error_list);
	INIT_LIST_HEAD(&priv->cm.rx_flush_list);
	INIT_LIST_HEAD(&priv->cm.rx_drain_list);
	INIT_LIST_HEAD(&priv->cm.rx_reap_list);
	INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
	INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
	INIT_WORK(&priv->cm.mb_task, ipoib_cm_mb_reap);
	INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
	INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);

	bzero(&priv->cm.mb_queue, sizeof(priv->cm.mb_queue));
	mtx_init(&priv->cm.mb_queue.ifq_mtx,
	    dev->if_xname, "if send queue", MTX_DEF);

	max_srq_sge = priv->ca->attrs.max_srq_sge;

	ipoib_dbg(priv, "max_srq_sge=%d\n", max_srq_sge);

	max_srq_sge = min_t(int, IPOIB_CM_RX_SG, max_srq_sge);
	ipoib_cm_create_srq(priv, max_srq_sge);
	if (ipoib_cm_has_srq(priv)) {
		priv->cm.max_cm_mtu = max_srq_sge * MJUMPAGESIZE;
		priv->cm.num_frags = max_srq_sge;
		ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
			  priv->cm.max_cm_mtu, priv->cm.num_frags);
	} else {
		priv->cm.max_cm_mtu = IPOIB_CM_MAX_MTU;
		priv->cm.num_frags = IPOIB_CM_RX_SG;
	}

	ipoib_cm_init_rx_wr(priv, &priv->cm.rx_wr, priv->cm.rx_sge);

	if (ipoib_cm_has_srq(priv)) {
		for (i = 0; i < ipoib_recvq_size; ++i) {
			if (!ipoib_cm_alloc_rx_mb(priv, &priv->cm.srq_ring[i])) {
				ipoib_warn(priv, "failed to allocate "
					   "receive buffer %d\n", i);
				ipoib_cm_dev_cleanup(priv);
				return -ENOMEM;
			}

			if (ipoib_cm_post_receive_srq(priv, i)) {
				ipoib_warn(priv, "ipoib_cm_post_receive_srq "
					   "failed for buf %d\n", i);
				ipoib_cm_dev_cleanup(priv);
				return -EIO;
			}
		}
	}

	IF_LLADDR(priv->dev)[0] = IPOIB_FLAGS_RC;
	return 0;
}
void ipoib_cm_dev_cleanup(struct ipoib_dev_priv *priv)
{
	int ret;

	if (!priv->cm.srq)
		return;

	ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");

	ret = ib_destroy_srq(priv->cm.srq);
	if (ret)
		ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);

	priv->cm.srq = NULL;
	if (!priv->cm.srq_ring)
		return;

	ipoib_cm_free_rx_ring(priv, priv->cm.srq_ring);
	priv->cm.srq_ring = NULL;

	mtx_destroy(&priv->cm.mb_queue.ifq_mtx);
}
#endif /* CONFIG_INFINIBAND_IPOIB_CM */