/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2006 Mellanox Technologies. All rights reserved
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "ipoib.h"

#ifdef CONFIG_INFINIBAND_IPOIB_CM

#include <netinet/ip.h>
#include <netinet/ip_icmp.h>
#include <netinet/icmp6.h>

#include <rdma/ib_cm.h>
#include <rdma/ib_cache.h>
#include <linux/delay.h>

int ipoib_max_conn_qp = 128;

module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444);
MODULE_PARM_DESC(max_nonsrq_conn_qp,
                 "Max number of connected-mode QPs per interface "
                 "(applied only if shared receive queue is not available)");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
MODULE_PARM_DESC(cm_data_debug_level,
                 "Enable data path debug tracing for connected mode if > 0");
#endif

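/*
 * Connected-mode service IDs are built by OR'ing the peer's datagram
 * QPN into this prefix (the IPoIB-CM service ID, see RFC 4755); the
 * passive side listens on it in ipoib_cm_dev_open() and the active
 * side targets it in ipoib_cm_send_req().
 */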
#define IPOIB_CM_IETF_ID 0x1000000000000000ULL

#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
#define IPOIB_CM_RX_TIMEOUT     (2 * 256 * HZ)
#define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)

static struct ib_qp_attr ipoib_cm_err_attr = {
        .qp_state = IB_QPS_ERR
};

#define IPOIB_CM_RX_DRAIN_WRID 0xffffffff

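/*
 * Dummy send work request posted to an RX QP that is already in the
 * error state: the resulting "flush error" completion is the marker
 * telling us the receive CQ has drained for that QP (see
 * ipoib_cm_start_rx_drain() and ipoib_cm_handle_rx_wc()).
 */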
static struct ib_send_wr ipoib_cm_rx_drain_wr = {
        .wr_id = IPOIB_CM_RX_DRAIN_WRID,
        .opcode = IB_WR_SEND,
};

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
                               struct ib_cm_event *event);

static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, struct ipoib_cm_rx_buf *rx_req)
{
        ipoib_dma_unmap_rx(priv, (struct ipoib_rx_buf *)rx_req);
}

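/*
 * Rebuild the scatter/gather list from the mbuf chain backing ring
 * slot 'id' and repost it to the shared receive queue; on failure the
 * buffer is unmapped and freed, leaving the slot empty.
 */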
static int ipoib_cm_post_receive_srq(struct ipoib_dev_priv *priv, int id)
{
        struct ib_recv_wr *bad_wr;
        struct ipoib_rx_buf *rx_req;
        struct mbuf *m;
        int ret;
        int i;

        rx_req = (struct ipoib_rx_buf *)&priv->cm.srq_ring[id];
        for (m = rx_req->mb, i = 0; m != NULL; m = m->m_next, i++) {
                priv->cm.rx_sge[i].addr = rx_req->mapping[i];
                priv->cm.rx_sge[i].length = m->m_len;
        }

        priv->cm.rx_wr.num_sge = i;
        priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

        ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
        if (unlikely(ret)) {
                ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
                ipoib_dma_unmap_rx(priv, rx_req);
                m_freem(priv->cm.srq_ring[id].mb);
                priv->cm.srq_ring[id].mb = NULL;
        }

        return ret;
}

static int ipoib_cm_post_receive_nonsrq(struct ipoib_dev_priv *priv,
                                        struct ipoib_cm_rx *rx,
                                        struct ib_recv_wr *wr,
                                        struct ib_sge *sge, int id)
{
        struct ipoib_rx_buf *rx_req;
        struct ib_recv_wr *bad_wr;
        struct mbuf *m;
        int ret;
        int i;

        rx_req = (struct ipoib_rx_buf *)&rx->rx_ring[id];
        for (m = rx_req->mb, i = 0; m != NULL; m = m->m_next, i++) {
                sge[i].addr = rx_req->mapping[i];
                sge[i].length = m->m_len;
        }

        wr->num_sge = i;
        wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

        ret = ib_post_recv(rx->qp, wr, &bad_wr);
        if (unlikely(ret)) {
                ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
                ipoib_dma_unmap_rx(priv, rx_req);
                m_freem(rx->rx_ring[id].mb);
                rx->rx_ring[id].mb = NULL;
        }

        return ret;
}

static struct mbuf *
ipoib_cm_alloc_rx_mb(struct ipoib_dev_priv *priv, struct ipoib_cm_rx_buf *rx_req)
{
        return ipoib_alloc_map_mb(priv, (struct ipoib_rx_buf *)rx_req,
            priv->cm.max_cm_mtu);
}

static void ipoib_cm_free_rx_ring(struct ipoib_dev_priv *priv,
                                  struct ipoib_cm_rx_buf *rx_ring)
{
        int i;

        for (i = 0; i < ipoib_recvq_size; ++i)
                if (rx_ring[i].mb) {
                        ipoib_cm_dma_unmap_rx(priv, &rx_ring[i]);
                        m_freem(rx_ring[i].mb);
                }

        kfree(rx_ring);
}

static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
{
        struct ib_send_wr *bad_wr;
        struct ipoib_cm_rx *p;

        /* We only reserved 1 extra slot in the CQ for drain WRs, so
         * make sure we have at most 1 outstanding WR. */
        if (list_empty(&priv->cm.rx_flush_list) ||
            !list_empty(&priv->cm.rx_drain_list))
                return;

        /*
         * QPs on the flush list are in the error state, so a "flush
         * error" WC will be generated immediately for each WR we post.
         */
        p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
        if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
                ipoib_warn(priv, "failed to post drain wr\n");

        list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
}

static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
{
        struct ipoib_cm_rx *p = ctx;
        struct ipoib_dev_priv *priv = p->priv;
        unsigned long flags;

        if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
                return;

        spin_lock_irqsave(&priv->lock, flags);
        list_move(&p->list, &priv->cm.rx_flush_list);
        p->state = IPOIB_CM_RX_FLUSH;
        ipoib_cm_start_rx_drain(priv);
        spin_unlock_irqrestore(&priv->lock, flags);
}

static struct ib_qp *ipoib_cm_create_rx_qp(struct ipoib_dev_priv *priv,
                                           struct ipoib_cm_rx *p)
{
        struct ib_qp_init_attr attr = {
                .event_handler = ipoib_cm_rx_event_handler,
                .send_cq = priv->recv_cq, /* For drain WR */
                .recv_cq = priv->recv_cq,
                .srq = priv->cm.srq,
                .cap.max_send_wr = 1, /* For drain WR */
                .cap.max_send_sge = 1,
                .sq_sig_type = IB_SIGNAL_ALL_WR,
                .qp_type = IB_QPT_RC,
                .qp_context = p,
        };

        if (!ipoib_cm_has_srq(priv)) {
                attr.cap.max_recv_wr  = ipoib_recvq_size;
                attr.cap.max_recv_sge = priv->cm.num_frags;
        }

        return ib_create_qp(priv->pd, &attr);
}

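/*
 * Walk the passive-side QP through INIT -> RTR -> RTS, letting the CM
 * layer fill in the attributes and mask for each transition via
 * ib_cm_init_qp_attr(); only the receive PSN is supplied locally.
 */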
static int ipoib_cm_modify_rx_qp(struct ipoib_dev_priv *priv,
                                 struct ib_cm_id *cm_id, struct ib_qp *qp,
                                 unsigned psn)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        qp_attr.qp_state = IB_QPS_INIT;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
                return ret;
        }
        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
                return ret;
        }
        qp_attr.qp_state = IB_QPS_RTR;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
                return ret;
        }
        qp_attr.rq_psn = psn;
        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
                return ret;
        }

        /*
         * Current Mellanox HCA firmware won't generate completions
         * with error for drain WRs unless the QP has been moved to
         * RTS first. This work-around leaves a window where a QP has
         * moved to error asynchronously, but this will eventually get
         * fixed in firmware, so let's not error out if modify QP
         * fails.
         */
        qp_attr.qp_state = IB_QPS_RTS;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
                return 0;
        }
        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
                return 0;
        }

        return 0;
}

static void ipoib_cm_init_rx_wr(struct ipoib_dev_priv *priv,
                                struct ib_recv_wr *wr,
                                struct ib_sge *sge)
{
        int i;

        for (i = 0; i < IPOIB_CM_RX_SG; i++)
                sge[i].lkey = priv->pd->local_dma_lkey;

        wr->next    = NULL;
        wr->sg_list = sge;
        wr->num_sge = 1;
}

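/*
 * Per-connection receive setup for devices without an SRQ: each
 * ipoib_cm_rx gets a private ring of ipoib_recvq_size buffers, and the
 * total number of such connections is capped by the max_nonsrq_conn_qp
 * module parameter (excess REQs are rejected with IB_CM_REJ_NO_QP).
 */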
static int ipoib_cm_nonsrq_init_rx(struct ipoib_dev_priv *priv,
    struct ib_cm_id *cm_id, struct ipoib_cm_rx *rx)
{
        struct {
                struct ib_recv_wr wr;
                struct ib_sge sge[IPOIB_CM_RX_SG];
        } *t;
        int ret;
        int i;

        rx->rx_ring = kzalloc(ipoib_recvq_size * sizeof *rx->rx_ring, GFP_KERNEL);
        if (!rx->rx_ring) {
                printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
                       priv->ca->name, ipoib_recvq_size);
                return -ENOMEM;
        }

        memset(rx->rx_ring, 0, ipoib_recvq_size * sizeof *rx->rx_ring);

        t = kmalloc(sizeof *t, GFP_KERNEL);
        if (!t) {
                ret = -ENOMEM;
                goto err_free;
        }

        ipoib_cm_init_rx_wr(priv, &t->wr, t->sge);

        spin_lock_irq(&priv->lock);

        if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
                spin_unlock_irq(&priv->lock);
                ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);
                ret = -EINVAL;
                goto err_free;
        } else
                ++priv->cm.nonsrq_conn_qp;

        spin_unlock_irq(&priv->lock);

        for (i = 0; i < ipoib_recvq_size; ++i) {
                if (!ipoib_cm_alloc_rx_mb(priv, &rx->rx_ring[i])) {
                        ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
                        ret = -ENOMEM;
                        goto err_count;
                }
                ret = ipoib_cm_post_receive_nonsrq(priv, rx, &t->wr, t->sge, i);
                if (ret) {
                        ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq "
                                   "failed for buf %d\n", i);
                        ret = -EIO;
                        goto err_count;
                }
        }

        rx->recv_count = ipoib_recvq_size;

        kfree(t);

        return 0;

err_count:
        spin_lock_irq(&priv->lock);
        --priv->cm.nonsrq_conn_qp;
        spin_unlock_irq(&priv->lock);

err_free:
        kfree(t);
        ipoib_cm_free_rx_ring(priv, rx->rx_ring);

        return ret;
}

static int ipoib_cm_send_rep(struct ipoib_dev_priv *priv, struct ib_cm_id *cm_id,
                             struct ib_qp *qp, struct ib_cm_req_event_param *req,
                             unsigned psn)
{
        struct ipoib_cm_data data = {};
        struct ib_cm_rep_param rep = {};

        data.qpn = cpu_to_be32(priv->qp->qp_num);
        data.mtu = cpu_to_be32(priv->cm.max_cm_mtu);

        rep.private_data = &data;
        rep.private_data_len = sizeof data;
        rep.flow_control = 0;
        rep.rnr_retry_count = req->rnr_retry_count;
        rep.srq = ipoib_cm_has_srq(priv);
        rep.qp_num = qp->qp_num;
        rep.starting_psn = psn;
        return ib_send_cm_rep(cm_id, &rep);
}

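/*
 * Passive-side connection setup: on an incoming REQ, create an RC QP
 * tied to this peer, bring it to RTS with a random starting PSN,
 * allocate a private RX ring if there is no SRQ, and reply with a REP
 * carrying our datagram QPN and maximum connected-mode MTU.
 */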
static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
        struct ipoib_dev_priv *priv = cm_id->context;
        struct ipoib_cm_rx *p;
        unsigned psn;
        int ret;

        ipoib_dbg(priv, "REQ arrived\n");
        p = kzalloc(sizeof *p, GFP_KERNEL);
        if (!p)
                return -ENOMEM;
        p->priv = priv;
        p->id = cm_id;
        cm_id->context = p;
        p->state = IPOIB_CM_RX_LIVE;
        p->jiffies = jiffies;
        INIT_LIST_HEAD(&p->list);

        p->qp = ipoib_cm_create_rx_qp(priv, p);
        if (IS_ERR(p->qp)) {
                ret = PTR_ERR(p->qp);
                goto err_qp;
        }

        psn = random() & 0xffffff;
        ret = ipoib_cm_modify_rx_qp(priv, cm_id, p->qp, psn);
        if (ret)
                goto err_modify;

        if (!ipoib_cm_has_srq(priv)) {
                ret = ipoib_cm_nonsrq_init_rx(priv, cm_id, p);
                if (ret)
                        goto err_modify;
        }

        spin_lock_irq(&priv->lock);
        queue_delayed_work(ipoib_workqueue,
                           &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
        /* Add this entry to the passive-ids list head, but do not re-add
         * it if IB_EVENT_QP_LAST_WQE_REACHED has already moved it to the
         * flush list. */
        p->jiffies = jiffies;
        if (p->state == IPOIB_CM_RX_LIVE)
                list_move(&p->list, &priv->cm.passive_ids);
        spin_unlock_irq(&priv->lock);

        ret = ipoib_cm_send_rep(priv, cm_id, p->qp, &event->param.req_rcvd, psn);
        if (ret) {
                ipoib_warn(priv, "failed to send REP: %d\n", ret);
                if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
                        ipoib_warn(priv, "unable to move qp to error state\n");
        }
        return 0;

err_modify:
        ib_destroy_qp(p->qp);
err_qp:
        kfree(p);
        return ret;
}

static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
                               struct ib_cm_event *event)
{
        struct ipoib_cm_rx *p;
        struct ipoib_dev_priv *priv;

        switch (event->event) {
        case IB_CM_REQ_RECEIVED:
                return ipoib_cm_req_handler(cm_id, event);
        case IB_CM_DREQ_RECEIVED:
                p = cm_id->context;
                ib_send_cm_drep(cm_id, NULL, 0);
                /* Fall through */
        case IB_CM_REJ_RECEIVED:
                p = cm_id->context;
                priv = p->priv;
                if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
                        ipoib_warn(priv, "unable to move qp to error state\n");
                /* Fall through */
        default:
                return 0;
        }
}

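/*
 * Receive-completion handler for connected mode.  Drain-WR completions
 * and per-QP errors are sorted out first; for a good completion the
 * filled mbuf chain is handed up the stack and the ring slot is
 * refilled with a freshly allocated chain (or the old one is reused if
 * allocation fails), then the slot is reposted.
 */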
void ipoib_cm_handle_rx_wc(struct ipoib_dev_priv *priv, struct ib_wc *wc)
{
        struct ipoib_cm_rx_buf saverx;
        struct ipoib_cm_rx_buf *rx_ring;
        unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
        struct ifnet *dev = priv->dev;
        struct mbuf *mb, *newmb;
        struct ipoib_cm_rx *p;
        int has_srq;
        u_short proto;

        CURVNET_SET_QUIET(dev->if_vnet);

        ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
                       wr_id, wc->status);

        if (unlikely(wr_id >= ipoib_recvq_size)) {
                if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {
                        spin_lock(&priv->lock);
                        list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
                        ipoib_cm_start_rx_drain(priv);
                        if (priv->cm.id != NULL)
                                queue_work(ipoib_workqueue,
                                    &priv->cm.rx_reap_task);
                        spin_unlock(&priv->lock);
                } else
                        ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
                                   wr_id, ipoib_recvq_size);
                goto done;
        }

        p = wc->qp->qp_context;

        has_srq = ipoib_cm_has_srq(priv);
        rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;

        mb = rx_ring[wr_id].mb;

        if (unlikely(wc->status != IB_WC_SUCCESS)) {
                ipoib_dbg(priv, "cm recv error "
                           "(status=%d, wrid=%d vend_err %x)\n",
                           wc->status, wr_id, wc->vendor_err);
                if_inc_counter(dev, IFCOUNTER_IERRORS, 1);
                if (has_srq)
                        goto repost;
                else {
                        if (!--p->recv_count) {
                                spin_lock(&priv->lock);
                                list_move(&p->list, &priv->cm.rx_reap_list);
                                queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
                                spin_unlock(&priv->lock);
                        }
                        goto done;
                }
        }

        if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
                if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
                        p->jiffies = jiffies;
                        /* Move this entry to the list head, but do not re-add
                         * it if it has been moved out of the list. */
                        if (p->state == IPOIB_CM_RX_LIVE)
                                list_move(&p->list, &priv->cm.passive_ids);
                }
        }

        memcpy(&saverx, &rx_ring[wr_id], sizeof(saverx));
        newmb = ipoib_cm_alloc_rx_mb(priv, &rx_ring[wr_id]);
        if (unlikely(!newmb)) {
                /*
                 * If we can't allocate a new RX buffer, dump
                 * this packet and reuse the old buffer.
                 */
                ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
                if_inc_counter(dev, IFCOUNTER_IERRORS, 1);
                memcpy(&rx_ring[wr_id], &saverx, sizeof(saverx));
                goto repost;
        }

        ipoib_cm_dma_unmap_rx(priv, &saverx);

        ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
                       wc->byte_len, wc->slid);

        ipoib_dma_mb(priv, mb, wc->byte_len);

        if_inc_counter(dev, IFCOUNTER_IPACKETS, 1);
        if_inc_counter(dev, IFCOUNTER_IBYTES, mb->m_pkthdr.len);

        mb->m_pkthdr.rcvif = dev;
        proto = *mtod(mb, uint16_t *);
        m_adj(mb, IPOIB_ENCAP_LEN);

        IPOIB_MTAP_PROTO(dev, mb, proto);
        ipoib_demux(dev, mb, ntohs(proto));

repost:
        if (has_srq) {
                if (unlikely(ipoib_cm_post_receive_srq(priv, wr_id)))
                        ipoib_warn(priv, "ipoib_cm_post_receive_srq failed "
                                   "for buf %d\n", wr_id);
        } else {
                if (unlikely(ipoib_cm_post_receive_nonsrq(priv, p,
                                                          &priv->cm.rx_wr,
                                                          priv->cm.rx_sge,
                                                          wr_id))) {
                        --p->recv_count;
                        ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed "
                                   "for buf %d\n", wr_id);
                }
        }
done:
        CURVNET_RESTORE();
        return;
}

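/*
 * Transmit path.  post_send() turns the mapped mbuf chain into a
 * gather list on the connection's RC QP; ipoib_cm_send() owns the ring
 * bookkeeping, drops packets larger than the negotiated MTU (queueing
 * an ICMP/ICMPv6 error via ipoib_cm_mb_too_long()), and throttles the
 * interface when the send ring fills up.
 */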
static inline int post_send(struct ipoib_dev_priv *priv,
                            struct ipoib_cm_tx *tx,
                            struct ipoib_cm_tx_buf *tx_req,
                            unsigned int wr_id)
{
        struct ib_send_wr *bad_wr;
        struct mbuf *mb = tx_req->mb;
        u64 *mapping = tx_req->mapping;
        struct mbuf *m;
        int i;

        for (m = mb, i = 0; m != NULL; m = m->m_next, i++) {
                priv->tx_sge[i].addr = mapping[i];
                priv->tx_sge[i].length = m->m_len;
        }
        priv->tx_wr.wr.num_sge = i;
        priv->tx_wr.wr.wr_id = wr_id | IPOIB_OP_CM;
        priv->tx_wr.wr.opcode = IB_WR_SEND;

        return ib_post_send(tx->qp, &priv->tx_wr.wr, &bad_wr);
}

void ipoib_cm_send(struct ipoib_dev_priv *priv, struct mbuf *mb, struct ipoib_cm_tx *tx)
{
        struct ipoib_cm_tx_buf *tx_req;
        struct ifnet *dev = priv->dev;

        if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
                while (ipoib_poll_tx(priv)); /* nothing */

        m_adj(mb, sizeof(struct ipoib_pseudoheader));
        if (unlikely(mb->m_pkthdr.len > tx->mtu)) {
                ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
                           mb->m_pkthdr.len, tx->mtu);
                if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
                ipoib_cm_mb_too_long(priv, mb, IPOIB_CM_MTU(tx->mtu));
                return;
        }

        ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
                       tx->tx_head, mb->m_pkthdr.len, tx->qp->qp_num);

        /*
         * We put the mb into the tx_ring _before_ we call post_send()
         * because it's entirely possible that the completion handler will
         * run before we execute anything after the post_send().  That
         * means we have to make sure everything is properly recorded and
         * our state is consistent before we call post_send().
         */
        tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
        tx_req->mb = mb;
        if (unlikely(ipoib_dma_map_tx(priv->ca, (struct ipoib_tx_buf *)tx_req,
            priv->cm.num_frags))) {
                if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
                if (tx_req->mb)
                        m_freem(tx_req->mb);
                return;
        }

        if (unlikely(post_send(priv, tx, tx_req, tx->tx_head & (ipoib_sendq_size - 1)))) {
                ipoib_warn(priv, "post_send failed\n");
                if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
                ipoib_dma_unmap_tx(priv->ca, (struct ipoib_tx_buf *)tx_req);
                m_freem(mb);
        } else {
                ++tx->tx_head;

                if (++priv->tx_outstanding == ipoib_sendq_size) {
                        ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
                                  tx->qp->qp_num);
                        if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
                                ipoib_warn(priv, "request notify on send CQ failed\n");
                        dev->if_drv_flags |= IFF_DRV_OACTIVE;
                }
        }
}

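/*
 * Send-completion handler: unmap and free the finished mbuf, advance
 * the ring tail, reopen the interface queue once enough completions
 * have drained, and tear down the connection on any error other than
 * a flush.
 */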
void ipoib_cm_handle_tx_wc(struct ipoib_dev_priv *priv, struct ib_wc *wc)
{
        struct ipoib_cm_tx *tx = wc->qp->qp_context;
        unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
        struct ifnet *dev = priv->dev;
        struct ipoib_cm_tx_buf *tx_req;

        ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
                       wr_id, wc->status);

        if (unlikely(wr_id >= ipoib_sendq_size)) {
                ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
                           wr_id, ipoib_sendq_size);
                return;
        }

        tx_req = &tx->tx_ring[wr_id];

        ipoib_dma_unmap_tx(priv->ca, (struct ipoib_tx_buf *)tx_req);

        /* FIXME: is this right? Shouldn't we only increment on success? */
        if_inc_counter(dev, IFCOUNTER_OPACKETS, 1);

        m_freem(tx_req->mb);

        ++tx->tx_tail;
        if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
            (dev->if_drv_flags & IFF_DRV_OACTIVE) != 0 &&
            test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                dev->if_drv_flags &= ~IFF_DRV_OACTIVE;

        if (wc->status != IB_WC_SUCCESS &&
            wc->status != IB_WC_WR_FLUSH_ERR) {
                struct ipoib_path *path;

                ipoib_dbg(priv, "failed cm send event "
                           "(status=%d, wrid=%d vend_err %x)\n",
                           wc->status, wr_id, wc->vendor_err);

                path = tx->path;

                if (path) {
                        path->cm = NULL;
                        rb_erase(&path->rb_node, &priv->path_tree);
                        list_del(&path->list);
                }

                if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                        list_move(&tx->list, &priv->cm.reap_list);
                        queue_work(ipoib_workqueue, &priv->cm.reap_task);
                }

                clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
        }
}

int ipoib_cm_dev_open(struct ipoib_dev_priv *priv)
{
        int ret;

        if (!IPOIB_CM_SUPPORTED(IF_LLADDR(priv->dev)))
                return 0;

        priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, priv);
        if (IS_ERR(priv->cm.id)) {
                printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
                ret = PTR_ERR(priv->cm.id);
                goto err_cm;
        }

        ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num), 0);
        if (ret) {
                printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
                       IPOIB_CM_IETF_ID | priv->qp->qp_num);
                goto err_listen;
        }

        return 0;

err_listen:
        ib_destroy_cm_id(priv->cm.id);
err_cm:
        priv->cm.id = NULL;
        return ret;
}

static void ipoib_cm_free_rx_reap_list(struct ipoib_dev_priv *priv)
{
        struct ipoib_cm_rx *rx, *n;
        LIST_HEAD(list);

        spin_lock_irq(&priv->lock);
        list_splice_init(&priv->cm.rx_reap_list, &list);
        spin_unlock_irq(&priv->lock);

        list_for_each_entry_safe(rx, n, &list, list) {
                ib_destroy_cm_id(rx->id);
                ib_destroy_qp(rx->qp);
                if (!ipoib_cm_has_srq(priv)) {
                        ipoib_cm_free_rx_ring(priv, rx->rx_ring);
                        spin_lock_irq(&priv->lock);
                        --priv->cm.nonsrq_conn_qp;
                        spin_unlock_irq(&priv->lock);
                }
                kfree(rx);
        }
}

void ipoib_cm_dev_stop(struct ipoib_dev_priv *priv)
{
        struct ipoib_cm_rx *p;
        unsigned long begin;
        int ret;

        if (!IPOIB_CM_SUPPORTED(IF_LLADDR(priv->dev)) || !priv->cm.id)
                return;

        ib_destroy_cm_id(priv->cm.id);
        priv->cm.id = NULL;

        cancel_work_sync(&priv->cm.rx_reap_task);

        spin_lock_irq(&priv->lock);
        while (!list_empty(&priv->cm.passive_ids)) {
                p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
                list_move(&p->list, &priv->cm.rx_error_list);
                p->state = IPOIB_CM_RX_ERROR;
                spin_unlock_irq(&priv->lock);
                ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
                if (ret)
                        ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
                spin_lock_irq(&priv->lock);
        }

        /* Wait for all RX to be drained */
        begin = jiffies;

        while (!list_empty(&priv->cm.rx_error_list) ||
               !list_empty(&priv->cm.rx_flush_list) ||
               !list_empty(&priv->cm.rx_drain_list)) {
                if (time_after(jiffies, begin + 5 * HZ)) {
                        ipoib_warn(priv, "RX drain timing out\n");

                        /*
                         * Assume the HW is wedged and just free up everything.
                         */
                        list_splice_init(&priv->cm.rx_flush_list,
                                         &priv->cm.rx_reap_list);
                        list_splice_init(&priv->cm.rx_error_list,
                                         &priv->cm.rx_reap_list);
                        list_splice_init(&priv->cm.rx_drain_list,
                                         &priv->cm.rx_reap_list);
                        break;
                }
                spin_unlock_irq(&priv->lock);
                msleep(1);
                ipoib_drain_cq(priv);
                spin_lock_irq(&priv->lock);
        }

        spin_unlock_irq(&priv->lock);

        ipoib_cm_free_rx_reap_list(priv);

        cancel_delayed_work_sync(&priv->cm.stale_task);
}

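/*
 * Active-side REP handling: validate the peer's advertised MTU, move
 * our RC QP through RTR to RTS, flush any mbufs that queued up on the
 * path while the connection was being established, and confirm with
 * an RTU.
 */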
static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
        struct ipoib_cm_tx *p = cm_id->context;
        struct ipoib_dev_priv *priv = p->priv;
        struct ipoib_cm_data *data = event->private_data;
        struct ifqueue mbqueue;
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;
        struct mbuf *mb;

        ipoib_dbg(priv, "cm rep handler\n");
        p->mtu = be32_to_cpu(data->mtu);

        if (p->mtu <= IPOIB_ENCAP_LEN) {
                ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
                           p->mtu, IPOIB_ENCAP_LEN);
                return -EINVAL;
        }

        qp_attr.qp_state = IB_QPS_RTR;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
                return ret;
        }

        qp_attr.rq_psn = 0 /* FIXME */;
        ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
                return ret;
        }

        qp_attr.qp_state = IB_QPS_RTS;
        ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
                return ret;
        }
        ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
                return ret;
        }

        bzero(&mbqueue, sizeof(mbqueue));

        spin_lock_irq(&priv->lock);
        set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
        if (p->path)
                for (;;) {
                        _IF_DEQUEUE(&p->path->queue, mb);
                        if (mb == NULL)
                                break;
                        _IF_ENQUEUE(&mbqueue, mb);
                }
        spin_unlock_irq(&priv->lock);

        for (;;) {
                struct ifnet *dev = p->priv->dev;
                _IF_DEQUEUE(&mbqueue, mb);
                if (mb == NULL)
                        break;
                mb->m_pkthdr.rcvif = dev;
                if (dev->if_transmit(dev, mb))
                        ipoib_warn(priv, "if_transmit failed "
                                   "to requeue packet\n");
        }

        ret = ib_send_cm_rtu(cm_id, NULL, 0);
        if (ret) {
                ipoib_warn(priv, "failed to send RTU: %d\n", ret);
                return ret;
        }
        return 0;
}

static struct ib_qp *ipoib_cm_create_tx_qp(struct ipoib_dev_priv *priv,
    struct ipoib_cm_tx *tx)
{
        struct ib_qp_init_attr attr = {
                .send_cq                = priv->send_cq,
                .recv_cq                = priv->recv_cq,
                .srq                    = priv->cm.srq,
                .cap.max_send_wr        = ipoib_sendq_size,
                .cap.max_send_sge       = priv->cm.num_frags,
                .sq_sig_type            = IB_SIGNAL_ALL_WR,
                .qp_type                = IB_QPT_RC,
                .qp_context             = tx
        };

        return ib_create_qp(priv->pd, &attr);
}

static int ipoib_cm_send_req(struct ipoib_dev_priv *priv,
                             struct ib_cm_id *id, struct ib_qp *qp,
                             u32 qpn,
                             struct ib_sa_path_rec *pathrec)
{
        struct ipoib_cm_data data = {};
        struct ib_cm_req_param req = {};

        ipoib_dbg(priv, "cm send req\n");

        data.qpn = cpu_to_be32(priv->qp->qp_num);
        data.mtu = cpu_to_be32(priv->cm.max_cm_mtu);

        req.primary_path                = pathrec;
        req.alternate_path              = NULL;
        req.service_id                  = cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
        req.qp_num                      = qp->qp_num;
        req.qp_type                     = qp->qp_type;
        req.private_data                = &data;
        req.private_data_len            = sizeof data;
        req.flow_control                = 0;

        req.starting_psn                = 0; /* FIXME */

        /*
         * Pick some arbitrary defaults here; we could make these
         * module parameters if anyone cared about setting them.
         */
        req.responder_resources         = 4;
        req.remote_cm_response_timeout  = 20;
        req.local_cm_response_timeout   = 20;
        req.retry_count                 = 0; /* RFC draft warns against retries */
        req.rnr_retry_count             = 0; /* RFC draft warns against retries */
        req.max_cm_retries              = 15;
        req.srq                         = ipoib_cm_has_srq(priv);
        return ib_send_cm_req(id, &req);
}

static int ipoib_cm_modify_tx_init(struct ipoib_dev_priv *priv,
                                  struct ib_cm_id *cm_id, struct ib_qp *qp)
{
        struct ib_qp_attr qp_attr;
        int qp_attr_mask, ret;

        ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
        if (ret) {
                ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
                return ret;
        }

        qp_attr.qp_state = IB_QPS_INIT;
        qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
        qp_attr.port_num = priv->port;
        qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;

        ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
        if (ret) {
                ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
                return ret;
        }
        return 0;
}

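/*
 * Active-side connection bring-up: allocate the send ring, create the
 * RC QP and CM ID, move the QP to INIT, and fire off the CM REQ; the
 * connection becomes usable once ipoib_cm_rep_handler() has processed
 * the peer's REP.
 */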
static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
                            struct ib_sa_path_rec *pathrec)
{
        struct ipoib_dev_priv *priv = p->priv;
        int ret;

        p->tx_ring = kzalloc(ipoib_sendq_size * sizeof *p->tx_ring, GFP_KERNEL);
        if (!p->tx_ring) {
                ipoib_warn(priv, "failed to allocate tx ring\n");
                ret = -ENOMEM;
                goto err_tx;
        }
        memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);

        p->qp = ipoib_cm_create_tx_qp(p->priv, p);
        if (IS_ERR(p->qp)) {
                ret = PTR_ERR(p->qp);
                ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
                goto err_qp;
        }

        p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
        if (IS_ERR(p->id)) {
                ret = PTR_ERR(p->id);
                ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
                goto err_id;
        }

        ret = ipoib_cm_modify_tx_init(p->priv, p->id, p->qp);
        if (ret) {
                ipoib_warn(priv, "failed to modify tx qp to init: %d\n", ret);
                goto err_modify;
        }

        ret = ipoib_cm_send_req(p->priv, p->id, p->qp, qpn, pathrec);
        if (ret) {
                ipoib_warn(priv, "failed to send cm req: %d\n", ret);
                goto err_send_cm;
        }

        ipoib_dbg(priv, "Request connection 0x%x for gid %pI6 qpn 0x%x\n",
                  p->qp->qp_num, pathrec->dgid.raw, qpn);

        return 0;

err_send_cm:
err_modify:
        ib_destroy_cm_id(p->id);
err_id:
        p->id = NULL;
        ib_destroy_qp(p->qp);
err_qp:
        p->qp = NULL;
        kfree(p->tx_ring);
err_tx:
        return ret;
}

static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
        struct ipoib_dev_priv *priv = p->priv;
        struct ifnet *dev = priv->dev;
        struct ipoib_cm_tx_buf *tx_req;
        unsigned long begin;

        ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
                  p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);

        if (p->path)
                ipoib_path_free(priv, p->path);

        if (p->id)
                ib_destroy_cm_id(p->id);

        if (p->tx_ring) {
                /* Wait for all sends to complete */
                begin = jiffies;
                while ((int) p->tx_tail - (int) p->tx_head < 0) {
                        if (time_after(jiffies, begin + 5 * HZ)) {
                                ipoib_warn(priv, "timing out; %d sends not completed\n",
                                           p->tx_head - p->tx_tail);
                                goto timeout;
                        }

                        msleep(1);
                }
        }

timeout:
        while ((int) p->tx_tail - (int) p->tx_head < 0) {
                tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
                ipoib_dma_unmap_tx(priv->ca, (struct ipoib_tx_buf *)tx_req);
                m_freem(tx_req->mb);
                ++p->tx_tail;
                if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
                    (dev->if_drv_flags & IFF_DRV_OACTIVE) != 0 &&
                    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
                        dev->if_drv_flags &= ~IFF_DRV_OACTIVE;
        }

        if (p->qp)
                ib_destroy_qp(p->qp);

        kfree(p->tx_ring);
        kfree(p);
}

static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
                               struct ib_cm_event *event)
{
        struct ipoib_cm_tx *tx = cm_id->context;
        struct ipoib_dev_priv *priv = tx->priv;
        struct ipoib_path *path;
        unsigned long flags;
        int ret;

        switch (event->event) {
        case IB_CM_DREQ_RECEIVED:
                ipoib_dbg(priv, "DREQ received.\n");
                ib_send_cm_drep(cm_id, NULL, 0);
                break;
        case IB_CM_REP_RECEIVED:
                ipoib_dbg(priv, "REP received.\n");
                ret = ipoib_cm_rep_handler(cm_id, event);
                if (ret)
                        ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
                                       NULL, 0, NULL, 0);
                break;
        case IB_CM_REQ_ERROR:
        case IB_CM_REJ_RECEIVED:
        case IB_CM_TIMEWAIT_EXIT:
                ipoib_dbg(priv, "CM error %d.\n", event->event);
                spin_lock_irqsave(&priv->lock, flags);
                path = tx->path;

                if (path) {
                        path->cm = NULL;
                        tx->path = NULL;
                        rb_erase(&path->rb_node, &priv->path_tree);
                        list_del(&path->list);
                }

                if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                        list_move(&tx->list, &priv->cm.reap_list);
                        queue_work(ipoib_workqueue, &priv->cm.reap_task);
                }

                spin_unlock_irqrestore(&priv->lock, flags);
                if (path)
                        ipoib_path_free(tx->priv, path);
                break;
        default:
                break;
        }

        return 0;
}

struct ipoib_cm_tx *ipoib_cm_create_tx(struct ipoib_dev_priv *priv,
    struct ipoib_path *path)
{
        struct ipoib_cm_tx *tx;

        tx = kzalloc(sizeof *tx, GFP_ATOMIC);
        if (!tx)
                return NULL;

        ipoib_dbg(priv, "Creating cm tx\n");
        path->cm = tx;
        tx->path = path;
        tx->priv = priv;
        list_add(&tx->list, &priv->cm.start_list);
        set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
        queue_work(ipoib_workqueue, &priv->cm.start_task);
        return tx;
}

void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
{
        struct ipoib_dev_priv *priv = tx->priv;

        if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
                spin_lock(&priv->lock);
                list_move(&tx->list, &priv->cm.reap_list);
                spin_unlock(&priv->lock);
                queue_work(ipoib_workqueue, &priv->cm.reap_task);
                ipoib_dbg(priv, "Reap connection for gid %pI6\n",
                          tx->path->pathrec.dgid.raw);
                tx->path = NULL;
        }
}

static void ipoib_cm_tx_start(struct work_struct *work)
{
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   cm.start_task);
        struct ipoib_path *path;
        struct ipoib_cm_tx *p;
        struct ib_sa_path_rec pathrec;
        unsigned long flags;
        u32 qpn;
        int ret;

        ipoib_dbg(priv, "cm start task\n");
        spin_lock_irqsave(&priv->lock, flags);

        while (!list_empty(&priv->cm.start_list)) {
                p = list_entry(priv->cm.start_list.next, typeof(*p), list);
                list_del_init(&p->list);
                path = p->path;
                qpn = IPOIB_QPN(path->hwaddr);
                memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);

                spin_unlock_irqrestore(&priv->lock, flags);

                ret = ipoib_cm_tx_init(p, qpn, &pathrec);

                spin_lock_irqsave(&priv->lock, flags);

                if (ret) {
                        path = p->path;
                        if (path) {
                                path->cm = NULL;
                                rb_erase(&path->rb_node, &priv->path_tree);
                                list_del(&path->list);
                                ipoib_path_free(priv, path);
                        }
                        list_del(&p->list);
                        kfree(p);
                }
        }

        spin_unlock_irqrestore(&priv->lock, flags);
}

static void ipoib_cm_tx_reap(struct work_struct *work)
{
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   cm.reap_task);
        struct ipoib_cm_tx *p;
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);

        while (!list_empty(&priv->cm.reap_list)) {
                p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
                list_del(&p->list);
                spin_unlock_irqrestore(&priv->lock, flags);
                ipoib_cm_tx_destroy(p);
                spin_lock_irqsave(&priv->lock, flags);
        }

        spin_unlock_irqrestore(&priv->lock, flags);
}

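/*
 * Deferred path-MTU signalling: packets too large for a connection are
 * queued by ipoib_cm_mb_too_long() and turned into ICMP "fragmentation
 * needed" or ICMPv6 "packet too big" errors here, outside the transmit
 * path and with the priv lock dropped around each error injection.
 */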
static void ipoib_cm_mb_reap(struct work_struct *work)
{
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   cm.mb_task);
        struct mbuf *mb;
        unsigned long flags;
#if defined(INET) || defined(INET6)
        unsigned mtu = priv->mcast_mtu;
#endif
        uint16_t proto;

        spin_lock_irqsave(&priv->lock, flags);

        for (;;) {
                IF_DEQUEUE(&priv->cm.mb_queue, mb);
                if (mb == NULL)
                        break;
                spin_unlock_irqrestore(&priv->lock, flags);

                proto = htons(*mtod(mb, uint16_t *));
                m_adj(mb, IPOIB_ENCAP_LEN);
                switch (proto) {
#if defined(INET)
                case ETHERTYPE_IP:
                        icmp_error(mb, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0, mtu);
                        break;
#endif
#if defined(INET6)
                case ETHERTYPE_IPV6:
                        icmp6_error(mb, ICMP6_PACKET_TOO_BIG, 0, mtu);
                        break;
#endif
                default:
                        m_freem(mb);
                }

                spin_lock_irqsave(&priv->lock, flags);
        }

        spin_unlock_irqrestore(&priv->lock, flags);
}

void
ipoib_cm_mb_too_long(struct ipoib_dev_priv *priv, struct mbuf *mb, unsigned int mtu)
{
        int e = priv->cm.mb_queue.ifq_len;

        IF_ENQUEUE(&priv->cm.mb_queue, mb);
        if (e == 0)
                queue_work(ipoib_workqueue, &priv->cm.mb_task);
}

static void ipoib_cm_rx_reap(struct work_struct *work)
{
        ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
                                                cm.rx_reap_task));
}

static void ipoib_cm_stale_task(struct work_struct *work)
{
        struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
                                                   cm.stale_task.work);
        struct ipoib_cm_rx *p;
        int ret;

        spin_lock_irq(&priv->lock);
        while (!list_empty(&priv->cm.passive_ids)) {
                /* List is sorted by LRU, start from tail,
                 * stop when we see a recently used entry */
                p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
                if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
                        break;
                list_move(&p->list, &priv->cm.rx_error_list);
                p->state = IPOIB_CM_RX_ERROR;
                spin_unlock_irq(&priv->lock);
                ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
                if (ret)
                        ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
                spin_lock_irq(&priv->lock);
        }

        if (!list_empty(&priv->cm.passive_ids))
                queue_delayed_work(ipoib_workqueue,
                                   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
        spin_unlock_irq(&priv->lock);
}

static void ipoib_cm_create_srq(struct ipoib_dev_priv *priv, int max_sge)
{
        struct ib_srq_init_attr srq_init_attr = {
                .attr = {
                        .max_wr  = ipoib_recvq_size,
                        .max_sge = max_sge
                }
        };

        priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
        if (IS_ERR(priv->cm.srq)) {
                if (PTR_ERR(priv->cm.srq) != -ENOSYS)
                        printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
                               priv->ca->name, PTR_ERR(priv->cm.srq));
                priv->cm.srq = NULL;
                return;
        }

        priv->cm.srq_ring = kzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring, GFP_KERNEL);
        if (!priv->cm.srq_ring) {
                printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
                       priv->ca->name, ipoib_recvq_size);
                ib_destroy_srq(priv->cm.srq);
                priv->cm.srq = NULL;
                return;
        }

        memset(priv->cm.srq_ring, 0, ipoib_recvq_size * sizeof *priv->cm.srq_ring);
}

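/*
 * Per-device connected-mode initialization: set up the work tasks and
 * lists, create the SRQ when the HCA supports one (sizing max_cm_mtu
 * by the number of SRQ scatter/gather entries), prepost the SRQ ring,
 * and advertise RC support in the interface's link-layer address.
 */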
int ipoib_cm_dev_init(struct ipoib_dev_priv *priv)
{
        struct ifnet *dev = priv->dev;
        int i;
        int max_srq_sge;

        INIT_LIST_HEAD(&priv->cm.passive_ids);
        INIT_LIST_HEAD(&priv->cm.reap_list);
        INIT_LIST_HEAD(&priv->cm.start_list);
        INIT_LIST_HEAD(&priv->cm.rx_error_list);
        INIT_LIST_HEAD(&priv->cm.rx_flush_list);
        INIT_LIST_HEAD(&priv->cm.rx_drain_list);
        INIT_LIST_HEAD(&priv->cm.rx_reap_list);
        INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
        INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
        INIT_WORK(&priv->cm.mb_task, ipoib_cm_mb_reap);
        INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
        INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);

        bzero(&priv->cm.mb_queue, sizeof(priv->cm.mb_queue));
        mtx_init(&priv->cm.mb_queue.ifq_mtx,
            dev->if_xname, "if send queue", MTX_DEF);

        max_srq_sge = priv->ca->attrs.max_srq_sge;

        ipoib_dbg(priv, "max_srq_sge=%d\n", max_srq_sge);

        max_srq_sge = min_t(int, IPOIB_CM_RX_SG, max_srq_sge);
        ipoib_cm_create_srq(priv, max_srq_sge);
        if (ipoib_cm_has_srq(priv)) {
                priv->cm.max_cm_mtu = max_srq_sge * MJUMPAGESIZE;
                priv->cm.num_frags  = max_srq_sge;
                ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
                          priv->cm.max_cm_mtu, priv->cm.num_frags);
        } else {
                priv->cm.max_cm_mtu = IPOIB_CM_MAX_MTU;
                priv->cm.num_frags  = IPOIB_CM_RX_SG;
        }

        ipoib_cm_init_rx_wr(priv, &priv->cm.rx_wr, priv->cm.rx_sge);

        if (ipoib_cm_has_srq(priv)) {
                for (i = 0; i < ipoib_recvq_size; ++i) {
                        if (!ipoib_cm_alloc_rx_mb(priv, &priv->cm.srq_ring[i])) {
                                ipoib_warn(priv, "failed to allocate "
                                           "receive buffer %d\n", i);
                                ipoib_cm_dev_cleanup(priv);
                                return -ENOMEM;
                        }

                        if (ipoib_cm_post_receive_srq(priv, i)) {
                                ipoib_warn(priv, "ipoib_cm_post_receive_srq "
                                           "failed for buf %d\n", i);
                                ipoib_cm_dev_cleanup(priv);
                                return -EIO;
                        }
                }
        }

        IF_LLADDR(priv->dev)[0] = IPOIB_FLAGS_RC;
        return 0;
}

void ipoib_cm_dev_cleanup(struct ipoib_dev_priv *priv)
{
        int ret;

        if (!priv->cm.srq)
                return;

        ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");

        ret = ib_destroy_srq(priv->cm.srq);
        if (ret)
                ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);

        priv->cm.srq = NULL;
        if (!priv->cm.srq_ring)
                return;

        ipoib_cm_free_rx_ring(priv, priv->cm.srq_ring);
        priv->cm.srq_ring = NULL;

        mtx_destroy(&priv->cm.mb_queue.ifq_mtx);
}

#endif /* CONFIG_INFINIBAND_IPOIB_CM */