1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
3  *
4  * Copyright (c) 2006 Mellanox Technologies. All rights reserved
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34
35 #include "ipoib.h"
36
37 #ifdef CONFIG_INFINIBAND_IPOIB_CM
38
39 #include <netinet/ip.h>
40 #include <netinet/ip_icmp.h>
41 #include <netinet/icmp6.h>
42
43 #include <rdma/ib_cm.h>
44 #include <rdma/ib_cache.h>
45 #include <linux/delay.h>
46
47 int ipoib_max_conn_qp = 128;
48
49 module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444);
50 MODULE_PARM_DESC(max_nonsrq_conn_qp,
51                  "Max number of connected-mode QPs per interface "
52                  "(applied only if shared receive queue is not available)");
53
54 #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
55 static int data_debug_level;
56
57 module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
58 MODULE_PARM_DESC(cm_data_debug_level,
59                  "Enable data path debug tracing for connected mode if > 0");
60 #endif
61
62 #define IPOIB_CM_IETF_ID 0x1000000000000000ULL
63
64 #define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
65 #define IPOIB_CM_RX_TIMEOUT     (2 * 256 * HZ)
66 #define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
67 #define IPOIB_CM_RX_UPDATE_MASK (0x3)
68
69 static struct ib_qp_attr ipoib_cm_err_attr = {
70         .qp_state = IB_QPS_ERR
71 };
72
73 #define IPOIB_CM_RX_DRAIN_WRID 0xffffffff
74
75 static struct ib_send_wr ipoib_cm_rx_drain_wr = {
76         .wr_id = IPOIB_CM_RX_DRAIN_WRID,
77         .opcode = IB_WR_SEND,
78 };
79
80 static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
81                                struct ib_cm_event *event);
82
83 static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, struct ipoib_cm_rx_buf *rx_req)
84 {
85
86         ipoib_dma_unmap_rx(priv, (struct ipoib_rx_buf *)rx_req);
87
88 }
89
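/*
 * Post (or repost) the receive buffer in SRQ ring slot "id": rebuild the
 * scatter list from the buffer's mbuf chain and hand the work request to
 * the shared receive queue.  On failure the buffer is unmapped and freed.
 */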
90 static int ipoib_cm_post_receive_srq(struct ipoib_dev_priv *priv, int id)
91 {
92         struct ib_recv_wr *bad_wr;
93         struct ipoib_rx_buf *rx_req;
94         struct mbuf *m;
95         int ret;
96         int i;
97
98         rx_req = (struct ipoib_rx_buf *)&priv->cm.srq_ring[id];
99         for (m = rx_req->mb, i = 0; m != NULL; m = m->m_next, i++) {
100                 priv->cm.rx_sge[i].addr = rx_req->mapping[i];
101                 priv->cm.rx_sge[i].length = m->m_len;
102         }
103
104         priv->cm.rx_wr.num_sge = i;
105         priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
106
107         ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
108         if (unlikely(ret)) {
109                 ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
110                 ipoib_dma_unmap_rx(priv, rx_req);
111                 m_freem(priv->cm.srq_ring[id].mb);
112                 priv->cm.srq_ring[id].mb = NULL;
113         }
114
115         return ret;
116 }
117
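/*
 * Non-SRQ variant: post the buffer in slot "id" of this connection's
 * private receive ring to the connection's own QP, using the caller's
 * scratch work request and scatter list.
 */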
118 static int ipoib_cm_post_receive_nonsrq(struct ipoib_dev_priv *priv,
119                                         struct ipoib_cm_rx *rx,
120                                         struct ib_recv_wr *wr,
121                                         struct ib_sge *sge, int id)
122 {
123         struct ipoib_rx_buf *rx_req;
124         struct ib_recv_wr *bad_wr;
125         struct mbuf *m;
126         int ret;
127         int i;
128
129         rx_req = (struct ipoib_rx_buf *)&rx->rx_ring[id];
130         for (m = rx_req->mb, i = 0; m != NULL; m = m->m_next, i++) {
131                 sge[i].addr = rx_req->mapping[i];
132                 sge[i].length = m->m_len;
133         }
134
135         wr->num_sge = i;
136         wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;
137
138         ret = ib_post_recv(rx->qp, wr, &bad_wr);
139         if (unlikely(ret)) {
140                 ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
141                 ipoib_dma_unmap_rx(priv, rx_req);
142                 m_freem(rx->rx_ring[id].mb);
143                 rx->rx_ring[id].mb = NULL;
144         }
145
146         return ret;
147 }
148
149 static struct mbuf *
150 ipoib_cm_alloc_rx_mb(struct ipoib_dev_priv *priv, struct ipoib_cm_rx_buf *rx_req)
151 {
152         return ipoib_alloc_map_mb(priv, (struct ipoib_rx_buf *)rx_req,
153             priv->cm.max_cm_mtu);
154 }
155
156 static void ipoib_cm_free_rx_ring(struct ipoib_dev_priv *priv,
157                                   struct ipoib_cm_rx_buf *rx_ring)
158 {
159         int i;
160
161         for (i = 0; i < ipoib_recvq_size; ++i)
162                 if (rx_ring[i].mb) {
163                         ipoib_cm_dma_unmap_rx(priv, &rx_ring[i]);
164                         m_freem(rx_ring[i].mb);
165                 }
166
167         kfree(rx_ring);
168 }
169
170 static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
171 {
172         struct ib_send_wr *bad_wr;
173         struct ipoib_cm_rx *p;
174
175         /* We only reserved 1 extra slot in the CQ for drain WRs, so
176          * make sure we have at most 1 outstanding WR. */
177         if (list_empty(&priv->cm.rx_flush_list) ||
178             !list_empty(&priv->cm.rx_drain_list))
179                 return;
180
181         /*
182          * QPs on the flush list are in the error state.  This way, a "flush
183          * error" WC will be immediately generated for each WR we post.
184          */
185         p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
186         if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
187                 ipoib_warn(priv, "failed to post drain wr\n");
188
189         list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
190 }
191
192 static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
193 {
194         struct ipoib_cm_rx *p = ctx;
195         struct ipoib_dev_priv *priv = p->priv;
196         unsigned long flags;
197
198         if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
199                 return;
200
201         spin_lock_irqsave(&priv->lock, flags);
202         list_move(&p->list, &priv->cm.rx_flush_list);
203         p->state = IPOIB_CM_RX_FLUSH;
204         ipoib_cm_start_rx_drain(priv);
205         spin_unlock_irqrestore(&priv->lock, flags);
206 }
207
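/*
 * Create the RC QP for the passive (receive) side of a connection.  The
 * send queue holds only the single drain WR, and dedicated receive
 * resources are requested only when the device has no SRQ.
 */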
208 static struct ib_qp *ipoib_cm_create_rx_qp(struct ipoib_dev_priv *priv,
209                                            struct ipoib_cm_rx *p)
210 {
211         struct ib_qp_init_attr attr = {
212                 .event_handler = ipoib_cm_rx_event_handler,
213                 .send_cq = priv->recv_cq, /* For drain WR */
214                 .recv_cq = priv->recv_cq,
215                 .srq = priv->cm.srq,
216                 .cap.max_send_wr = 1, /* For drain WR */
217                 .cap.max_send_sge = 1,
218                 .sq_sig_type = IB_SIGNAL_ALL_WR,
219                 .qp_type = IB_QPT_RC,
220                 .qp_context = p,
221         };
222
223         if (!ipoib_cm_has_srq(priv)) {
224                 attr.cap.max_recv_wr  = ipoib_recvq_size;
225                 attr.cap.max_recv_sge = priv->cm.num_frags;
226         }
227
228         return ib_create_qp(priv->pd, &attr);
229 }
230
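/*
 * Bring the passive-side QP through INIT and RTR (with the given receive
 * PSN) and on to RTS, using the attributes supplied by the CM for each
 * transition; failure to reach RTS is tolerated (see the firmware note
 * below).
 */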
231 static int ipoib_cm_modify_rx_qp(struct ipoib_dev_priv *priv,
232                                  struct ib_cm_id *cm_id, struct ib_qp *qp,
233                                  unsigned psn)
234 {
235         struct ib_qp_attr qp_attr;
236         int qp_attr_mask, ret;
237
238         qp_attr.qp_state = IB_QPS_INIT;
239         ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
240         if (ret) {
241                 ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
242                 return ret;
243         }
244         ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
245         if (ret) {
246                 ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
247                 return ret;
248         }
249         qp_attr.qp_state = IB_QPS_RTR;
250         ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
251         if (ret) {
252                 ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
253                 return ret;
254         }
255         qp_attr.rq_psn = psn;
256         ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
257         if (ret) {
258                 ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
259                 return ret;
260         }
261
262         /*
263          * Current Mellanox HCA firmware won't generate completions
264          * with error for drain WRs unless the QP has been moved to
265          * RTS first. This work-around leaves a window where a QP has
266          * moved to error asynchronously, but this will eventually get
267          * fixed in firmware, so let's not error out if modify QP
268          * fails.
269          */
270         qp_attr.qp_state = IB_QPS_RTS;
271         ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
272         if (ret) {
273                 ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
274                 return 0;
275         }
276         ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
277         if (ret) {
278                 ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
279                 return 0;
280         }
281
282         return 0;
283 }
284
285 static void ipoib_cm_init_rx_wr(struct ipoib_dev_priv *priv,
286                                 struct ib_recv_wr *wr,
287                                 struct ib_sge *sge)
288 {
289         int i;
290
291         for (i = 0; i < IPOIB_CM_RX_SG; i++)
292                 sge[i].lkey = priv->pd->local_dma_lkey;
293
294         wr->next    = NULL;
295         wr->sg_list = sge;
296         wr->num_sge = 1;
297 }
298
299 static int ipoib_cm_nonsrq_init_rx(struct ipoib_dev_priv *priv,
300     struct ib_cm_id *cm_id, struct ipoib_cm_rx *rx)
301 {
302         struct {
303                 struct ib_recv_wr wr;
304                 struct ib_sge sge[IPOIB_CM_RX_SG];
305         } *t;
306         int ret;
307         int i;
308
309         rx->rx_ring = kzalloc(ipoib_recvq_size * sizeof *rx->rx_ring, GFP_KERNEL);
310         if (!rx->rx_ring) {
311                 printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
312                        priv->ca->name, ipoib_recvq_size);
313                 return -ENOMEM;
314         }
315
316         memset(rx->rx_ring, 0, ipoib_recvq_size * sizeof *rx->rx_ring);
317
318         t = kmalloc(sizeof *t, GFP_KERNEL);
319         if (!t) {
320                 ret = -ENOMEM;
321                 goto err_free;
322         }
323
324         ipoib_cm_init_rx_wr(priv, &t->wr, t->sge);
325
326         spin_lock_irq(&priv->lock);
327
328         if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
329                 spin_unlock_irq(&priv->lock);
330                 ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);
331                 ret = -EINVAL;
332                 goto err_free;
333         } else
334                 ++priv->cm.nonsrq_conn_qp;
335
336         spin_unlock_irq(&priv->lock);
337
338         for (i = 0; i < ipoib_recvq_size; ++i) {
339                 if (!ipoib_cm_alloc_rx_mb(priv, &rx->rx_ring[i])) {
340                         ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
341                         ret = -ENOMEM;
342                         goto err_count;
343                 }
344                 ret = ipoib_cm_post_receive_nonsrq(priv, rx, &t->wr, t->sge, i);
345                 if (ret) {
346                         ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq "
347                                    "failed for buf %d\n", i);
348                         ret = -EIO;
349                         goto err_count;
350                 }
351         }
352
353         rx->recv_count = ipoib_recvq_size;
354
355         kfree(t);
356
357         return 0;
358
359 err_count:
360         spin_lock_irq(&priv->lock);
361         --priv->cm.nonsrq_conn_qp;
362         spin_unlock_irq(&priv->lock);
363
364 err_free:
365         kfree(t);
366         ipoib_cm_free_rx_ring(priv, rx->rx_ring);
367
368         return ret;
369 }
370
371 static int ipoib_cm_send_rep(struct ipoib_dev_priv *priv, struct ib_cm_id *cm_id,
372                              struct ib_qp *qp, struct ib_cm_req_event_param *req,
373                              unsigned psn)
374 {
375         struct ipoib_cm_data data = {};
376         struct ib_cm_rep_param rep = {};
377
378         data.qpn = cpu_to_be32(priv->qp->qp_num);
379         data.mtu = cpu_to_be32(priv->cm.max_cm_mtu);
380
381         rep.private_data = &data;
382         rep.private_data_len = sizeof data;
383         rep.flow_control = 0;
384         rep.rnr_retry_count = req->rnr_retry_count;
385         rep.srq = ipoib_cm_has_srq(priv);
386         rep.qp_num = qp->qp_num;
387         rep.starting_psn = psn;
388         return ib_send_cm_rep(cm_id, &rep);
389 }
390
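/*
 * Handle an incoming connection request (REQ): allocate the per-connection
 * state, create and bring up the passive-side QP, set up a private receive
 * ring when there is no SRQ, add the connection to the passive list and
 * answer with a REP.
 */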
391 static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
392 {
393         struct ipoib_dev_priv *priv = cm_id->context;
394         struct ipoib_cm_rx *p;
395         unsigned psn;
396         int ret;
397
398         ipoib_dbg(priv, "REQ arrived\n");
399         p = kzalloc(sizeof *p, GFP_KERNEL);
400         if (!p)
401                 return -ENOMEM;
402         p->priv = priv;
403         p->id = cm_id;
404         cm_id->context = p;
405         p->state = IPOIB_CM_RX_LIVE;
406         p->jiffies = jiffies;
407         INIT_LIST_HEAD(&p->list);
408
409         p->qp = ipoib_cm_create_rx_qp(priv, p);
410         if (IS_ERR(p->qp)) {
411                 ret = PTR_ERR(p->qp);
412                 goto err_qp;
413         }
414
415         psn = random() & 0xffffff;
416         ret = ipoib_cm_modify_rx_qp(priv, cm_id, p->qp, psn);
417         if (ret)
418                 goto err_modify;
419
420         if (!ipoib_cm_has_srq(priv)) {
421                 ret = ipoib_cm_nonsrq_init_rx(priv, cm_id, p);
422                 if (ret)
423                         goto err_modify;
424         }
425
426         spin_lock_irq(&priv->lock);
427         queue_delayed_work(ipoib_workqueue,
428                            &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
429         /* Add this entry to the passive ids list head, but do not re-add it
430          * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to the flush list. */
431         p->jiffies = jiffies;
432         if (p->state == IPOIB_CM_RX_LIVE)
433                 list_move(&p->list, &priv->cm.passive_ids);
434         spin_unlock_irq(&priv->lock);
435
436         ret = ipoib_cm_send_rep(priv, cm_id, p->qp, &event->param.req_rcvd, psn);
437         if (ret) {
438                 ipoib_warn(priv, "failed to send REP: %d\n", ret);
439                 if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
440                         ipoib_warn(priv, "unable to move qp to error state\n");
441         }
442         return 0;
443
444 err_modify:
445         ib_destroy_qp(p->qp);
446 err_qp:
447         kfree(p);
448         return ret;
449 }
450
451 static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
452                                struct ib_cm_event *event)
453 {
454         struct ipoib_cm_rx *p;
455         struct ipoib_dev_priv *priv;
456
457         switch (event->event) {
458         case IB_CM_REQ_RECEIVED:
459                 return ipoib_cm_req_handler(cm_id, event);
460         case IB_CM_DREQ_RECEIVED:
461                 p = cm_id->context;
462                 ib_send_cm_drep(cm_id, NULL, 0);
463                 /* Fall through */
464         case IB_CM_REJ_RECEIVED:
465                 p = cm_id->context;
466                 priv = p->priv;
467                 if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
468                         ipoib_warn(priv, "unable to move qp to error state\n");
469                 /* Fall through */
470         default:
471                 return 0;
472         }
473 }
474
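/*
 * Connected-mode receive completion handler.  Drain WRIDs advance the
 * rx_flush -> rx_drain -> rx_reap state machine; for normal completions a
 * fresh mbuf is swapped into the ring slot, the received packet is handed
 * up the stack and the slot is reposted.
 */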
475 void ipoib_cm_handle_rx_wc(struct ipoib_dev_priv *priv, struct ib_wc *wc)
476 {
477         struct ipoib_cm_rx_buf saverx;
478         struct ipoib_cm_rx_buf *rx_ring;
479         unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
480         struct ifnet *dev = priv->dev;
481         struct mbuf *mb, *newmb;
482         struct ipoib_cm_rx *p;
483         int has_srq;
484         u_short proto;
485
486         CURVNET_SET_QUIET(dev->if_vnet);
487
488         ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
489                        wr_id, wc->status);
490
491         if (unlikely(wr_id >= ipoib_recvq_size)) {
492                 if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {
493                         spin_lock(&priv->lock);
494                         list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
495                         ipoib_cm_start_rx_drain(priv);
496                         if (priv->cm.id != NULL)
497                                 queue_work(ipoib_workqueue,
498                                     &priv->cm.rx_reap_task);
499                         spin_unlock(&priv->lock);
500                 } else
501                         ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
502                                    wr_id, ipoib_recvq_size);
503                 goto done;
504         }
505
506         p = wc->qp->qp_context;
507
508         has_srq = ipoib_cm_has_srq(priv);
509         rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;
510
511         mb = rx_ring[wr_id].mb;
512
513         if (unlikely(wc->status != IB_WC_SUCCESS)) {
514                 ipoib_dbg(priv, "cm recv error "
515                            "(status=%d, wrid=%d vend_err %x)\n",
516                            wc->status, wr_id, wc->vendor_err);
517                 if_inc_counter(dev, IFCOUNTER_IERRORS, 1);
518                 if (has_srq)
519                         goto repost;
520                 else {
521                         if (!--p->recv_count) {
522                                 spin_lock(&priv->lock);
523                                 list_move(&p->list, &priv->cm.rx_reap_list);
524                                 queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
525                                 spin_unlock(&priv->lock);
526                         }
527                         goto done;
528                 }
529         }
530
531         if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
532                 if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
533                         p->jiffies = jiffies;
534                         /* Move this entry to the list head, but do not re-add it
535                          * if it has been moved out of the list. */
536                         if (p->state == IPOIB_CM_RX_LIVE)
537                                 list_move(&p->list, &priv->cm.passive_ids);
538                 }
539         }
540
541         memcpy(&saverx, &rx_ring[wr_id], sizeof(saverx));
542         newmb = ipoib_cm_alloc_rx_mb(priv, &rx_ring[wr_id]);
543         if (unlikely(!newmb)) {
544                 /*
545                  * If we can't allocate a new RX buffer, dump
546                  * this packet and reuse the old buffer.
547                  */
548                 ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
549                 if_inc_counter(dev, IFCOUNTER_IERRORS, 1);
550                 memcpy(&rx_ring[wr_id], &saverx, sizeof(saverx));
551                 goto repost;
552         }
553
554         ipoib_cm_dma_unmap_rx(priv, &saverx);
555
556         ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
557                        wc->byte_len, wc->slid);
558
559         ipoib_dma_mb(priv, mb, wc->byte_len);
560
561         if_inc_counter(dev, IFCOUNTER_IPACKETS, 1);
562         if_inc_counter(dev, IFCOUNTER_IBYTES, mb->m_pkthdr.len);
563
564         mb->m_pkthdr.rcvif = dev;
565         proto = *mtod(mb, uint16_t *);
566         m_adj(mb, IPOIB_ENCAP_LEN);
567
568         IPOIB_MTAP_PROTO(dev, mb, proto);
569         ipoib_demux(dev, mb, ntohs(proto));
570
571 repost:
572         if (has_srq) {
573                 if (unlikely(ipoib_cm_post_receive_srq(priv, wr_id)))
574                         ipoib_warn(priv, "ipoib_cm_post_receive_srq failed "
575                                    "for buf %d\n", wr_id);
576         } else {
577                 if (unlikely(ipoib_cm_post_receive_nonsrq(priv, p,
578                                                           &priv->cm.rx_wr,
579                                                           priv->cm.rx_sge,
580                                                           wr_id))) {
581                         --p->recv_count;
582                         ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed "
583                                    "for buf %d\n", wr_id);
584                 }
585         }
586 done:
587         CURVNET_RESTORE();
588         return;
589 }
590
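/*
 * Build the gather list from the mbuf chain and post a send work request
 * on the connection's QP.
 */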
591 static inline int post_send(struct ipoib_dev_priv *priv,
592                             struct ipoib_cm_tx *tx,
593                             struct ipoib_cm_tx_buf *tx_req,
594                             unsigned int wr_id)
595 {
596         struct ib_send_wr *bad_wr;
597         struct mbuf *mb = tx_req->mb;
598         u64 *mapping = tx_req->mapping;
599         struct mbuf *m;
600         int i;
601
602         for (m = mb, i = 0; m != NULL; m = m->m_next, i++) {
603                 priv->tx_sge[i].addr = mapping[i];
604                 priv->tx_sge[i].length = m->m_len;
605         }
606         priv->tx_wr.wr.num_sge = i;
607         priv->tx_wr.wr.wr_id = wr_id | IPOIB_OP_CM;
608         priv->tx_wr.wr.opcode = IB_WR_SEND;
609
610         return ib_post_send(tx->qp, &priv->tx_wr.wr, &bad_wr);
611 }
612
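/*
 * Queue one packet on a connected-mode transmit QP: strip the IPoIB
 * pseudoheader, hand packets larger than the negotiated MTU to
 * ipoib_cm_mb_too_long() so an ICMP error can be generated, record the
 * mbuf in the tx ring before posting the send WR, and stop the interface
 * queue when the ring fills up.
 */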
613 void ipoib_cm_send(struct ipoib_dev_priv *priv, struct mbuf *mb, struct ipoib_cm_tx *tx)
614 {
615         struct ipoib_cm_tx_buf *tx_req;
616         struct ifnet *dev = priv->dev;
617
618         if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
619                 while (ipoib_poll_tx(priv)); /* nothing */
620
621         m_adj(mb, sizeof(struct ipoib_pseudoheader));
622         if (unlikely(mb->m_pkthdr.len > tx->mtu)) {
623                 ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
624                            mb->m_pkthdr.len, tx->mtu);
625                 if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
626                 ipoib_cm_mb_too_long(priv, mb, IPOIB_CM_MTU(tx->mtu));
627                 return;
628         }
629
630         ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
631                        tx->tx_head, mb->m_pkthdr.len, tx->qp->qp_num);
632
633
634         /*
635          * We put the mb into the tx_ring _before_ we call post_send()
636          * because it's entirely possible that the completion handler will
637          * run before we execute anything after the post_send().  That
638          * means we have to make sure everything is properly recorded and
639          * our state is consistent before we call post_send().
640          */
641         tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
642         tx_req->mb = mb;
643         if (unlikely(ipoib_dma_map_tx(priv->ca, (struct ipoib_tx_buf *)tx_req,
644             priv->cm.num_frags))) {
645                 if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
646                 if (tx_req->mb)
647                         m_freem(tx_req->mb);
648                 return;
649         }
650
651         if (unlikely(post_send(priv, tx, tx_req, tx->tx_head & (ipoib_sendq_size - 1)))) {
652                 ipoib_warn(priv, "post_send failed\n");
653                 if_inc_counter(dev, IFCOUNTER_OERRORS, 1);
654                 ipoib_dma_unmap_tx(priv->ca, (struct ipoib_tx_buf *)tx_req);
655                 m_freem(mb);
656         } else {
657                 ++tx->tx_head;
658
659                 if (++priv->tx_outstanding == ipoib_sendq_size) {
660                         ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
661                                   tx->qp->qp_num);
662                         if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
663                                 ipoib_warn(priv, "request notify on send CQ failed\n");
664                         dev->if_drv_flags |= IFF_DRV_OACTIVE;
665                 }
666         }
667
668 }
669
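/*
 * Connected-mode send completion handler: unmap and free the completed
 * mbuf, restart the interface queue if it was stopped, and on a real
 * (non-flush) error tear the path down and queue the connection for
 * reaping.
 */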
670 void ipoib_cm_handle_tx_wc(struct ipoib_dev_priv *priv, struct ib_wc *wc)
671 {
672         struct ipoib_cm_tx *tx = wc->qp->qp_context;
673         unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
674         struct ifnet *dev = priv->dev;
675         struct ipoib_cm_tx_buf *tx_req;
676
677         ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
678                        wr_id, wc->status);
679
680         if (unlikely(wr_id >= ipoib_sendq_size)) {
681                 ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
682                            wr_id, ipoib_sendq_size);
683                 return;
684         }
685
686         tx_req = &tx->tx_ring[wr_id];
687
688         ipoib_dma_unmap_tx(priv->ca, (struct ipoib_tx_buf *)tx_req);
689
690         /* FIXME: is this right? Shouldn't we only increment on success? */
691         if_inc_counter(dev, IFCOUNTER_OPACKETS, 1);
692
693         m_freem(tx_req->mb);
694
695         ++tx->tx_tail;
696         if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
697             (dev->if_drv_flags & IFF_DRV_OACTIVE) != 0 &&
698             test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
699                 dev->if_drv_flags &= ~IFF_DRV_OACTIVE;
700
701         if (wc->status != IB_WC_SUCCESS &&
702             wc->status != IB_WC_WR_FLUSH_ERR) {
703                 struct ipoib_path *path;
704
705                 ipoib_dbg(priv, "failed cm send event "
706                            "(status=%d, wrid=%d vend_err %x)\n",
707                            wc->status, wr_id, wc->vendor_err);
708
709                 path = tx->path;
710
711                 if (path) {
712                         path->cm = NULL;
713                         rb_erase(&path->rb_node, &priv->path_tree);
714                         list_del(&path->list);
715                 }
716
717                 if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
718                         list_move(&tx->list, &priv->cm.reap_list);
719                         queue_work(ipoib_workqueue, &priv->cm.reap_task);
720                 }
721
722                 clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);
723         }
724
725 }
726
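/*
 * Start the connected-mode passive side for this interface: create the CM
 * ID and listen on the IPoIB-CM service ID derived from our datagram QPN.
 */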
727 int ipoib_cm_dev_open(struct ipoib_dev_priv *priv)
728 {
729         int ret;
730
731         if (!IPOIB_CM_SUPPORTED(IF_LLADDR(priv->dev)))
732                 return 0;
733
734         priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, priv);
735         if (IS_ERR(priv->cm.id)) {
736                 printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
737                 ret = PTR_ERR(priv->cm.id);
738                 goto err_cm;
739         }
740
741         ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num), 0);
742         if (ret) {
743                 printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
744                        IPOIB_CM_IETF_ID | priv->qp->qp_num);
745                 goto err_listen;
746         }
747
748         return 0;
749
750 err_listen:
751         ib_destroy_cm_id(priv->cm.id);
752 err_cm:
753         priv->cm.id = NULL;
754         return ret;
755 }
756
757 static void ipoib_cm_free_rx_reap_list(struct ipoib_dev_priv *priv)
758 {
759         struct ipoib_cm_rx *rx, *n;
760         LIST_HEAD(list);
761
762         spin_lock_irq(&priv->lock);
763         list_splice_init(&priv->cm.rx_reap_list, &list);
764         spin_unlock_irq(&priv->lock);
765
766         list_for_each_entry_safe(rx, n, &list, list) {
767                 ib_destroy_cm_id(rx->id);
768                 ib_destroy_qp(rx->qp);
769                 if (!ipoib_cm_has_srq(priv)) {
770                         ipoib_cm_free_rx_ring(priv, rx->rx_ring);
771                         spin_lock_irq(&priv->lock);
772                         --priv->cm.nonsrq_conn_qp;
773                         spin_unlock_irq(&priv->lock);
774                 }
775                 kfree(rx);
776         }
777 }
778
779 void ipoib_cm_dev_stop(struct ipoib_dev_priv *priv)
780 {
781         struct ipoib_cm_rx *p;
782         unsigned long begin;
783         int ret;
784
785         if (!IPOIB_CM_SUPPORTED(IF_LLADDR(priv->dev)) || !priv->cm.id)
786                 return;
787
788         ib_destroy_cm_id(priv->cm.id);
789         priv->cm.id = NULL;
790
791         cancel_work_sync(&priv->cm.rx_reap_task);
792
793         spin_lock_irq(&priv->lock);
794         while (!list_empty(&priv->cm.passive_ids)) {
795                 p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
796                 list_move(&p->list, &priv->cm.rx_error_list);
797                 p->state = IPOIB_CM_RX_ERROR;
798                 spin_unlock_irq(&priv->lock);
799                 ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
800                 if (ret)
801                         ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
802                 spin_lock_irq(&priv->lock);
803         }
804
805         /* Wait for all RX to be drained */
806         begin = jiffies;
807
808         while (!list_empty(&priv->cm.rx_error_list) ||
809                !list_empty(&priv->cm.rx_flush_list) ||
810                !list_empty(&priv->cm.rx_drain_list)) {
811                 if (time_after(jiffies, begin + 5 * HZ)) {
812                         ipoib_warn(priv, "RX drain timing out\n");
813
814                         /*
815                          * assume the HW is wedged and just free up everything.
816                          */
817                         list_splice_init(&priv->cm.rx_flush_list,
818                                          &priv->cm.rx_reap_list);
819                         list_splice_init(&priv->cm.rx_error_list,
820                                          &priv->cm.rx_reap_list);
821                         list_splice_init(&priv->cm.rx_drain_list,
822                                          &priv->cm.rx_reap_list);
823                         break;
824                 }
825                 spin_unlock_irq(&priv->lock);
826                 msleep(1);
827                 ipoib_drain_cq(priv);
828                 spin_lock_irq(&priv->lock);
829         }
830
831         spin_unlock_irq(&priv->lock);
832
833         ipoib_cm_free_rx_reap_list(priv);
834
835         cancel_delayed_work_sync(&priv->cm.stale_task);
836 }
837
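/*
 * Active-side REP handler: validate the MTU advertised by the peer, move
 * our QP to RTR and RTS, retransmit any packets that were queued on the
 * path while the connection was being set up, and confirm with an RTU.
 */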
838 static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
839 {
840         struct ipoib_cm_tx *p = cm_id->context;
841         struct ipoib_dev_priv *priv = p->priv;
842         struct ipoib_cm_data *data = event->private_data;
843         struct ifqueue mbqueue;
844         struct ib_qp_attr qp_attr;
845         int qp_attr_mask, ret;
846         struct mbuf *mb;
847
848         ipoib_dbg(priv, "cm rep handler\n");
849         p->mtu = be32_to_cpu(data->mtu);
850
851         if (p->mtu <= IPOIB_ENCAP_LEN) {
852                 ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
853                            p->mtu, IPOIB_ENCAP_LEN);
854                 return -EINVAL;
855         }
856
857         qp_attr.qp_state = IB_QPS_RTR;
858         ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
859         if (ret) {
860                 ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
861                 return ret;
862         }
863
864         qp_attr.rq_psn = 0 /* FIXME */;
865         ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
866         if (ret) {
867                 ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
868                 return ret;
869         }
870
871         qp_attr.qp_state = IB_QPS_RTS;
872         ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
873         if (ret) {
874                 ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
875                 return ret;
876         }
877         ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
878         if (ret) {
879                 ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
880                 return ret;
881         }
882
883         bzero(&mbqueue, sizeof(mbqueue));
884
885         spin_lock_irq(&priv->lock);
886         set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
887         if (p->path)
888                 for (;;) {
889                         _IF_DEQUEUE(&p->path->queue, mb);
890                         if (mb == NULL)
891                                 break;
892                         _IF_ENQUEUE(&mbqueue, mb);
893                 }
894         spin_unlock_irq(&priv->lock);
895
896         for (;;) {
897                 struct ifnet *dev = p->priv->dev;
898                 _IF_DEQUEUE(&mbqueue, mb);
899                 if (mb == NULL)
900                         break;
901                 mb->m_pkthdr.rcvif = dev;
902                 if (dev->if_transmit(dev, mb))
903                         ipoib_warn(priv, "if_transmit failed "
904                                    "to requeue packet\n");
905         }
906
907         ret = ib_send_cm_rtu(cm_id, NULL, 0);
908         if (ret) {
909                 ipoib_warn(priv, "failed to send RTU: %d\n", ret);
910                 return ret;
911         }
912         return 0;
913 }
914
915 static struct ib_qp *ipoib_cm_create_tx_qp(struct ipoib_dev_priv *priv,
916     struct ipoib_cm_tx *tx)
917 {
918         struct ib_qp_init_attr attr = {
919                 .send_cq                = priv->send_cq,
920                 .recv_cq                = priv->recv_cq,
921                 .srq                    = priv->cm.srq,
922                 .cap.max_send_wr        = ipoib_sendq_size,
923                 .cap.max_send_sge       = priv->cm.num_frags,
924                 .sq_sig_type            = IB_SIGNAL_ALL_WR,
925                 .qp_type                = IB_QPT_RC,
926                 .qp_context             = tx
927         };
928
929         return ib_create_qp(priv->pd, &attr);
930 }
931
932 static int ipoib_cm_send_req(struct ipoib_dev_priv *priv,
933                              struct ib_cm_id *id, struct ib_qp *qp,
934                              u32 qpn,
935                              struct ib_sa_path_rec *pathrec)
936 {
937         struct ipoib_cm_data data = {};
938         struct ib_cm_req_param req = {};
939
940         ipoib_dbg(priv, "cm send req\n");
941
942         data.qpn = cpu_to_be32(priv->qp->qp_num);
943         data.mtu = cpu_to_be32(priv->cm.max_cm_mtu);
944
945         req.primary_path                = pathrec;
946         req.alternate_path              = NULL;
947         req.service_id                  = cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
948         req.qp_num                      = qp->qp_num;
949         req.qp_type                     = qp->qp_type;
950         req.private_data                = &data;
951         req.private_data_len            = sizeof data;
952         req.flow_control                = 0;
953
954         req.starting_psn                = 0; /* FIXME */
955
956         /*
957          * Pick some arbitrary defaults here; we could make these
958          * module parameters if anyone cared about setting them.
959          */
960         req.responder_resources         = 4;
961         req.remote_cm_response_timeout  = 20;
962         req.local_cm_response_timeout   = 20;
963         req.retry_count                 = 0; /* RFC draft warns against retries */
964         req.rnr_retry_count             = 0; /* RFC draft warns against retries */
965         req.max_cm_retries              = 15;
966         req.srq                         = ipoib_cm_has_srq(priv);
967         return ib_send_cm_req(id, &req);
968 }
969
970 static int ipoib_cm_modify_tx_init(struct ipoib_dev_priv *priv,
971                                   struct ib_cm_id *cm_id, struct ib_qp *qp)
972 {
973         struct ib_qp_attr qp_attr;
974         int qp_attr_mask, ret;
975         ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
976         if (ret) {
977                 ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
978                 return ret;
979         }
980
981         qp_attr.qp_state = IB_QPS_INIT;
982         qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE;
983         qp_attr.port_num = priv->port;
984         qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;
985
986         ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
987         if (ret) {
988                 ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
989                 return ret;
990         }
991         return 0;
992 }
993
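/*
 * Set up the active (transmit) side of a connection: allocate the tx ring,
 * create the RC QP and CM ID, move the QP to INIT and send the connection
 * request to the remote QPN described by the path record.
 */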
994 static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
995                             struct ib_sa_path_rec *pathrec)
996 {
997         struct ipoib_dev_priv *priv = p->priv;
998         int ret;
999
1000         p->tx_ring = kzalloc(ipoib_sendq_size * sizeof *p->tx_ring, GFP_KERNEL);
1001         if (!p->tx_ring) {
1002                 ipoib_warn(priv, "failed to allocate tx ring\n");
1003                 ret = -ENOMEM;
1004                 goto err_tx;
1005         }
1006         memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring);
1007
1008         p->qp = ipoib_cm_create_tx_qp(p->priv, p);
1009         if (IS_ERR(p->qp)) {
1010                 ret = PTR_ERR(p->qp);
1011                 ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
1012                 goto err_qp;
1013         }
1014
1015         p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
1016         if (IS_ERR(p->id)) {
1017                 ret = PTR_ERR(p->id);
1018                 ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
1019                 goto err_id;
1020         }
1021
1022         ret = ipoib_cm_modify_tx_init(p->priv, p->id,  p->qp);
1023         if (ret) {
1024                 ipoib_warn(priv, "failed to modify tx qp to INIT: %d\n", ret);
1025                 goto err_modify;
1026         }
1027
1028         ret = ipoib_cm_send_req(p->priv, p->id, p->qp, qpn, pathrec);
1029         if (ret) {
1030                 ipoib_warn(priv, "failed to send cm req: %d\n", ret);
1031                 goto err_send_cm;
1032         }
1033
1034         ipoib_dbg(priv, "Request connection 0x%x for gid %pI6 qpn 0x%x\n",
1035                   p->qp->qp_num, pathrec->dgid.raw, qpn);
1036
1037         return 0;
1038
1039 err_send_cm:
1040 err_modify:
1041         ib_destroy_cm_id(p->id);
1042 err_id:
1043         p->id = NULL;
1044         ib_destroy_qp(p->qp);
1045 err_qp:
1046         p->qp = NULL;
1047         kfree(p->tx_ring);
1048 err_tx:
1049         return ret;
1050 }
1051
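/*
 * Tear down the transmit side of a connection: free the path (if any),
 * destroy the CM ID, wait briefly for outstanding sends to complete, then
 * unmap and free whatever remains in the tx ring and destroy the QP.
 */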
1052 static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
1053 {
1054         struct ipoib_dev_priv *priv = p->priv;
1055         struct ifnet *dev = priv->dev;
1056         struct ipoib_cm_tx_buf *tx_req;
1057         unsigned long begin;
1058
1059         ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
1060                   p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);
1061
1062         if (p->path)
1063                 ipoib_path_free(priv, p->path);
1064
1065         if (p->id)
1066                 ib_destroy_cm_id(p->id);
1067
1068         if (p->tx_ring) {
1069                 /* Wait for all sends to complete */
1070                 begin = jiffies;
1071                 while ((int) p->tx_tail - (int) p->tx_head < 0) {
1072                         if (time_after(jiffies, begin + 5 * HZ)) {
1073                                 ipoib_warn(priv, "timing out; %d sends not completed\n",
1074                                            p->tx_head - p->tx_tail);
1075                                 goto timeout;
1076                         }
1077
1078                         msleep(1);
1079                 }
1080         }
1081
1082 timeout:
1083
1084         while ((int) p->tx_tail - (int) p->tx_head < 0) {
1085                 tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
1086                 ipoib_dma_unmap_tx(priv->ca, (struct ipoib_tx_buf *)tx_req);
1087                 m_freem(tx_req->mb);
1088                 ++p->tx_tail;
1089                 if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
1090                     (dev->if_drv_flags & IFF_DRV_OACTIVE) != 0 &&
1091                     test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
1092                         dev->if_drv_flags &= ~IFF_DRV_OACTIVE;
1093         }
1094
1095         if (p->qp)
1096                 ib_destroy_qp(p->qp);
1097
1098         kfree(p->tx_ring);
1099         kfree(p);
1100 }
1101
1102 static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
1103                                struct ib_cm_event *event)
1104 {
1105         struct ipoib_cm_tx *tx = cm_id->context;
1106         struct ipoib_dev_priv *priv = tx->priv;
1107         struct ipoib_path *path;
1108         unsigned long flags;
1109         int ret;
1110
1111         switch (event->event) {
1112         case IB_CM_DREQ_RECEIVED:
1113                 ipoib_dbg(priv, "DREQ received.\n");
1114                 ib_send_cm_drep(cm_id, NULL, 0);
1115                 break;
1116         case IB_CM_REP_RECEIVED:
1117                 ipoib_dbg(priv, "REP received.\n");
1118                 ret = ipoib_cm_rep_handler(cm_id, event);
1119                 if (ret)
1120                         ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
1121                                        NULL, 0, NULL, 0);
1122                 break;
1123         case IB_CM_REQ_ERROR:
1124         case IB_CM_REJ_RECEIVED:
1125         case IB_CM_TIMEWAIT_EXIT:
1126                 ipoib_dbg(priv, "CM error %d.\n", event->event);
1127                 spin_lock_irqsave(&priv->lock, flags);
1128                 path = tx->path;
1129
1130                 if (path) {
1131                         path->cm = NULL;
1132                         tx->path = NULL;
1133                         rb_erase(&path->rb_node, &priv->path_tree);
1134                         list_del(&path->list);
1135                 }
1136
1137                 if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
1138                         list_move(&tx->list, &priv->cm.reap_list);
1139                         queue_work(ipoib_workqueue, &priv->cm.reap_task);
1140                 }
1141
1142                 spin_unlock_irqrestore(&priv->lock, flags);
1143                 if (path)
1144                         ipoib_path_free(tx->priv, path);
1145                 break;
1146         default:
1147                 break;
1148         }
1149
1150         return 0;
1151 }
1152
1153 struct ipoib_cm_tx *ipoib_cm_create_tx(struct ipoib_dev_priv *priv,
1154     struct ipoib_path *path)
1155 {
1156         struct ipoib_cm_tx *tx;
1157
1158         tx = kzalloc(sizeof *tx, GFP_ATOMIC);
1159         if (!tx)
1160                 return NULL;
1161
1162         ipoib_dbg(priv, "Creating cm tx\n");
1163         path->cm = tx;
1164         tx->path = path;
1165         tx->priv = priv;
1166         list_add(&tx->list, &priv->cm.start_list);
1167         set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
1168         queue_work(ipoib_workqueue, &priv->cm.start_task);
1169         return tx;
1170 }
1171
1172 void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
1173 {
1174         struct ipoib_dev_priv *priv = tx->priv;
1175         if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
1176                 spin_lock(&priv->lock);
1177                 list_move(&tx->list, &priv->cm.reap_list);
1178                 spin_unlock(&priv->lock);
1179                 queue_work(ipoib_workqueue, &priv->cm.reap_task);
1180                 ipoib_dbg(priv, "Reap connection for gid %pI6\n",
1181                           tx->path->pathrec.dgid.raw);
1182                 tx->path = NULL;
1183         }
1184 }
1185
1186 static void ipoib_cm_tx_start(struct work_struct *work)
1187 {
1188         struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
1189                                                    cm.start_task);
1190         struct ipoib_path *path;
1191         struct ipoib_cm_tx *p;
1192         unsigned long flags;
1193         int ret;
1194
1195         struct ib_sa_path_rec pathrec;
1196         u32 qpn;
1197
1198         ipoib_dbg(priv, "cm start task\n");
1199         spin_lock_irqsave(&priv->lock, flags);
1200
1201         while (!list_empty(&priv->cm.start_list)) {
1202                 p = list_entry(priv->cm.start_list.next, typeof(*p), list);
1203                 list_del_init(&p->list);
1204                 path = p->path;
1205                 qpn = IPOIB_QPN(path->hwaddr);
1206                 memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);
1207
1208                 spin_unlock_irqrestore(&priv->lock, flags);
1209
1210                 ret = ipoib_cm_tx_init(p, qpn, &pathrec);
1211
1212                 spin_lock_irqsave(&priv->lock, flags);
1213
1214                 if (ret) {
1215                         path = p->path;
1216                         if (path) {
1217                                 path->cm = NULL;
1218                                 rb_erase(&path->rb_node, &priv->path_tree);
1219                                 list_del(&path->list);
1220                                 ipoib_path_free(priv, path);
1221                         }
1222                         list_del(&p->list);
1223                         kfree(p);
1224                 }
1225         }
1226
1227         spin_unlock_irqrestore(&priv->lock, flags);
1228 }
1229
1230 static void ipoib_cm_tx_reap(struct work_struct *work)
1231 {
1232         struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
1233                                                    cm.reap_task);
1234         struct ipoib_cm_tx *p;
1235         unsigned long flags;
1236
1237         spin_lock_irqsave(&priv->lock, flags);
1238
1239         while (!list_empty(&priv->cm.reap_list)) {
1240                 p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
1241                 list_del(&p->list);
1242                 spin_unlock_irqrestore(&priv->lock, flags);
1243                 ipoib_cm_tx_destroy(p);
1244                 spin_lock_irqsave(&priv->lock, flags);
1245         }
1246
1247         spin_unlock_irqrestore(&priv->lock, flags);
1248 }
1249
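/*
 * Work handler that drains the queue of packets which were too large for
 * their connection's MTU and generates the corresponding ICMP
 * "fragmentation needed" or ICMPv6 "packet too big" error for each one.
 */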
1250 static void ipoib_cm_mb_reap(struct work_struct *work)
1251 {
1252         struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
1253                                                    cm.mb_task);
1254         struct mbuf *mb;
1255         unsigned long flags;
1256 #if defined(INET) || defined(INET6)
1257         unsigned mtu = priv->mcast_mtu;
1258 #endif
1259         uint16_t proto;
1260
1261         spin_lock_irqsave(&priv->lock, flags);
1262
1263         for (;;) {
1264                 IF_DEQUEUE(&priv->cm.mb_queue, mb);
1265                 if (mb == NULL)
1266                         break;
1267                 spin_unlock_irqrestore(&priv->lock, flags);
1268
1269                 proto = htons(*mtod(mb, uint16_t *));
1270                 m_adj(mb, IPOIB_ENCAP_LEN);
1271                 switch (proto) {
1272 #if defined(INET)
1273                 case ETHERTYPE_IP:
1274                         icmp_error(mb, ICMP_UNREACH, ICMP_UNREACH_NEEDFRAG, 0, mtu);
1275                         break;
1276 #endif
1277 #if defined(INET6)
1278                 case ETHERTYPE_IPV6:
1279                         icmp6_error(mb, ICMP6_PACKET_TOO_BIG, 0, mtu);
1280                         break;
1281 #endif
1282                 default:
1283                         m_freem(mb);
1284                 }
1285
1286                 spin_lock_irqsave(&priv->lock, flags);
1287         }
1288
1289         spin_unlock_irqrestore(&priv->lock, flags);
1290 }
1291
1292 void
1293 ipoib_cm_mb_too_long(struct ipoib_dev_priv *priv, struct mbuf *mb, unsigned int mtu)
1294 {
1295         int e = priv->cm.mb_queue.ifq_len; 
1296
1297         IF_ENQUEUE(&priv->cm.mb_queue, mb);
1298         if (e == 0)
1299                 queue_work(ipoib_workqueue, &priv->cm.mb_task);
1300 }
1301
1302 static void ipoib_cm_rx_reap(struct work_struct *work)
1303 {
1304         ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
1305                                                 cm.rx_reap_task));
1306 }
1307
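/*
 * Periodic work that walks the LRU-ordered passive connection list and
 * moves connections which have been idle longer than IPOIB_CM_RX_TIMEOUT
 * into the error state so they get drained and reaped.
 */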
1308 static void ipoib_cm_stale_task(struct work_struct *work)
1309 {
1310         struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
1311                                                    cm.stale_task.work);
1312         struct ipoib_cm_rx *p;
1313         int ret;
1314
1315         spin_lock_irq(&priv->lock);
1316         while (!list_empty(&priv->cm.passive_ids)) {
1317                 /* List is sorted by LRU, start from tail,
1318                  * stop when we see a recently used entry */
1319                 p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
1320                 if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
1321                         break;
1322                 list_move(&p->list, &priv->cm.rx_error_list);
1323                 p->state = IPOIB_CM_RX_ERROR;
1324                 spin_unlock_irq(&priv->lock);
1325                 ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
1326                 if (ret)
1327                         ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
1328                 spin_lock_irq(&priv->lock);
1329         }
1330
1331         if (!list_empty(&priv->cm.passive_ids))
1332                 queue_delayed_work(ipoib_workqueue,
1333                                    &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
1334         spin_unlock_irq(&priv->lock);
1335 }
1336
1337
1338 static void ipoib_cm_create_srq(struct ipoib_dev_priv *priv, int max_sge)
1339 {
1340         struct ib_srq_init_attr srq_init_attr = {
1341                 .attr = {
1342                         .max_wr  = ipoib_recvq_size,
1343                         .max_sge = max_sge
1344                 }
1345         };
1346
1347         priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
1348         if (IS_ERR(priv->cm.srq)) {
1349                 if (PTR_ERR(priv->cm.srq) != -ENOSYS)
1350                         printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
1351                                priv->ca->name, PTR_ERR(priv->cm.srq));
1352                 priv->cm.srq = NULL;
1353                 return;
1354         }
1355
1356         priv->cm.srq_ring = kzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring, GFP_KERNEL);
1357         if (!priv->cm.srq_ring) {
1358                 printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
1359                        priv->ca->name, ipoib_recvq_size);
1360                 ib_destroy_srq(priv->cm.srq);
1361                 priv->cm.srq = NULL;
1362                 return;
1363         }
1364
1365         memset(priv->cm.srq_ring, 0, ipoib_recvq_size * sizeof *priv->cm.srq_ring);
1366 }
1367
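/*
 * Per-interface connected-mode initialization: set up the CM lists, work
 * tasks and overflow queue, create the SRQ when the device supports one,
 * size the maximum connected-mode MTU accordingly, prepost the SRQ receive
 * buffers and advertise RC support in the link-layer address.
 */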
1368 int ipoib_cm_dev_init(struct ipoib_dev_priv *priv)
1369 {
1370         struct ifnet *dev = priv->dev;
1371         int i;
1372         int max_srq_sge;
1373
1374         INIT_LIST_HEAD(&priv->cm.passive_ids);
1375         INIT_LIST_HEAD(&priv->cm.reap_list);
1376         INIT_LIST_HEAD(&priv->cm.start_list);
1377         INIT_LIST_HEAD(&priv->cm.rx_error_list);
1378         INIT_LIST_HEAD(&priv->cm.rx_flush_list);
1379         INIT_LIST_HEAD(&priv->cm.rx_drain_list);
1380         INIT_LIST_HEAD(&priv->cm.rx_reap_list);
1381         INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
1382         INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
1383         INIT_WORK(&priv->cm.mb_task, ipoib_cm_mb_reap);
1384         INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
1385         INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);
1386
1387         bzero(&priv->cm.mb_queue, sizeof(priv->cm.mb_queue));
1388         mtx_init(&priv->cm.mb_queue.ifq_mtx,
1389             dev->if_xname, "if send queue", MTX_DEF);
1390
1391         max_srq_sge = priv->ca->attrs.max_srq_sge;
1392
1393         ipoib_dbg(priv, "max_srq_sge=%d\n", max_srq_sge);
1394
1395         max_srq_sge = min_t(int, IPOIB_CM_RX_SG, max_srq_sge);
1396         ipoib_cm_create_srq(priv, max_srq_sge);
1397         if (ipoib_cm_has_srq(priv)) {
1398                 priv->cm.max_cm_mtu = max_srq_sge * MJUMPAGESIZE;
1399                 priv->cm.num_frags  = max_srq_sge;
1400                 ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
1401                           priv->cm.max_cm_mtu, priv->cm.num_frags);
1402         } else {
1403                 priv->cm.max_cm_mtu = IPOIB_CM_MAX_MTU;
1404                 priv->cm.num_frags  = IPOIB_CM_RX_SG;
1405         }
1406
1407         ipoib_cm_init_rx_wr(priv, &priv->cm.rx_wr, priv->cm.rx_sge);
1408
1409         if (ipoib_cm_has_srq(priv)) {
1410                 for (i = 0; i < ipoib_recvq_size; ++i) {
1411                         if (!ipoib_cm_alloc_rx_mb(priv, &priv->cm.srq_ring[i])) {
1412                                 ipoib_warn(priv, "failed to allocate "
1413                                            "receive buffer %d\n", i);
1414                                 ipoib_cm_dev_cleanup(priv);
1415                                 return -ENOMEM;
1416                         }
1417
1418                         if (ipoib_cm_post_receive_srq(priv, i)) {
1419                                 ipoib_warn(priv, "ipoib_cm_post_receive_srq "
1420                                            "failed for buf %d\n", i);
1421                                 ipoib_cm_dev_cleanup(priv);
1422                                 return -EIO;
1423                         }
1424                 }
1425         }
1426
1427         IF_LLADDR(priv->dev)[0] = IPOIB_FLAGS_RC;
1428         return 0;
1429 }
1430
1431 void ipoib_cm_dev_cleanup(struct ipoib_dev_priv *priv)
1432 {
1433         int ret;
1434
1435         if (!priv->cm.srq)
1436                 return;
1437
1438         ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");
1439
1440         ret = ib_destroy_srq(priv->cm.srq);
1441         if (ret)
1442                 ipoib_warn(priv, "ib_destroy_srq failed: %d\n", ret);
1443
1444         priv->cm.srq = NULL;
1445         if (!priv->cm.srq_ring)
1446                 return;
1447
1448         ipoib_cm_free_rx_ring(priv, priv->cm.srq_ring);
1449         priv->cm.srq_ring = NULL;
1450
1451         mtx_destroy(&priv->cm.mb_queue.ifq_mtx);
1452 }
1453
1454 #endif /* CONFIG_INFINIBAND_IPOIB_CM */