/*
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/socket.h>
#include <sys/sbuf.h>

#include "iw_cxgbe.h"
#include "user.h"

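/*
 * destroy_cq - release a hardware CQ.
 *
 * Posts a FW_RI_RES_WR with op RESET on the management queue to tear down
 * the ingress queue in the firmware, waits for the reply, then frees the
 * software shadow queue, the CQ memory, and returns the cqid to the pool.
 */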
static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                      struct c4iw_dev_ucontext *uctx)
{
        struct adapter *sc = rdev->adap;
        struct fw_ri_res_wr *res_wr;
        struct fw_ri_res *res;
        int wr_len;
        struct c4iw_wr_wait wr_wait;
        struct wrqe *wr;

        wr_len = sizeof *res_wr + sizeof *res;
        wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
        if (wr == NULL)
                return (0);
        res_wr = wrtod(wr);
        memset(res_wr, 0, wr_len);
        res_wr->op_nres = cpu_to_be32(
                        V_FW_WR_OP(FW_RI_RES_WR) |
                        V_FW_RI_RES_WR_NRES(1) |
                        F_FW_WR_COMPL);
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (unsigned long) &wr_wait;
        res = res_wr->res;
        res->u.cq.restype = FW_RI_RES_TYPE_CQ;
        res->u.cq.op = FW_RI_RES_OP_RESET;
        res->u.cq.iqid = cpu_to_be32(cq->cqid);

        c4iw_init_wr_wait(&wr_wait);

        t4_wrq_tx(sc, wr);

        c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);

        kfree(cq->sw_queue);
        contigfree(cq->queue, cq->memsize, M_DEVBUF);
        c4iw_put_cqid(rdev, cq->cqid, uctx);
        return 0;
}

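/*
 * create_cq - allocate and program a hardware CQ.
 *
 * Allocates a cqid, host memory for the CQ (plus a software shadow queue
 * for kernel CQs), then posts a FW_RI_RES_WR with op WRITE so the firmware
 * programs the ingress queue.  On success the CQ is ready for polling; on
 * failure all partially allocated resources are released.
 */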
static int
create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
    struct c4iw_dev_ucontext *uctx)
{
        struct adapter *sc = rdev->adap;
        struct fw_ri_res_wr *res_wr;
        struct fw_ri_res *res;
        int wr_len;
        int user = (uctx != &rdev->uctx);
        struct c4iw_wr_wait wr_wait;
        int ret;
        struct wrqe *wr;

        cq->cqid = c4iw_get_cqid(rdev, uctx);
        if (!cq->cqid) {
                ret = -ENOMEM;
                goto err1;
        }

        if (!user) {
                cq->sw_queue = kzalloc(cq->memsize, GFP_KERNEL);
                if (!cq->sw_queue) {
                        ret = -ENOMEM;
                        goto err2;
                }
        }

        cq->queue = contigmalloc(cq->memsize, M_DEVBUF, M_NOWAIT, 0ul, ~0ul,
            PAGE_SIZE, 0);
        if (cq->queue)
                cq->dma_addr = vtophys(cq->queue);
        else {
                ret = -ENOMEM;
                goto err3;
        }

        pci_unmap_addr_set(cq, mapping, cq->dma_addr);
        memset(cq->queue, 0, cq->memsize);

        /* build fw_ri_res_wr */
        wr_len = sizeof *res_wr + sizeof *res;

        wr = alloc_wrqe(wr_len, &sc->sge.mgmtq);
        if (wr == NULL) {
                /* Report the failure and unwind prior allocations. */
                ret = -ENOMEM;
                goto err4;
        }
        res_wr = wrtod(wr);

        memset(res_wr, 0, wr_len);
        res_wr->op_nres = cpu_to_be32(
                        V_FW_WR_OP(FW_RI_RES_WR) |
                        V_FW_RI_RES_WR_NRES(1) |
                        F_FW_WR_COMPL);
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
        res_wr->cookie = (unsigned long) &wr_wait;
        res = res_wr->res;
        res->u.cq.restype = FW_RI_RES_TYPE_CQ;
        res->u.cq.op = FW_RI_RES_OP_WRITE;
        res->u.cq.iqid = cpu_to_be32(cq->cqid);
        /* FIXME: Always use first queue id for IQANDSTINDEX. Linux does the same. */
        res->u.cq.iqandst_to_iqandstindex = cpu_to_be32(
                        V_FW_RI_RES_WR_IQANUS(0) |
                        V_FW_RI_RES_WR_IQANUD(1) |
                        F_FW_RI_RES_WR_IQANDST |
                        V_FW_RI_RES_WR_IQANDSTINDEX(sc->sge.ofld_rxq[0].iq.abs_id));
        res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
                        F_FW_RI_RES_WR_IQDROPRSS |
                        V_FW_RI_RES_WR_IQPCIECH(2) |
                        V_FW_RI_RES_WR_IQINTCNTTHRESH(0) |
                        F_FW_RI_RES_WR_IQO |
                        V_FW_RI_RES_WR_IQESIZE(1));
        res->u.cq.iqsize = cpu_to_be16(cq->size);
        res->u.cq.iqaddr = cpu_to_be64(cq->dma_addr);

        c4iw_init_wr_wait(&wr_wait);

        t4_wrq_tx(sc, wr);

        CTR2(KTR_IW_CXGBE, "%s wait_event wr_wait %p", __func__, &wr_wait);
        ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
        if (ret)
                goto err4;

        cq->gen = 1;
        cq->gts = (void *)((unsigned long)rman_get_virtual(sc->regs_res) +
            sc->sge_gts_reg);
        cq->rdev = rdev;

        if (user) {
                cq->ugts = (u64)((char*)rman_get_virtual(sc->udbs_res) +
                    (cq->cqid << rdev->cqshift));
                cq->ugts &= PAGE_MASK;
                CTR5(KTR_IW_CXGBE,
                    "%s: UGTS %p cqid %x cqshift %d page_mask %x", __func__,
                    cq->ugts, cq->cqid, rdev->cqshift, PAGE_MASK);
        }
        return 0;
err4:
        contigfree(cq->queue, cq->memsize, M_DEVBUF);
err3:
        kfree(cq->sw_queue);
err2:
        c4iw_put_cqid(rdev, cq->cqid, uctx);
err1:
        return ret;
}

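/*
 * insert_recv_cqe - synthesize a flush completion for one RQ WR.
 *
 * Builds a T4_ERR_SWFLUSH receive CQE in local memory and pushes it onto
 * the software CQ so the consumer sees the unreaped RQ entry as flushed.
 */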
static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
{
        struct t4_cqe cqe;

        CTR5(KTR_IW_CXGBE, "%s wq %p cq %p sw_cidx %u sw_pidx %u", __func__, wq,
            cq, cq->sw_cidx, cq->sw_pidx);
        memset(&cqe, 0, sizeof(cqe));
        cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
                                 V_CQE_OPCODE(FW_RI_SEND) |
                                 V_CQE_TYPE(0) |
                                 V_CQE_SWCQE(1) |
                                 V_CQE_QPID(wq->sq.qid));
        cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
        cq->sw_queue[cq->sw_pidx] = cqe;
        t4_swcq_produce(cq);
}

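/*
 * c4iw_flush_rq - flush outstanding RQ entries into the software CQ.
 *
 * Inserts a software flush CQE for each RQ WR still in use beyond the
 * 'count' completions already accounted for, returning the number flushed.
 */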
int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
        int flushed = 0;
        int in_use = wq->rq.in_use - count;

        BUG_ON(in_use < 0);
        CTR5(KTR_IW_CXGBE, "%s wq %p cq %p rq.in_use %u skip count %u",
            __func__, wq, cq, wq->rq.in_use, count);
        while (in_use--) {
                insert_recv_cqe(wq, cq);
                flushed++;
        }
        return flushed;
}

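/*
 * insert_sq_cqe - synthesize a flush completion for one SQ WR.
 *
 * Builds a T4_ERR_SWFLUSH send CQE carrying the WR's opcode and SQ index
 * and pushes it onto the software CQ.
 */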
static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
                          struct t4_swsqe *swcqe)
{
        struct t4_cqe cqe;

        CTR5(KTR_IW_CXGBE, "%s wq %p cq %p sw_cidx %u sw_pidx %u", __func__, wq,
            cq, cq->sw_cidx, cq->sw_pidx);
        memset(&cqe, 0, sizeof(cqe));
        cqe.header = cpu_to_be32(V_CQE_STATUS(T4_ERR_SWFLUSH) |
                                 V_CQE_OPCODE(swcqe->opcode) |
                                 V_CQE_TYPE(1) |
                                 V_CQE_SWCQE(1) |
                                 V_CQE_QPID(wq->sq.qid));
        CQE_WRID_SQ_IDX(&cqe) = swcqe->idx;
        cqe.bits_type_ts = cpu_to_be64(V_CQE_GENBIT((u64)cq->gen));
        cq->sw_queue[cq->sw_pidx] = cqe;
        t4_swcq_produce(cq);
}

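/*
 * c4iw_flush_sq - flush outstanding SQ entries into the software CQ.
 *
 * Starting 'count' entries past the current SQ cidx, marks each remaining
 * in-use WR unsignaled and inserts a software flush CQE for it, wrapping
 * around the SQ ring as needed.  Returns the number of entries flushed.
 */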
int c4iw_flush_sq(struct t4_wq *wq, struct t4_cq *cq, int count)
{
        int flushed = 0;
        struct t4_swsqe *swsqe = &wq->sq.sw_sq[wq->sq.cidx + count];
        int in_use = wq->sq.in_use - count;

        BUG_ON(in_use < 0);
        while (in_use--) {
                swsqe->signaled = 0;
                insert_sq_cqe(wq, cq, swsqe);
                swsqe++;
                if (swsqe == (wq->sq.sw_sq + wq->sq.size))
                        swsqe = wq->sq.sw_sq;
                flushed++;
        }
        return flushed;
}

/*
 * Move all CQEs from the HWCQ into the SWCQ.
 */
void c4iw_flush_hw_cq(struct t4_cq *cq)
{
        struct t4_cqe *cqe = NULL, *swcqe;
        int ret;

        CTR3(KTR_IW_CXGBE, "%s cq %p cqid 0x%x", __func__, cq, cq->cqid);
        ret = t4_next_hw_cqe(cq, &cqe);
        while (!ret) {
                CTR3(KTR_IW_CXGBE, "%s flushing hwcq cidx 0x%x swcq pidx 0x%x",
                    __func__, cq->cidx, cq->sw_pidx);
                swcqe = &cq->sw_queue[cq->sw_pidx];
                *swcqe = *cqe;
                swcqe->header |= cpu_to_be32(V_CQE_SWCQE(1));
                t4_swcq_produce(cq);
                t4_hwcq_consume(cq);
                ret = t4_next_hw_cqe(cq, &cqe);
        }
}

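/*
 * cqe_completes_wr - return 1 if this CQE consumes a WR on the given wq.
 *
 * TERMINATE CQEs, RQ-side RDMA_WRITE or SQ-side READ_RESP completions, and
 * SEND completions arriving on an empty RQ do not complete any posted WR.
 */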
static int cqe_completes_wr(struct t4_cqe *cqe, struct t4_wq *wq)
{
        if (CQE_OPCODE(cqe) == FW_RI_TERMINATE)
                return 0;

        if ((CQE_OPCODE(cqe) == FW_RI_RDMA_WRITE) && RQ_TYPE(cqe))
                return 0;

        if ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) && SQ_TYPE(cqe))
                return 0;

        if (CQE_SEND_OPCODE(cqe) && RQ_TYPE(cqe) && t4_rq_empty(wq))
                return 0;
        return 1;
}

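/*
 * c4iw_count_scqes - count SQ completions pending in the software CQ for
 * the given wq, including read responses while a read is still outstanding.
 */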
void c4iw_count_scqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
        struct t4_cqe *cqe;
        u32 ptr;

        *count = 0;
        ptr = cq->sw_cidx;
        while (ptr != cq->sw_pidx) {
                cqe = &cq->sw_queue[ptr];
                if ((SQ_TYPE(cqe) || ((CQE_OPCODE(cqe) == FW_RI_READ_RESP) &&
                                      wq->sq.oldest_read)) &&
                    (CQE_QPID(cqe) == wq->sq.qid))
                        (*count)++;
                if (++ptr == cq->size)
                        ptr = 0;
        }
        CTR3(KTR_IW_CXGBE, "%s cq %p count %d", __func__, cq, *count);
}

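/*
 * c4iw_count_rcqes - count RQ completions pending in the software CQ for
 * the given wq that actually complete a posted receive WR.
 */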
void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
{
        struct t4_cqe *cqe;
        u32 ptr;

        *count = 0;
        CTR2(KTR_IW_CXGBE, "%s count zero %d", __func__, *count);
        ptr = cq->sw_cidx;
        while (ptr != cq->sw_pidx) {
                cqe = &cq->sw_queue[ptr];
                if (RQ_TYPE(cqe) && (CQE_OPCODE(cqe) != FW_RI_READ_RESP) &&
                    (CQE_QPID(cqe) == wq->sq.qid) && cqe_completes_wr(cqe, wq))
                        (*count)++;
                if (++ptr == cq->size)
                        ptr = 0;
        }
        CTR3(KTR_IW_CXGBE, "%s cq %p count %d", __func__, cq, *count);
}

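/*
 * flush_completed_wrs - move now in-order completed SQ WRs into the SWCQ.
 *
 * Walks the software SQ from the current cidx, skipping unsignaled WRs.
 * When the next signaled WR has already completed, its saved CQE is pushed
 * onto the software CQ and the skipped unsignaled WRs are reaped.
 */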
static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
{
        struct t4_swsqe *swsqe;
        u16 ptr = wq->sq.cidx;
        int count = wq->sq.in_use;
        int unsignaled = 0;

        swsqe = &wq->sq.sw_sq[ptr];
        while (count--)
                if (!swsqe->signaled) {
                        if (++ptr == wq->sq.size)
                                ptr = 0;
                        swsqe = &wq->sq.sw_sq[ptr];
                        unsignaled++;
                } else if (swsqe->complete) {

                        /*
                         * Insert this completed cqe into the swcq.
                         */
                        CTR3(KTR_IW_CXGBE,
                            "%s moving cqe into swcq sq idx %u cq idx %u",
                            __func__, ptr, cq->sw_pidx);
                        swsqe->cqe.header |= htonl(V_CQE_SWCQE(1));
                        cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
                        t4_swcq_produce(cq);
                        swsqe->signaled = 0;
                        wq->sq.in_use -= unsignaled;
                        break;
                } else
                        break;
}

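/*
 * create_read_req_cqe - rebuild a READ_REQ completion in local memory.
 *
 * READ responses complete on the RQ side of the hardware CQ, so this
 * constructs the matching SQ-side FW_RI_READ_REQ CQE for the oldest
 * outstanding read without modifying the HWCQ.
 */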
static void create_read_req_cqe(struct t4_wq *wq, struct t4_cqe *hw_cqe,
                                struct t4_cqe *read_cqe)
{
        read_cqe->u.scqe.cidx = wq->sq.oldest_read->idx;
        read_cqe->len = cpu_to_be32(wq->sq.oldest_read->read_len);
        read_cqe->header = htonl(V_CQE_QPID(CQE_QPID(hw_cqe)) |
                                 V_CQE_SWCQE(SW_CQE(hw_cqe)) |
                                 V_CQE_OPCODE(FW_RI_READ_REQ) |
                                 V_CQE_TYPE(1));
        read_cqe->bits_type_ts = hw_cqe->bits_type_ts;
}

/*
 * Return a ptr to the next read wr in the SWSQ or NULL.
 */
static void advance_oldest_read(struct t4_wq *wq)
{

        u32 rptr = wq->sq.oldest_read - wq->sq.sw_sq + 1;

        if (rptr == wq->sq.size)
                rptr = 0;
        while (rptr != wq->sq.pidx) {
                wq->sq.oldest_read = &wq->sq.sw_sq[rptr];

                if (wq->sq.oldest_read->opcode == FW_RI_READ_REQ)
                        return;
                if (++rptr == wq->sq.size)
                        rptr = 0;
        }
        wq->sq.oldest_read = NULL;
}

/*
 * poll_cq
 *
 * Caller must:
 *     check the validity of the first CQE,
 *     supply the wq associated with the qpid.
 *
 * credit: cq credit to return to sge.
 * cqe_flushed: 1 iff the CQE is flushed.
 * cqe: copy of the polled CQE.
 *
 * return value:
 *    0             CQE returned ok.
 *    -EAGAIN       CQE skipped, try again.
 *    -EOVERFLOW    CQ overflow detected.
 */
static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
                   u8 *cqe_flushed, u64 *cookie, u32 *credit)
{
        int ret = 0;
        struct t4_cqe *hw_cqe, read_cqe;

        *cqe_flushed = 0;
        *credit = 0;
        ret = t4_next_cqe(cq, &hw_cqe);
        if (ret)
                return ret;

        CTR6(KTR_IW_CXGBE,
            "%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x", __func__,
            CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe), CQE_GENBIT(hw_cqe),
            CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe));
        CTR5(KTR_IW_CXGBE,
            "%s opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x",
            __func__, CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
            CQE_WRID_LOW(hw_cqe));

        /*
         * skip cqe's not affiliated with a QP.
         */
        if (wq == NULL) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * Special cqe for drain WR completions...
         */
        if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
                *cookie = CQE_DRAIN_COOKIE(hw_cqe);
                *cqe = *hw_cqe;
                goto skip_cqe;
        }

        /*
         * Gotta tweak READ completions:
         *      1) the cqe doesn't contain the sq_wptr from the wr.
         *      2) opcode not reflected from the wr.
         *      3) read_len not reflected from the wr.
         *      4) cq_type is RQ_TYPE not SQ_TYPE.
         */
        if (RQ_TYPE(hw_cqe) && (CQE_OPCODE(hw_cqe) == FW_RI_READ_RESP)) {

                /*
                 * If this is an unsolicited read response, then the read
                 * was generated by the kernel driver as part of peer-2-peer
                 * connection setup.  So ignore the completion.
                 */
                if (!wq->sq.oldest_read) {
                        if (CQE_STATUS(hw_cqe))
                                t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }

                /*
                 * Don't write to the HWCQ, so create a new read req CQE
                 * in local memory.
                 */
                create_read_req_cqe(wq, hw_cqe, &read_cqe);
                hw_cqe = &read_cqe;
                advance_oldest_read(wq);
        }

        if (CQE_STATUS(hw_cqe) || t4_wq_in_error(wq)) {
                *cqe_flushed = t4_wq_in_error(wq);
                t4_set_wq_in_error(wq);
                goto proc_cqe;
        }

        if (CQE_OPCODE(hw_cqe) == FW_RI_TERMINATE) {
                ret = -EAGAIN;
                goto skip_cqe;
        }

        /*
         * RECV completion.
         */
        if (RQ_TYPE(hw_cqe)) {

                /*
                 * HW only validates 4 bits of MSN.  So we must validate that
                 * the MSN in the SEND is the next expected MSN.  If it's not,
                 * then we complete this with T4_ERR_MSN and mark the wq in
                 * error.
                 */

                if (t4_rq_empty(wq)) {
                        t4_set_wq_in_error(wq);
                        ret = -EAGAIN;
                        goto skip_cqe;
                }
                if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) {
                        t4_set_wq_in_error(wq);
                        hw_cqe->header |= htonl(V_CQE_STATUS(T4_ERR_MSN));
                        goto proc_cqe;
                }
                goto proc_cqe;
        }

        /*
         * If we get here it's a send completion.
         *
         * Handle out of order completion. These get stuffed
         * in the SW SQ. Then the SW SQ is walked to move any
         * now in-order completions into the SW CQ.  This handles
         * 2 cases:
         *      1) reaping unsignaled WRs when the first subsequent
         *         signaled WR is completed.
         *      2) out of order read completions.
         */
        if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
                struct t4_swsqe *swsqe;

                CTR2(KTR_IW_CXGBE,
                    "%s out of order completion going in sw_sq at idx %u",
                    __func__, CQE_WRID_SQ_IDX(hw_cqe));
                swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
                swsqe->cqe = *hw_cqe;
                swsqe->complete = 1;
                ret = -EAGAIN;
                goto flush_wq;
        }

proc_cqe:
        *cqe = *hw_cqe;

        /*
         * Reap the associated WR(s) that are freed up with this
         * completion.
         */
        if (SQ_TYPE(hw_cqe)) {
                wq->sq.cidx = CQE_WRID_SQ_IDX(hw_cqe);
                CTR2(KTR_IW_CXGBE, "%s completing sq idx %u",
                     __func__, wq->sq.cidx);
                *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
                t4_sq_consume(wq);
        } else {
                CTR2(KTR_IW_CXGBE, "%s completing rq idx %u",
                     __func__, wq->rq.cidx);
                *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
                BUG_ON(t4_rq_empty(wq));
                t4_rq_consume(wq);
        }

flush_wq:
        /*
         * Flush any completed cqes that are now in-order.
         */
        flush_completed_wrs(wq, cq);

skip_cqe:
        if (SW_CQE(hw_cqe)) {
                CTR4(KTR_IW_CXGBE, "%s cq %p cqid 0x%x skip sw cqe cidx %u",
                     __func__, cq, cq->cqid, cq->sw_cidx);
                t4_swcq_consume(cq);
        } else {
                CTR4(KTR_IW_CXGBE, "%s cq %p cqid 0x%x skip hw cqe cidx %u",
                     __func__, cq, cq->cqid, cq->cidx);
                t4_hwcq_consume(cq);
        }
        return ret;
}

/*
 * Get one cq entry from c4iw and map it to openib.
 *
 * Returns:
 *      0                       cqe returned
 *      -ENODATA                EMPTY;
 *      -EAGAIN                 caller must try again
 *      any other -errno        fatal error
 */
static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
{
        struct c4iw_qp *qhp = NULL;
        struct t4_cqe cqe = {0, 0}, *rd_cqe;
        struct t4_wq *wq;
        u32 credit = 0;
        u8 cqe_flushed;
        u64 cookie = 0;
        int ret;

        ret = t4_next_cqe(&chp->cq, &rd_cqe);

        if (ret)
                return ret;

        qhp = get_qhp(chp->rhp, CQE_QPID(rd_cqe));
        if (!qhp)
                wq = NULL;
        else {
                spin_lock(&qhp->lock);
                wq = &(qhp->wq);
        }
        ret = poll_cq(wq, &(chp->cq), &cqe, &cqe_flushed, &cookie, &credit);
        if (ret)
                goto out;

        wc->wr_id = cookie;
        wc->qp = &qhp->ibqp;
        wc->vendor_err = CQE_STATUS(&cqe);
        wc->wc_flags = 0;

        CTR5(KTR_IW_CXGBE, "%s qpid 0x%x type %d opcode %d status 0x%x",
            __func__, CQE_QPID(&cqe), CQE_TYPE(&cqe), CQE_OPCODE(&cqe),
            CQE_STATUS(&cqe));
        CTR5(KTR_IW_CXGBE, "%s len %u wrid hi 0x%x lo 0x%x cookie 0x%llx",
            __func__, CQE_LEN(&cqe), CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe),
            (unsigned long long)cookie);

        if (CQE_TYPE(&cqe) == 0) {
                if (!CQE_STATUS(&cqe))
                        wc->byte_len = CQE_LEN(&cqe);
                else
                        wc->byte_len = 0;
                wc->opcode = IB_WC_RECV;
                if (CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_INV ||
                    CQE_OPCODE(&cqe) == FW_RI_SEND_WITH_SE_INV) {
                        wc->ex.invalidate_rkey = CQE_WRID_STAG(&cqe);
                        wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                }
        } else {
                switch (CQE_OPCODE(&cqe)) {
                case FW_RI_RDMA_WRITE:
                        wc->opcode = IB_WC_RDMA_WRITE;
                        break;
                case FW_RI_READ_REQ:
                        wc->opcode = IB_WC_RDMA_READ;
                        wc->byte_len = CQE_LEN(&cqe);
                        break;
                case FW_RI_SEND_WITH_INV:
                case FW_RI_SEND_WITH_SE_INV:
                        wc->opcode = IB_WC_SEND;
                        wc->wc_flags |= IB_WC_WITH_INVALIDATE;
                        break;
                case FW_RI_SEND:
                case FW_RI_SEND_WITH_SE:
                        wc->opcode = IB_WC_SEND;
                        break;
                case FW_RI_BIND_MW:
                        wc->opcode = IB_WC_BIND_MW;
                        break;

                case FW_RI_LOCAL_INV:
                        wc->opcode = IB_WC_LOCAL_INV;
                        break;
                case FW_RI_FAST_REGISTER:
                        wc->opcode = IB_WC_FAST_REG_MR;
                        break;
                case C4IW_DRAIN_OPCODE:
                        wc->opcode = IB_WC_SEND;
                        break;
                default:
                        printf("Unexpected opcode %d "
                               "in the CQE received for QPID = 0x%0x\n",
                               CQE_OPCODE(&cqe), CQE_QPID(&cqe));
                        ret = -EINVAL;
                        goto out;
                }
        }

        if (cqe_flushed)
                wc->status = IB_WC_WR_FLUSH_ERR;
        else {

                switch (CQE_STATUS(&cqe)) {
                case T4_ERR_SUCCESS:
                        wc->status = IB_WC_SUCCESS;
                        break;
                case T4_ERR_STAG:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case T4_ERR_PDID:
                        wc->status = IB_WC_LOC_PROT_ERR;
                        break;
                case T4_ERR_QPID:
                case T4_ERR_ACCESS:
                        wc->status = IB_WC_LOC_ACCESS_ERR;
                        break;
                case T4_ERR_WRAP:
                        wc->status = IB_WC_GENERAL_ERR;
                        break;
                case T4_ERR_BOUND:
                        wc->status = IB_WC_LOC_LEN_ERR;
                        break;
                case T4_ERR_INVALIDATE_SHARED_MR:
                case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
                        wc->status = IB_WC_MW_BIND_ERR;
                        break;
                case T4_ERR_CRC:
                case T4_ERR_MARKER:
                case T4_ERR_PDU_LEN_ERR:
                case T4_ERR_OUT_OF_RQE:
                case T4_ERR_DDP_VERSION:
                case T4_ERR_RDMA_VERSION:
                case T4_ERR_DDP_QUEUE_NUM:
                case T4_ERR_MSN:
                case T4_ERR_TBIT:
                case T4_ERR_MO:
                case T4_ERR_MSN_RANGE:
                case T4_ERR_IRD_OVERFLOW:
                case T4_ERR_OPCODE:
                case T4_ERR_INTERNAL_ERR:
                        wc->status = IB_WC_FATAL_ERR;
                        break;
                case T4_ERR_SWFLUSH:
                        wc->status = IB_WC_WR_FLUSH_ERR;
                        break;
                default:
                        printf("Unexpected cqe_status 0x%x for QPID = 0x%0x\n",
                               CQE_STATUS(&cqe), CQE_QPID(&cqe));
                        wc->status = IB_WC_FATAL_ERR;
                }
        }
out:
        if (wq)
                spin_unlock(&qhp->lock);
        return ret;
}

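/*
 * c4iw_poll_cq - poll up to num_entries completions from an ib_cq.
 *
 * Repeatedly calls c4iw_poll_cq_one() under the CQ lock, retrying on
 * -EAGAIN (skipped CQEs) and stopping on -ENODATA (empty) or a hard error.
 * Returns the number of work completions reported, or a negative errno.
 */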
int c4iw_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
        struct c4iw_cq *chp;
        unsigned long flags;
        int npolled;
        int err = 0;

        chp = to_c4iw_cq(ibcq);

        spin_lock_irqsave(&chp->lock, flags);
        for (npolled = 0; npolled < num_entries; ++npolled) {
                do {
                        err = c4iw_poll_cq_one(chp, wc + npolled);
                } while (err == -EAGAIN);
                if (err)
                        break;
        }
        spin_unlock_irqrestore(&chp->lock, flags);
        return !err || err == -ENODATA ? npolled : err;
}

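/*
 * c4iw_destroy_cq - verbs destroy-CQ entry point.
 *
 * Removes the CQ from the cqid table, waits for the last reference to
 * drop, tears down the hardware CQ, and frees the c4iw_cq.
 */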
int c4iw_destroy_cq(struct ib_cq *ib_cq)
{
        struct c4iw_cq *chp;
        struct c4iw_ucontext *ucontext;

        CTR2(KTR_IW_CXGBE, "%s ib_cq %p", __func__, ib_cq);
        chp = to_c4iw_cq(ib_cq);

        remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
        atomic_dec(&chp->refcnt);
        wait_event(chp->wait, !atomic_read(&chp->refcnt));

        ucontext = ib_cq->uobject ? to_c4iw_ucontext(ib_cq->uobject->context)
                                  : NULL;
        destroy_cq(&chp->rhp->rdev, &chp->cq,
                   ucontext ? &ucontext->uctx : &chp->cq.rdev->uctx);
        kfree(chp);
        return 0;
}

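/*
 * c4iw_create_cq - verbs create-CQ entry point.
 *
 * Sizes the hardware queue (status page, full/empty slack, 16-entry
 * alignment, 2x depth, 64-entry minimum), creates the hardware CQ, and for
 * user CQs sets up the mmap keys returned in c4iw_create_cq_resp.
 */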
struct ib_cq *
c4iw_create_cq(struct ib_device *ibdev, int entries, int vector,
    struct ib_ucontext *ib_context, struct ib_udata *udata)
{
        struct c4iw_dev *rhp;
        struct c4iw_cq *chp;
        struct c4iw_create_cq_resp uresp;
        struct c4iw_ucontext *ucontext = NULL;
        int ret;
        size_t memsize, hwentries;
        struct c4iw_mm_entry *mm, *mm2;

        CTR3(KTR_IW_CXGBE, "%s ib_dev %p entries %d", __func__, ibdev, entries);

        rhp = to_c4iw_dev(ibdev);

        chp = kzalloc(sizeof(*chp), GFP_KERNEL);
        if (!chp)
                return ERR_PTR(-ENOMEM);

        if (ib_context)
                ucontext = to_c4iw_ucontext(ib_context);

        /* account for the status page. */
        entries++;

        /* IQ needs one extra entry to differentiate full vs empty. */
        entries++;

        /*
         * entries must be multiple of 16 for HW.
         */
        entries = roundup(entries, 16);

        /*
         * Make actual HW queue 2x to avoid cidx_inc overflows.
         */
        hwentries = entries * 2;

        /*
         * Make HW queue at least 64 entries so GTS updates aren't too
         * frequent.
         */
        if (hwentries < 64)
                hwentries = 64;

        memsize = hwentries * sizeof *chp->cq.queue;

        /*
         * memsize must be a multiple of the page size if it's a user cq.
         */
        if (ucontext) {
                memsize = roundup(memsize, PAGE_SIZE);
                hwentries = memsize / sizeof *chp->cq.queue;
                while (hwentries > T4_MAX_IQ_SIZE) {
                        memsize -= PAGE_SIZE;
                        hwentries = memsize / sizeof *chp->cq.queue;
                }
        }
        chp->cq.size = hwentries;
        chp->cq.memsize = memsize;

        ret = create_cq(&rhp->rdev, &chp->cq,
                        ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
        if (ret)
                goto err1;

        chp->rhp = rhp;
        chp->cq.size--;                         /* status page */
        chp->ibcq.cqe = entries - 2;
        spin_lock_init(&chp->lock);
        spin_lock_init(&chp->comp_handler_lock);
        atomic_set(&chp->refcnt, 1);
        init_waitqueue_head(&chp->wait);
        ret = insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
        if (ret)
                goto err2;

        if (ucontext) {
                mm = kmalloc(sizeof *mm, GFP_KERNEL);
                if (!mm) {
                        /* Report the allocation failure instead of ERR_PTR(0). */
                        ret = -ENOMEM;
                        goto err3;
                }
                mm2 = kmalloc(sizeof *mm2, GFP_KERNEL);
                if (!mm2) {
                        ret = -ENOMEM;
                        goto err4;
                }

                memset(&uresp, 0, sizeof(uresp));
                uresp.qid_mask = rhp->rdev.cqmask;
                uresp.cqid = chp->cq.cqid;
                uresp.size = chp->cq.size;
                uresp.memsize = chp->cq.memsize;
                spin_lock(&ucontext->mmap_lock);
                uresp.key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                uresp.gts_key = ucontext->key;
                ucontext->key += PAGE_SIZE;
                spin_unlock(&ucontext->mmap_lock);
                ret = ib_copy_to_udata(udata, &uresp,
                                        sizeof(uresp) - sizeof(uresp.reserved));
                if (ret)
                        goto err5;

                mm->key = uresp.key;
                mm->addr = vtophys(chp->cq.queue);
                mm->len = chp->cq.memsize;
                insert_mmap(ucontext, mm);

                mm2->key = uresp.gts_key;
                mm2->addr = chp->cq.ugts;
                mm2->len = PAGE_SIZE;
                insert_mmap(ucontext, mm2);
        }
        CTR6(KTR_IW_CXGBE,
            "%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx",
            __func__, chp->cq.cqid, chp, chp->cq.size, chp->cq.memsize,
            (unsigned long long) chp->cq.dma_addr);
        return &chp->ibcq;
err5:
        kfree(mm2);
err4:
        kfree(mm);
err3:
        remove_handle(rhp, &rhp->cqidr, chp->cq.cqid);
err2:
        destroy_cq(&chp->rhp->rdev, &chp->cq,
                   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
err1:
        kfree(chp);
        return ERR_PTR(ret);
}

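/* CQ resize is not supported by this driver. */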
int c4iw_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata)
{
        return -ENOSYS;
}

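/*
 * c4iw_arm_cq - verbs request-notification entry point.
 *
 * Arms the hardware CQ, under the CQ lock, for the next solicited or any
 * completion.  A non-zero return from t4_arm_cq() is suppressed unless the
 * caller asked for IB_CQ_REPORT_MISSED_EVENTS.
 */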
int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
        struct c4iw_cq *chp;
        int ret;
        unsigned long flag;

        chp = to_c4iw_cq(ibcq);
        spin_lock_irqsave(&chp->lock, flag);
        ret = t4_arm_cq(&chp->cq,
                        (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
        spin_unlock_irqrestore(&chp->lock, flag);
        if (ret && !(flags & IB_CQ_REPORT_MISSED_EVENTS))
                ret = 0;
        return ret;
}
#endif