/*
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/sched.h>

#include <asm/io.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"

enum {
        MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE
};
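
/*
 * Queue buffers up to MTHCA_MAX_DIRECT_SRQ_SIZE are kept in a single
 * contiguous ("direct") allocation; larger queues are built from a
 * list of pages and indexed through queue.page_list (see get_wqe()
 * below).
 */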

struct mthca_tavor_srq_context {
        __be64 wqe_base_ds;     /* low 6 bits is descriptor size */
        __be32 state_pd;
        __be32 lkey;
        __be32 uar;
        __be16 limit_watermark;
        __be16 wqe_cnt;
        u32    reserved[2];
};

struct mthca_arbel_srq_context {
        __be32 state_logsize_srqn;
        __be32 lkey;
        __be32 db_index;
        __be32 logstride_usrpage;
        __be64 wqe_base;
        __be32 eq_pd;
        __be16 limit_watermark;
        __be16 wqe_cnt;
        u16    reserved1;
        __be16 wqe_counter;
        u32    reserved2[3];
};
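
/*
 * Both structures are hardware-visible SRQ context images: they are
 * filled into a mailbox buffer and handed to firmware with SW2HW_SRQ
 * (and read back with QUERY_SRQ), so all multi-byte fields are
 * big-endian as the device expects.
 */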

static void *get_wqe(struct mthca_srq *srq, int n)
{
        if (srq->is_direct)
                return srq->queue.direct.buf + (n << srq->wqe_shift);
        else
                return srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].buf +
                        ((n << srq->wqe_shift) & (PAGE_SIZE - 1));
}

/*
 * Return a pointer to the location within a WQE that we're using as a
 * link when the WQE is in the free list.  We use the imm field
 * because in the Tavor case, posting a WQE may overwrite the next
 * segment of the previous WQE, but a receive WQE will never touch the
 * imm field.  This avoids corrupting our free list if the previous
 * WQE has already completed and been put on the free list when we
 * post the next WQE.
 */
static inline int *wqe_to_link(void *wqe)
{
        return (int *) (wqe + offsetof(struct mthca_next_seg, imm));
}
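
/*
 * For example, *wqe_to_link(get_wqe(srq, 3)) == 7 means WQE 7 is the
 * next free entry after WQE 3; a link value of -1 terminates the
 * free list.
 */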

static void mthca_tavor_init_srq_context(struct mthca_dev *dev,
                                         struct mthca_pd *pd,
                                         struct mthca_srq *srq,
                                         struct mthca_tavor_srq_context *context)
{
        memset(context, 0, sizeof *context);

        context->wqe_base_ds = cpu_to_be64(1 << (srq->wqe_shift - 4));
        context->state_pd    = cpu_to_be32(pd->pd_num);
        context->lkey        = cpu_to_be32(srq->mr.ibmr.lkey);

        if (pd->ibpd.uobject)
                context->uar =
                        cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
        else
                context->uar = cpu_to_be32(dev->driver_uar.index);
}

static void mthca_arbel_init_srq_context(struct mthca_dev *dev,
                                         struct mthca_pd *pd,
                                         struct mthca_srq *srq,
                                         struct mthca_arbel_srq_context *context)
{
        int logsize;

        memset(context, 0, sizeof *context);
        logsize = ilog2(srq->max);
        context->state_logsize_srqn = cpu_to_be32(logsize << 24 | srq->srqn);
        context->lkey = cpu_to_be32(srq->mr.ibmr.lkey);
        context->db_index = cpu_to_be32(srq->db_index);
        context->logstride_usrpage = cpu_to_be32((srq->wqe_shift - 4) << 29);
        if (pd->ibpd.uobject)
                context->logstride_usrpage |=
                        cpu_to_be32(to_mucontext(pd->ibpd.uobject->context)->uar.index);
        else
                context->logstride_usrpage |= cpu_to_be32(dev->driver_uar.index);
        context->eq_pd = cpu_to_be32(MTHCA_EQ_ASYNC << 24 | pd->pd_num);
}
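
/*
 * Illustration of the Arbel context packing above (values made up):
 * with srq->max == 64 (logsize 6), srqn == 0x12 and wqe_shift == 6
 * (64-byte WQE stride),
 *
 *   state_logsize_srqn = 6 << 24 | 0x12      = 0x06000012
 *   logstride_usrpage  = (6 - 4) << 29 | uar = 0x40000000 | uar
 *
 * i.e. the log2 queue size goes above bit 24 alongside the SRQ
 * number, and the log2 stride (in 16-byte units) in the top bits of
 * logstride_usrpage, next to the UAR index.
 */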

static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq)
{
        mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue,
                       srq->is_direct, &srq->mr);
        kfree(srq->wrid);
}

static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
                               struct mthca_srq *srq)
{
        struct mthca_data_seg *scatter;
        void *wqe;
        int err;
        int i;

        if (pd->ibpd.uobject)
                return 0;

        srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL);
        if (!srq->wrid)
                return -ENOMEM;

        err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift,
                              MTHCA_MAX_DIRECT_SRQ_SIZE,
                              &srq->queue, &srq->is_direct, pd, 1, &srq->mr);
        if (err) {
                kfree(srq->wrid);
                return err;
        }
        /*
         * Now initialize the SRQ buffer so that all of the WQEs are
         * linked into the list of free WQEs.  In addition, set the
         * scatter list L_Keys to the sentinel value of 0x100.
         */
        for (i = 0; i < srq->max; ++i) {
                struct mthca_next_seg *next;

                next = wqe = get_wqe(srq, i);

                if (i < srq->max - 1) {
                        *wqe_to_link(wqe) = i + 1;
                        next->nda_op = htonl(((i + 1) << srq->wqe_shift) | 1);
                } else {
                        *wqe_to_link(wqe) = -1;
                        next->nda_op = 0;
                }

                for (scatter = wqe + sizeof (struct mthca_next_seg);
                     (void *) scatter < wqe + (1 << srq->wqe_shift);
                     ++scatter)
                        scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
        }

        srq->last = get_wqe(srq, srq->max - 1);

        return 0;
}
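
/*
 * After mthca_alloc_srq_buf() the free list is simply the WQEs in
 * order.  For srq->max == 4:
 *
 *   WQE 0 -> 1 -> 2 -> 3 -> -1 (end)
 *
 * with first_free = 0 and last_free = 3 set by mthca_alloc_srq()
 * below.
 */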

int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd,
                    struct ib_srq_attr *attr, struct mthca_srq *srq)
{
        struct mthca_mailbox *mailbox;
        int ds;
        int err;

        /* Sanity check SRQ size before proceeding */
        if (attr->max_wr  > dev->limits.max_srq_wqes ||
            attr->max_sge > dev->limits.max_srq_sge)
                return -EINVAL;

        srq->max      = attr->max_wr;
        srq->max_gs   = attr->max_sge;
        srq->counter  = 0;

        if (mthca_is_memfree(dev))
                srq->max = roundup_pow_of_two(srq->max + 1);
        else
                srq->max = srq->max + 1;

        ds = max(64UL,
                 roundup_pow_of_two(sizeof (struct mthca_next_seg) +
                                    srq->max_gs * sizeof (struct mthca_data_seg)));

        if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz))
                return -EINVAL;

        srq->wqe_shift = ilog2(ds);

        srq->srqn = mthca_alloc(&dev->srq_table.alloc);
        if (srq->srqn == -1)
                return -ENOMEM;

        if (mthca_is_memfree(dev)) {
                err = mthca_table_get(dev, dev->srq_table.table, srq->srqn);
                if (err)
                        goto err_out;

                if (!pd->ibpd.uobject) {
                        srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ,
                                                       srq->srqn, &srq->db);
                        if (srq->db_index < 0) {
                                err = -ENOMEM;
                                goto err_out_icm;
                        }
                }
        }

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
                goto err_out_db;
        }

        err = mthca_alloc_srq_buf(dev, pd, srq);
        if (err)
                goto err_out_mailbox;

        spin_lock_init(&srq->lock);
        srq->refcount = 1;
        init_waitqueue_head(&srq->wait);
        mutex_init(&srq->mutex);

        if (mthca_is_memfree(dev))
                mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf);
        else
                mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf);

        err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn);

        if (err) {
                mthca_warn(dev, "SW2HW_SRQ failed (%d)\n", err);
                goto err_out_free_buf;
        }

        spin_lock_irq(&dev->srq_table.lock);
        if (mthca_array_set(&dev->srq_table.srq,
                            srq->srqn & (dev->limits.num_srqs - 1),
                            srq)) {
                spin_unlock_irq(&dev->srq_table.lock);
                goto err_out_free_srq;
        }
        spin_unlock_irq(&dev->srq_table.lock);

        mthca_free_mailbox(dev, mailbox);

        srq->first_free = 0;
        srq->last_free  = srq->max - 1;

        attr->max_wr    = srq->max - 1;
        attr->max_sge   = srq->max_gs;

        return 0;

err_out_free_srq:
        err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn);
        if (err)
                mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);

err_out_free_buf:
        if (!pd->ibpd.uobject)
                mthca_free_srq_buf(dev, srq);

err_out_mailbox:
        mthca_free_mailbox(dev, mailbox);

err_out_db:
        if (!pd->ibpd.uobject && mthca_is_memfree(dev))
                mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);

err_out_icm:
        mthca_table_put(dev, dev->srq_table.table, srq->srqn);

err_out:
        mthca_free(&dev->srq_table.alloc, srq->srqn);

        return err;
}
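
/*
 * A rough sketch of the caller's side (the actual entry point lives
 * in the IB provider glue, e.g. a create_srq method): the verbs
 * layer fills in an ib_srq_attr, the provider allocates its
 * mthca_srq wrapper and then calls something like
 *
 *     err = mthca_alloc_srq(dev, to_mpd(pd), &init_attr->attr, srq);
 *
 * On success attr->max_wr/max_sge are updated to the values actually
 * supported, which the verbs layer reports back to the consumer.
 */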

static inline int get_srq_refcount(struct mthca_dev *dev, struct mthca_srq *srq)
{
        int c;

        spin_lock_irq(&dev->srq_table.lock);
        c = srq->refcount;
        spin_unlock_irq(&dev->srq_table.lock);

        return c;
}

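/*
 * Teardown order: move the SRQ back to software ownership (HW2SW_SRQ),
 * unhook it from the srq_table so no new events can look it up, drop
 * the initial reference, and then wait for any event handlers still
 * holding a reference (see mthca_srq_event()) before freeing the
 * buffers.
 */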
void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq)
{
        struct mthca_mailbox *mailbox;
        int err;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox)) {
                mthca_warn(dev, "No memory for mailbox to free SRQ.\n");
                return;
        }

        err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn);
        if (err)
                mthca_warn(dev, "HW2SW_SRQ failed (%d)\n", err);

        spin_lock_irq(&dev->srq_table.lock);
        mthca_array_clear(&dev->srq_table.srq,
                          srq->srqn & (dev->limits.num_srqs - 1));
        --srq->refcount;
        spin_unlock_irq(&dev->srq_table.lock);

        wait_event(srq->wait, !get_srq_refcount(dev, srq));

        if (!srq->ibsrq.uobject) {
                mthca_free_srq_buf(dev, srq);
                if (mthca_is_memfree(dev))
                        mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index);
        }

        mthca_table_put(dev, dev->srq_table.table, srq->srqn);
        mthca_free(&dev->srq_table.alloc, srq->srqn);
        mthca_free_mailbox(dev, mailbox);
}

int mthca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                     enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
{
        struct mthca_dev *dev = to_mdev(ibsrq->device);
        struct mthca_srq *srq = to_msrq(ibsrq);
        int ret = 0;

        /* We don't support resizing SRQs (yet?) */
        if (attr_mask & IB_SRQ_MAX_WR)
                return -EINVAL;

        if (attr_mask & IB_SRQ_LIMIT) {
                u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max;
                if (attr->srq_limit > max_wr)
                        return -EINVAL;

                mutex_lock(&srq->mutex);
                ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit);
                mutex_unlock(&srq->mutex);
        }

        return ret;
}

int mthca_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
{
        struct mthca_dev *dev = to_mdev(ibsrq->device);
        struct mthca_srq *srq = to_msrq(ibsrq);
        struct mthca_mailbox *mailbox;
        struct mthca_arbel_srq_context *arbel_ctx;
        struct mthca_tavor_srq_context *tavor_ctx;
        int err;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox);
        if (err)
                goto out;

        if (mthca_is_memfree(dev)) {
                arbel_ctx = mailbox->buf;
                srq_attr->srq_limit = be16_to_cpu(arbel_ctx->limit_watermark);
        } else {
                tavor_ctx = mailbox->buf;
                srq_attr->srq_limit = be16_to_cpu(tavor_ctx->limit_watermark);
        }

        srq_attr->max_wr  = srq->max - 1;
        srq_attr->max_sge = srq->max_gs;

out:
        mthca_free_mailbox(dev, mailbox);

        return err;
}

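/*
 * Called from the EQ dispatch path for SRQ async events (e.g. the
 * limit event armed via mthca_ARM_SRQ()).  The reference taken under
 * srq_table.lock keeps the SRQ alive while the consumer's event
 * handler runs; mthca_free_srq() waits for the refcount to drop to
 * zero.
 */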
void mthca_srq_event(struct mthca_dev *dev, u32 srqn,
                     enum ib_event_type event_type)
{
        struct mthca_srq *srq;
        struct ib_event event;

        spin_lock(&dev->srq_table.lock);
        srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1));
        if (srq)
                ++srq->refcount;
        spin_unlock(&dev->srq_table.lock);

        if (!srq) {
                mthca_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
                return;
        }

        if (!srq->ibsrq.event_handler)
                goto out;

        event.device      = &dev->ib_dev;
        event.event       = event_type;
        event.element.srq = &srq->ibsrq;
        srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context);

out:
        spin_lock(&dev->srq_table.lock);
        if (!--srq->refcount)
                wake_up(&srq->wait);
        spin_unlock(&dev->srq_table.lock);
}

/*
 * This function must be called with IRQs disabled.
 */
void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr)
{
        int ind;
        struct mthca_next_seg *last_free;

        ind = wqe_addr >> srq->wqe_shift;

        spin_lock(&srq->lock);

        last_free = get_wqe(srq, srq->last_free);
        *wqe_to_link(last_free) = ind;
        last_free->nda_op = htonl((ind << srq->wqe_shift) | 1);
        *wqe_to_link(get_wqe(srq, ind)) = -1;
        srq->last_free = ind;

        spin_unlock(&srq->lock);
}
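
/*
 * Example: with a free list 2 -> 5 -> -1 (last_free == 5), completing
 * the WQE at index 0 appends it at the tail, giving 2 -> 5 -> 0 -> -1
 * with last_free == 0, so WQEs are recycled FIFO-style rather than
 * reused immediately.
 */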

int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                              struct ib_recv_wr **bad_wr)
{
        struct mthca_dev *dev = to_mdev(ibsrq->device);
        struct mthca_srq *srq = to_msrq(ibsrq);
        unsigned long flags;
        int err = 0;
        int first_ind;
        int ind;
        int next_ind;
        int nreq;
        int i;
        void *wqe;
        void *prev_wqe;

        spin_lock_irqsave(&srq->lock, flags);

        first_ind = srq->first_free;

        for (nreq = 0; wr; wr = wr->next) {
                ind       = srq->first_free;
                wqe       = get_wqe(srq, ind);
                next_ind  = *wqe_to_link(wqe);

                if (unlikely(next_ind < 0)) {
                        mthca_err(dev, "SRQ %06x full\n", srq->srqn);
                        err = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

                prev_wqe  = srq->last;
                srq->last = wqe;

                ((struct mthca_next_seg *) wqe)->ee_nds = 0;
                /* flags field will always remain 0 */

                wqe += sizeof (struct mthca_next_seg);

                if (unlikely(wr->num_sge > srq->max_gs)) {
                        err = -EINVAL;
                        *bad_wr = wr;
                        srq->last = prev_wqe;
                        break;
                }

                for (i = 0; i < wr->num_sge; ++i) {
                        mthca_set_data_seg(wqe, wr->sg_list + i);
                        wqe += sizeof (struct mthca_data_seg);
                }

                if (i < srq->max_gs)
                        mthca_set_data_seg_inval(wqe);

                ((struct mthca_next_seg *) prev_wqe)->ee_nds =
                        cpu_to_be32(MTHCA_NEXT_DBD);

                srq->wrid[ind]  = wr->wr_id;
                srq->first_free = next_ind;

                ++nreq;
                if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
                        nreq = 0;

                        /*
                         * Make sure that descriptors are written
                         * before doorbell is rung.
                         */
                        wmb();

                        mthca_write64(first_ind << srq->wqe_shift, srq->srqn << 8,
                                      dev->kar + MTHCA_RECEIVE_DOORBELL,
                                      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

                        first_ind = srq->first_free;
                }
        }

        if (likely(nreq)) {
                /*
                 * Make sure that descriptors are written before
                 * doorbell is rung.
                 */
                wmb();

                mthca_write64(first_ind << srq->wqe_shift, (srq->srqn << 8) | nreq,
                              dev->kar + MTHCA_RECEIVE_DOORBELL,
                              MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
        }

        /*
         * Make sure doorbells don't leak out of SRQ spinlock and
         * reach the HCA out of order:
         */
        mmiowb();

        spin_unlock_irqrestore(&srq->lock, flags);
        return err;
}
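
/*
 * On Tavor the doorbell is an MMIO write: the first word carries the
 * byte offset of the first new WQE, and the second packs the SRQ
 * number with the request count in its low byte, which is presumably
 * why postings are flushed every MTHCA_TAVOR_MAX_WQES_PER_RECV_DB
 * WQEs rather than accumulated without bound.
 */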

int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                              struct ib_recv_wr **bad_wr)
{
        struct mthca_dev *dev = to_mdev(ibsrq->device);
        struct mthca_srq *srq = to_msrq(ibsrq);
        unsigned long flags;
        int err = 0;
        int ind;
        int next_ind;
        int nreq;
        int i;
        void *wqe;

        spin_lock_irqsave(&srq->lock, flags);

        for (nreq = 0; wr; ++nreq, wr = wr->next) {
                ind       = srq->first_free;
                wqe       = get_wqe(srq, ind);
                next_ind  = *wqe_to_link(wqe);

                if (unlikely(next_ind < 0)) {
                        mthca_err(dev, "SRQ %06x full\n", srq->srqn);
                        err = -ENOMEM;
                        *bad_wr = wr;
                        break;
                }

                ((struct mthca_next_seg *) wqe)->ee_nds = 0;
                /* flags field will always remain 0 */

                wqe += sizeof (struct mthca_next_seg);

                if (unlikely(wr->num_sge > srq->max_gs)) {
                        err = -EINVAL;
                        *bad_wr = wr;
                        break;
                }

                for (i = 0; i < wr->num_sge; ++i) {
                        mthca_set_data_seg(wqe, wr->sg_list + i);
                        wqe += sizeof (struct mthca_data_seg);
                }

                if (i < srq->max_gs)
                        mthca_set_data_seg_inval(wqe);

                srq->wrid[ind]  = wr->wr_id;
                srq->first_free = next_ind;
        }

        if (likely(nreq)) {
                srq->counter += nreq;

                /*
                 * Make sure that descriptors are written before
                 * we write doorbell record.
                 */
                wmb();
                *srq->db = cpu_to_be32(srq->counter);
        }

        spin_unlock_irqrestore(&srq->lock, flags);
        return err;
}
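
/*
 * Note the contrast with the Tavor path above: mem-free (Arbel) HCAs
 * use a doorbell record in host memory, so posting is completed by
 * bumping srq->counter and storing it to *srq->db (allocated with
 * mthca_alloc_db()) instead of ringing an MMIO doorbell per batch.
 */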

int mthca_max_srq_sge(struct mthca_dev *dev)
{
        if (mthca_is_memfree(dev))
                return dev->limits.max_sg;

        /*
         * SRQ allocations are based on powers of 2 for Tavor
         * (although they only need to be multiples of 16 bytes).
         *
         * Therefore, we need to base the max number of sg entries on
         * the largest power of 2 descriptor size that is <= the
         * actual max WQE descriptor size, rather than return the
         * max_sg value given by the firmware (which is based on WQE
         * sizes as multiples of 16, not powers of 2).
         *
         * If the SRQ implementation is changed for Tavor to be based
         * on multiples of 16, the calculation below can be deleted
         * and the FW max_sg value returned.
         */
        return min_t(int, dev->limits.max_sg,
                     ((1 << (fls(dev->limits.max_desc_sz) - 1)) -
                      sizeof (struct mthca_next_seg)) /
                     sizeof (struct mthca_data_seg));
}
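
/*
 * Worked example (assuming the 16-byte mthca_next_seg and
 * mthca_data_seg layouts from mthca_wqe.h): if the firmware reports
 * max_desc_sz == 1008, the largest power of 2 that fits is 512, so
 * the result is min(max_sg, (512 - 16) / 16) = min(max_sg, 31).
 */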

int mthca_init_srq_table(struct mthca_dev *dev)
{
        int err;

        if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
                return 0;

        spin_lock_init(&dev->srq_table.lock);

        err = mthca_alloc_init(&dev->srq_table.alloc,
                               dev->limits.num_srqs,
                               dev->limits.num_srqs - 1,
                               dev->limits.reserved_srqs);
        if (err)
                return err;

        err = mthca_array_init(&dev->srq_table.srq,
                               dev->limits.num_srqs);
        if (err)
                mthca_alloc_cleanup(&dev->srq_table.alloc);

        return err;
}

void mthca_cleanup_srq_table(struct mthca_dev *dev)
{
        if (!(dev->mthca_flags & MTHCA_FLAG_SRQ))
                return;

        mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs);
        mthca_alloc_cleanup(&dev->srq_table.alloc);
}