/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: verbs.c 1349 2004-12-16 21:09:43Z roland $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/module.h>
#include <sys/endian.h>

#include <contrib/rdma/ib_verbs.h>
#include <contrib/rdma/ib_cache.h>

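/*
 * Convert an IB static rate enumeration to a multiple of the 2.5 Gb/s
 * base rate; returns -1 for an unrecognized rate.
 */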
int ib_rate_to_mult(enum ib_rate rate)
{
        switch (rate) {
        case IB_RATE_2_5_GBPS: return  1;
        case IB_RATE_5_GBPS:   return  2;
        case IB_RATE_10_GBPS:  return  4;
        case IB_RATE_20_GBPS:  return  8;
        case IB_RATE_30_GBPS:  return 12;
        case IB_RATE_40_GBPS:  return 16;
        case IB_RATE_60_GBPS:  return 24;
        case IB_RATE_80_GBPS:  return 32;
        case IB_RATE_120_GBPS: return 48;
        default:               return -1;
        }
}

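/*
 * Convert a multiple of the 2.5 Gb/s base rate back to the
 * corresponding IB static rate enumeration; unknown multiples map to
 * IB_RATE_PORT_CURRENT.
 */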
enum ib_rate mult_to_ib_rate(int mult)
{
        switch (mult) {
        case 1:  return IB_RATE_2_5_GBPS;
        case 2:  return IB_RATE_5_GBPS;
        case 4:  return IB_RATE_10_GBPS;
        case 8:  return IB_RATE_20_GBPS;
        case 12: return IB_RATE_30_GBPS;
        case 16: return IB_RATE_40_GBPS;
        case 24: return IB_RATE_60_GBPS;
        case 32: return IB_RATE_80_GBPS;
        case 48: return IB_RATE_120_GBPS;
        default: return IB_RATE_PORT_CURRENT;
        }
}

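/*
 * Map a node type to its transport: IB for HCAs, switches and routers,
 * iWARP for RNICs.  Any other node type is a programming error.
 */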
enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
        switch (node_type) {
        case RDMA_NODE_IB_CA:
        case RDMA_NODE_IB_SWITCH:
        case RDMA_NODE_IB_ROUTER:
                return RDMA_TRANSPORT_IB;
        case RDMA_NODE_RNIC:
                return RDMA_TRANSPORT_IWARP;
        default:
                panic("bad condition");
                return 0;
        }
}

/* Protection domains */

struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
        struct ib_pd *pd;

        pd = device->alloc_pd(device, NULL, NULL);

        if (!IS_ERR(pd)) {
                pd->device  = device;
                pd->uobject = NULL;
                atomic_store_rel_int(&pd->usecnt, 0);
        }

        return pd;
}

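/*
 * A PD may only be deallocated once every object created on it (AHs,
 * SRQs, QPs, MRs, MWs, FMRs) has released its reference, i.e. once
 * pd->usecnt has dropped back to zero.
 */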
int ib_dealloc_pd(struct ib_pd *pd)
{
        if (atomic_load_acq_int(&pd->usecnt))
                return (EBUSY);

        return pd->device->dealloc_pd(pd);
}

/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
        struct ib_ah *ah;

        ah = pd->device->create_ah(pd, ah_attr);

        if (!IS_ERR(ah)) {
                ah->device  = pd->device;
                ah->pd      = pd;
                ah->uobject = NULL;
                atomic_add_acq_int(&pd->usecnt, 1);
        }

        return ah;
}

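/*
 * Initialize an address handle attribute structure from a received
 * completion (and its GRH, if present), reversing the path so that a
 * reply can be sent back to the originator of the work request.
 */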
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
                       struct ib_grh *grh, struct ib_ah_attr *ah_attr)
{
        u32 flow_class;
        u16 gid_index;
        int ret;

        memset(ah_attr, 0, sizeof *ah_attr);
        ah_attr->dlid = wc->slid;
        ah_attr->sl = wc->sl;
        ah_attr->src_path_bits = wc->dlid_path_bits;
        ah_attr->port_num = port_num;

        if (wc->wc_flags & IB_WC_GRH) {
                ah_attr->ah_flags = IB_AH_GRH;
                ah_attr->grh.dgid = grh->sgid;

                ret = ib_find_cached_gid(device, &grh->dgid, &port_num,
                                         &gid_index);
                if (ret)
                        return ret;

                ah_attr->grh.sgid_index = (u8) gid_index;
                flow_class = be32toh(grh->version_tclass_flow);
                ah_attr->grh.flow_label = flow_class & 0xFFFFF;
                ah_attr->grh.hop_limit = 0xFF;
                ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
        }
        return 0;
}

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
                                   struct ib_grh *grh, u8 port_num)
{
        struct ib_ah_attr ah_attr;
        int ret;

        ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
        if (ret)
                return ERR_PTR(ret);

        return ib_create_ah(pd, &ah_attr);
}

int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
        return ah->device->modify_ah ?
                ah->device->modify_ah(ah, ah_attr) :
                ENOSYS;
}

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
        return ah->device->query_ah ?
                ah->device->query_ah(ah, ah_attr) :
                ENOSYS;
}

int ib_destroy_ah(struct ib_ah *ah)
{
        struct ib_pd *pd;
        int ret;

        pd = ah->pd;
        ret = ah->device->destroy_ah(ah);
        if (!ret)
                atomic_subtract_acq_int(&pd->usecnt, 1);

        return ret;
}

/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
                             struct ib_srq_init_attr *srq_init_attr)
{
        struct ib_srq *srq;

        if (!pd->device->create_srq)
                return ERR_PTR(ENOSYS);

        srq = pd->device->create_srq(pd, srq_init_attr, NULL);

        if (!IS_ERR(srq)) {
                srq->device        = pd->device;
                srq->pd            = pd;
                srq->uobject       = NULL;
                srq->event_handler = srq_init_attr->event_handler;
                srq->srq_context   = srq_init_attr->srq_context;
                atomic_add_acq_int(&pd->usecnt, 1);
                atomic_store_rel_int(&srq->usecnt, 0);
        }

        return srq;
}

int ib_modify_srq(struct ib_srq *srq,
                  struct ib_srq_attr *srq_attr,
                  enum ib_srq_attr_mask srq_attr_mask)
{
        return srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL);
}

int ib_query_srq(struct ib_srq *srq,
                 struct ib_srq_attr *srq_attr)
{
        return srq->device->query_srq ?
                srq->device->query_srq(srq, srq_attr) : ENOSYS;
}

int ib_destroy_srq(struct ib_srq *srq)
{
        struct ib_pd *pd;
        int ret;

        if (atomic_load_acq_int(&srq->usecnt))
                return (EBUSY);

        pd = srq->pd;

        ret = srq->device->destroy_srq(srq);
        if (!ret)
                atomic_subtract_acq_int(&pd->usecnt, 1);

        return ret;
}

/* Queue pairs */

struct ib_qp *ib_create_qp(struct ib_pd *pd,
                           struct ib_qp_init_attr *qp_init_attr)
{
        struct ib_qp *qp;

        qp = pd->device->create_qp(pd, qp_init_attr, NULL);

        if (!IS_ERR(qp)) {
                qp->device        = pd->device;
                qp->pd            = pd;
                qp->send_cq       = qp_init_attr->send_cq;
                qp->recv_cq       = qp_init_attr->recv_cq;
                qp->srq           = qp_init_attr->srq;
                qp->uobject       = NULL;
                qp->event_handler = qp_init_attr->event_handler;
                qp->qp_context    = qp_init_attr->qp_context;
                qp->qp_type       = qp_init_attr->qp_type;
                atomic_add_acq_int(&pd->usecnt, 1);
                atomic_add_acq_int(&qp_init_attr->send_cq->usecnt, 1);
                atomic_add_acq_int(&qp_init_attr->recv_cq->usecnt, 1);
                if (qp_init_attr->srq)
                        atomic_add_acq_int(&qp_init_attr->srq->usecnt, 1);
        }

        return qp;
}

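/*
 * Table of legal QP state transitions.  For each (current, next) state
 * pair it records whether the transition is allowed and, per QP type,
 * which attributes are required and which are optional in the
 * ib_modify_qp() attribute mask.
 */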
static const struct {
        int                     valid;
        enum ib_qp_attr_mask    req_param[IB_QPT_RAW_ETY + 1];
        enum ib_qp_attr_mask    opt_param[IB_QPT_RAW_ETY + 1];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
        [IB_QPS_RESET] = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR]   = { .valid = 1 },
                [IB_QPS_INIT]  = {
                        .valid = 1,
                        .req_param = {
                                [IB_QPT_UD]  = (IB_QP_PKEY_INDEX                |
                                                IB_QP_PORT                      |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_PKEY_INDEX                |
                                                IB_QP_PORT                      |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_RC]  = (IB_QP_PKEY_INDEX                |
                                                IB_QP_PORT                      |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_SMI] = (IB_QP_PKEY_INDEX                |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_PKEY_INDEX                |
                                                IB_QP_QKEY),
                        }
                },
        },
        [IB_QPS_INIT]  = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR] =   { .valid = 1 },
                [IB_QPS_INIT]  = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_PKEY_INDEX                |
                                                IB_QP_PORT                      |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_PKEY_INDEX                |
                                                IB_QP_PORT                      |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_RC]  = (IB_QP_PKEY_INDEX                |
                                                IB_QP_PORT                      |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_SMI] = (IB_QP_PKEY_INDEX                |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_PKEY_INDEX                |
                                                IB_QP_QKEY),
                        }
                },
                [IB_QPS_RTR]   = {
                        .valid = 1,
                        .req_param = {
                                [IB_QPT_UC]  = (IB_QP_AV                        |
                                                IB_QP_PATH_MTU                  |
                                                IB_QP_DEST_QPN                  |
                                                IB_QP_RQ_PSN),
                                [IB_QPT_RC]  = (IB_QP_AV                        |
                                                IB_QP_PATH_MTU                  |
                                                IB_QP_DEST_QPN                  |
                                                IB_QP_RQ_PSN                    |
                                                IB_QP_MAX_DEST_RD_ATOMIC        |
                                                IB_QP_MIN_RNR_TIMER),
                        },
                        .opt_param = {
                                 [IB_QPT_UD]  = (IB_QP_PKEY_INDEX               |
                                                 IB_QP_QKEY),
                                 [IB_QPT_UC]  = (IB_QP_ALT_PATH                 |
                                                 IB_QP_ACCESS_FLAGS             |
                                                 IB_QP_PKEY_INDEX),
                                 [IB_QPT_RC]  = (IB_QP_ALT_PATH                 |
                                                 IB_QP_ACCESS_FLAGS             |
                                                 IB_QP_PKEY_INDEX),
                                 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX               |
                                                 IB_QP_QKEY),
                                 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX               |
                                                 IB_QP_QKEY),
                         }
                }
        },
        [IB_QPS_RTR]   = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR] =   { .valid = 1 },
                [IB_QPS_RTS]   = {
                        .valid = 1,
                        .req_param = {
                                [IB_QPT_UD]  = IB_QP_SQ_PSN,
                                [IB_QPT_UC]  = IB_QP_SQ_PSN,
                                [IB_QPT_RC]  = (IB_QP_TIMEOUT                   |
                                                IB_QP_RETRY_CNT                 |
                                                IB_QP_RNR_RETRY                 |
                                                IB_QP_SQ_PSN                    |
                                                IB_QP_MAX_QP_RD_ATOMIC),
                                [IB_QPT_SMI] = IB_QP_SQ_PSN,
                                [IB_QPT_GSI] = IB_QP_SQ_PSN,
                        },
                        .opt_param = {
                                 [IB_QPT_UD]  = (IB_QP_CUR_STATE                |
                                                 IB_QP_QKEY),
                                 [IB_QPT_UC]  = (IB_QP_CUR_STATE                |
                                                 IB_QP_ALT_PATH                 |
                                                 IB_QP_ACCESS_FLAGS             |
                                                 IB_QP_PATH_MIG_STATE),
                                 [IB_QPT_RC]  = (IB_QP_CUR_STATE                |
                                                 IB_QP_ALT_PATH                 |
                                                 IB_QP_ACCESS_FLAGS             |
                                                 IB_QP_MIN_RNR_TIMER            |
                                                 IB_QP_PATH_MIG_STATE),
                                 [IB_QPT_SMI] = (IB_QP_CUR_STATE                |
                                                 IB_QP_QKEY),
                                 [IB_QPT_GSI] = (IB_QP_CUR_STATE                |
                                                 IB_QP_QKEY),
                         }
                }
        },
        [IB_QPS_RTS]   = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR] =   { .valid = 1 },
                [IB_QPS_RTS]   = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_CUR_STATE                 |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_CUR_STATE                 |
                                                IB_QP_ACCESS_FLAGS              |
                                                IB_QP_ALT_PATH                  |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_RC]  = (IB_QP_CUR_STATE                 |
                                                IB_QP_ACCESS_FLAGS              |
                                                IB_QP_ALT_PATH                  |
                                                IB_QP_PATH_MIG_STATE            |
                                                IB_QP_MIN_RNR_TIMER),
                                [IB_QPT_SMI] = (IB_QP_CUR_STATE                 |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_CUR_STATE                 |
                                                IB_QP_QKEY),
                        }
                },
                [IB_QPS_SQD]   = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
                                [IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
                                [IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
                                [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
                                [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
                        }
                },
        },
        [IB_QPS_SQD]   = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR] =   { .valid = 1 },
                [IB_QPS_RTS]   = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_CUR_STATE                 |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_CUR_STATE                 |
                                                IB_QP_ALT_PATH                  |
                                                IB_QP_ACCESS_FLAGS              |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_RC]  = (IB_QP_CUR_STATE                 |
                                                IB_QP_ALT_PATH                  |
                                                IB_QP_ACCESS_FLAGS              |
                                                IB_QP_MIN_RNR_TIMER             |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_SMI] = (IB_QP_CUR_STATE                 |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_CUR_STATE                 |
                                                IB_QP_QKEY),
                        }
                },
                [IB_QPS_SQD]   = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_PKEY_INDEX                |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_AV                        |
                                                IB_QP_ALT_PATH                  |
                                                IB_QP_ACCESS_FLAGS              |
                                                IB_QP_PKEY_INDEX                |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_RC]  = (IB_QP_PORT                      |
                                                IB_QP_AV                        |
                                                IB_QP_TIMEOUT                   |
                                                IB_QP_RETRY_CNT                 |
                                                IB_QP_RNR_RETRY                 |
                                                IB_QP_MAX_QP_RD_ATOMIC          |
                                                IB_QP_MAX_DEST_RD_ATOMIC        |
                                                IB_QP_ALT_PATH                  |
                                                IB_QP_ACCESS_FLAGS              |
                                                IB_QP_PKEY_INDEX                |
                                                IB_QP_MIN_RNR_TIMER             |
                                                IB_QP_PATH_MIG_STATE),
                                [IB_QPT_SMI] = (IB_QP_PKEY_INDEX                |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_PKEY_INDEX                |
                                                IB_QP_QKEY),
                        }
                }
        },
        [IB_QPS_SQE]   = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR] =   { .valid = 1 },
                [IB_QPS_RTS]   = {
                        .valid = 1,
                        .opt_param = {
                                [IB_QPT_UD]  = (IB_QP_CUR_STATE                 |
                                                IB_QP_QKEY),
                                [IB_QPT_UC]  = (IB_QP_CUR_STATE                 |
                                                IB_QP_ACCESS_FLAGS),
                                [IB_QPT_SMI] = (IB_QP_CUR_STATE                 |
                                                IB_QP_QKEY),
                                [IB_QPT_GSI] = (IB_QP_CUR_STATE                 |
                                                IB_QP_QKEY),
                        }
                }
        },
        [IB_QPS_ERR] = {
                [IB_QPS_RESET] = { .valid = 1 },
                [IB_QPS_ERR] =   { .valid = 1 }
        }
};

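/*
 * Validate a QP state transition and attribute mask against
 * qp_state_table: the transition must be legal, all required
 * attributes must be present in the mask, and no attributes outside
 * the required/optional sets may be set.
 */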
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
                       enum ib_qp_type type, enum ib_qp_attr_mask mask)
{
        enum ib_qp_attr_mask req_param, opt_param;

        if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
            next_state < 0 || next_state > IB_QPS_ERR)
                return 0;

        if (mask & IB_QP_CUR_STATE  &&
            cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
            cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
                return 0;

        if (!qp_state_table[cur_state][next_state].valid)
                return 0;

        req_param = qp_state_table[cur_state][next_state].req_param[type];
        opt_param = qp_state_table[cur_state][next_state].opt_param[type];

        if ((mask & req_param) != req_param)
                return 0;

        if (mask & ~(req_param | opt_param | IB_QP_STATE))
                return 0;

        return 1;
}

int ib_modify_qp(struct ib_qp *qp,
                 struct ib_qp_attr *qp_attr,
                 int qp_attr_mask)
{
        return qp->device->modify_qp(qp, qp_attr, qp_attr_mask, NULL);
}

int ib_query_qp(struct ib_qp *qp,
                struct ib_qp_attr *qp_attr,
                int qp_attr_mask,
                struct ib_qp_init_attr *qp_init_attr)
{
        return qp->device->query_qp ?
                qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
                ENOSYS;
}

int ib_destroy_qp(struct ib_qp *qp)
{
        struct ib_pd *pd;
        struct ib_cq *scq, *rcq;
        struct ib_srq *srq;
        int ret;

        pd  = qp->pd;
        scq = qp->send_cq;
        rcq = qp->recv_cq;
        srq = qp->srq;

        ret = qp->device->destroy_qp(qp);
        if (!ret) {
                atomic_subtract_acq_int(&pd->usecnt, 1);
                atomic_subtract_acq_int(&scq->usecnt, 1);
                atomic_subtract_acq_int(&rcq->usecnt, 1);
                if (srq)
                        atomic_subtract_acq_int(&srq->usecnt, 1);
        }

        return ret;
}

/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
                           ib_comp_handler comp_handler,
                           void (*event_handler)(struct ib_event *, void *),
                           void *cq_context, int cqe, int comp_vector)
{
        struct ib_cq *cq;

        cq = device->create_cq(device, cqe, comp_vector, NULL, NULL);

        if (!IS_ERR(cq)) {
                cq->device        = device;
                cq->uobject       = NULL;
                cq->comp_handler  = comp_handler;
                cq->event_handler = event_handler;
                cq->cq_context    = cq_context;
                atomic_store_rel_int(&cq->usecnt, 0);
        }

        return cq;
}

int ib_destroy_cq(struct ib_cq *cq)
{
        if (atomic_load_acq_int(&cq->usecnt))
                return (EBUSY);

        return cq->device->destroy_cq(cq);
}

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
        return cq->device->resize_cq ?
                cq->device->resize_cq(cq, cqe, NULL) : ENOSYS;
}

/* Memory regions */

struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
        struct ib_mr *mr;

        mr = pd->device->get_dma_mr(pd, mr_access_flags);

        if (!IS_ERR(mr)) {
                mr->device  = pd->device;
                mr->pd      = pd;
                mr->uobject = NULL;
                atomic_add_acq_int(&pd->usecnt, 1);
                atomic_store_rel_int(&mr->usecnt, 0);
        }

        return mr;
}

struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
                             struct ib_phys_buf *phys_buf_array,
                             int num_phys_buf,
                             int mr_access_flags,
                             u64 *iova_start)
{
        struct ib_mr *mr;

        mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
                                     mr_access_flags, iova_start);

        if (!IS_ERR(mr)) {
                mr->device  = pd->device;
                mr->pd      = pd;
                mr->uobject = NULL;
                atomic_add_acq_int(&pd->usecnt, 1);
                atomic_store_rel_int(&mr->usecnt, 0);
        }

        return mr;
}

int ib_rereg_phys_mr(struct ib_mr *mr,
                     int mr_rereg_mask,
                     struct ib_pd *pd,
                     struct ib_phys_buf *phys_buf_array,
                     int num_phys_buf,
                     int mr_access_flags,
                     u64 *iova_start)
{
        struct ib_pd *old_pd;
        int ret;

        if (!mr->device->rereg_phys_mr)
                return (ENOSYS);

        if (atomic_load_acq_int(&mr->usecnt))
                return (EBUSY);

        old_pd = mr->pd;

        ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
                                        phys_buf_array, num_phys_buf,
                                        mr_access_flags, iova_start);

        if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
                atomic_subtract_acq_int(&old_pd->usecnt, 1);
                atomic_add_acq_int(&pd->usecnt, 1);
        }

        return ret;
}

int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
        return mr->device->query_mr ?
                mr->device->query_mr(mr, mr_attr) : ENOSYS;
}

int ib_dereg_mr(struct ib_mr *mr)
{
        struct ib_pd *pd;
        int ret;

        if (atomic_load_acq_int(&mr->usecnt))
                return (EBUSY);

        pd = mr->pd;
        ret = mr->device->dereg_mr(mr);
        if (!ret)
                atomic_subtract_acq_int(&pd->usecnt, 1);

        return ret;
}

/* Memory windows */

struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
{
        struct ib_mw *mw;

        if (!pd->device->alloc_mw)
                return ERR_PTR(ENOSYS);

        mw = pd->device->alloc_mw(pd);
        if (!IS_ERR(mw)) {
                mw->device  = pd->device;
                mw->pd      = pd;
                mw->uobject = NULL;
                atomic_add_acq_int(&pd->usecnt, 1);
        }

        return mw;
}

int ib_dealloc_mw(struct ib_mw *mw)
{
        struct ib_pd *pd;
        int ret;

        pd = mw->pd;
        ret = mw->device->dealloc_mw(mw);
        if (!ret)
                atomic_subtract_acq_int(&pd->usecnt, 1);

        return ret;
}

/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
                            int mr_access_flags,
                            struct ib_fmr_attr *fmr_attr)
{
        struct ib_fmr *fmr;

        if (!pd->device->alloc_fmr)
                return ERR_PTR(ENOSYS);

        fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
        if (!IS_ERR(fmr)) {
                fmr->device = pd->device;
                fmr->pd     = pd;
                atomic_add_acq_int(&pd->usecnt, 1);
        }

        return fmr;
}

int ib_unmap_fmr(struct ib_fmr_list_head *fmr_list)
{
        struct ib_fmr *fmr;

        if (TAILQ_EMPTY(fmr_list))
                return 0;

        fmr = TAILQ_FIRST(fmr_list);
        return fmr->device->unmap_fmr(fmr_list);
}

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
        struct ib_pd *pd;
        int ret;

        pd = fmr->pd;
        ret = fmr->device->dealloc_fmr(fmr);
        if (!ret)
                atomic_subtract_acq_int(&pd->usecnt, 1);

        return ret;
}

/* Multicast groups */

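/*
 * Multicast attach/detach is only defined for UD QPs, and the GID must
 * be a multicast GID (first byte 0xff).
 */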
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
        if (!qp->device->attach_mcast)
                return (ENOSYS);
        if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
                return (EINVAL);

        return qp->device->attach_mcast(qp, gid, lid);
}

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
        if (!qp->device->detach_mcast)
                return (ENOSYS);
        if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
                return (EINVAL);

        return qp->device->detach_mcast(qp, gid, lid);
}