/*
 * Copyright (c) 2004 Mellanox Technologies Ltd.  All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation.  All rights reserved.
 * Copyright (c) 2004 Intel Corporation.  All rights reserved.
 * Copyright (c) 2004 Topspin Corporation.  All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return  1;
	case IB_RATE_5_GBPS:   return  2;
	case IB_RATE_10_GBPS:  return  4;
	case IB_RATE_20_GBPS:  return  8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);

enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:  return IB_RATE_2_5_GBPS;
	case 2:  return IB_RATE_5_GBPS;
	case 4:  return IB_RATE_10_GBPS;
	case 8:  return IB_RATE_20_GBPS;
	case 12: return IB_RATE_30_GBPS;
	case 16: return IB_RATE_40_GBPS;
	case 24: return IB_RATE_60_GBPS;
	case 32: return IB_RATE_80_GBPS;
	case 48: return IB_RATE_120_GBPS;
	default: return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);

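/*
 * The two helpers above are inverses over the rates they know about, so a
 * static rate can be scaled through its multiple of the 2.5 Gb/s base.
 * A minimal sketch (the parenthesized values are what the calls return):
 *
 *	int mult = ib_rate_to_mult(IB_RATE_10_GBPS);	(4)
 *	enum ib_rate r = mult_to_ib_rate(2 * mult);	(IB_RATE_20_GBPS)
 *
 * Unknown rates map to -1, and unknown multiples fall back to
 * IB_RATE_PORT_CURRENT.
 */
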
enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	switch (node_type) {
	case RDMA_NODE_IB_CA:
	case RDMA_NODE_IB_SWITCH:
	case RDMA_NODE_IB_ROUTER:
		return RDMA_TRANSPORT_IB;
	case RDMA_NODE_RNIC:
		return RDMA_TRANSPORT_IWARP;
	default:
		BUG();
		return 0;
	}
}
EXPORT_SYMBOL(rdma_node_get_transport);

enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
	if (device->get_link_layer)
		return device->get_link_layer(device, port_num);

	switch (rdma_node_get_transport(device->node_type)) {
	case RDMA_TRANSPORT_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case RDMA_TRANSPORT_IWARP:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}
EXPORT_SYMBOL(rdma_port_get_link_layer);

/* Protection domains */

struct ib_pd *ib_alloc_pd(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = device->alloc_pd(device, NULL, NULL);

	if (!IS_ERR(pd)) {
		pd->device  = device;
		pd->uobject = NULL;
		atomic_set(&pd->usecnt, 0);
	}

	return pd;
}
EXPORT_SYMBOL(ib_alloc_pd);

int ib_dealloc_pd(struct ib_pd *pd)
{
	if (atomic_read(&pd->usecnt))
		return -EBUSY;

	return pd->device->dealloc_pd(pd);
}
EXPORT_SYMBOL(ib_dealloc_pd);

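/*
 * A PD's usecnt tracks the AHs, QPs, SRQs, MRs, MWs, and FMRs hanging off
 * it, so deallocation only succeeds once every dependent object is gone.
 * A minimal lifecycle sketch ("device" is assumed to be a registered
 * struct ib_device supplied by the caller):
 *
 *	struct ib_pd *pd = ib_alloc_pd(device);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...create and destroy QPs/MRs/AHs against pd...
 *	ret = ib_dealloc_pd(pd);	(-EBUSY while anything holds pd)
 */
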
/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);

int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
		       struct ib_grh *grh, struct ib_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;

	memset(ah_attr, 0, sizeof *ah_attr);
	ah_attr->dlid = wc->slid;
	ah_attr->sl = wc->sl;
	ah_attr->src_path_bits = wc->dlid_path_bits;
	ah_attr->port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = grh->sgid;

		ret = ib_find_cached_gid(device, &grh->dgid, &port_num,
					 &gid_index);
		if (ret)
			return ret;

		ah_attr->grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
		ah_attr->grh.hop_limit = 0xFF;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);

struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, struct ib_wc *wc,
				   struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);

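/*
 * Typical use: a UD service answering the sender of a received datagram.
 * A sketch, assuming "wc" is the receive completion and "recv_buf" is the
 * posted buffer (when a GRH is present it occupies the first 40 bytes of
 * a UD receive buffer):
 *
 *	struct ib_ah *ah;
 *
 *	ah = ib_create_ah_from_wc(pd, wc, (struct ib_grh *) recv_buf,
 *				  port_num);
 *	if (IS_ERR(ah))
 *		return PTR_ERR(ah);
 *	...post sends using ah, then ib_destroy_ah(ah)...
 */
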
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);

/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device	   = pd->device;
		srq->pd		   = pd;
		srq->uobject	   = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		srq->xrc_cq	   = NULL;
		srq->xrcd	   = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);

struct ib_srq *ib_create_xrc_srq(struct ib_pd *pd,
				 struct ib_cq *xrc_cq,
				 struct ib_xrcd *xrcd,
				 struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_xrc_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_xrc_srq(pd, xrc_cq, xrcd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device	   = pd->device;
		srq->pd		   = pd;
		srq->uobject	   = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context   = srq_init_attr->srq_context;
		srq->xrc_cq	   = xrc_cq;
		srq->xrcd	   = xrcd;
		atomic_inc(&pd->usecnt);
		atomic_inc(&xrcd->usecnt);
		atomic_inc(&xrc_cq->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_xrc_srq);

int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq ?
		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);

int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	struct ib_cq *xrc_cq;
	struct ib_xrcd *xrcd;
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;
	xrc_cq = srq->xrc_cq;
	xrcd = srq->xrcd;

	ret = srq->device->destroy_srq(srq);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (xrc_cq)
			atomic_dec(&xrc_cq->usecnt);
		if (xrcd)
			atomic_dec(&xrcd->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);

/* Queue pairs */

struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *qp;

	qp = pd->device->create_qp(pd, qp_init_attr, NULL);

	if (!IS_ERR(qp)) {
		qp->device	  = pd->device;
		qp->pd		  = pd;
		qp->send_cq	  = qp_init_attr->send_cq;
		qp->recv_cq	  = qp_init_attr->recv_cq;
		qp->srq		  = qp_init_attr->srq;
		qp->uobject	  = NULL;
		qp->event_handler = qp_init_attr->event_handler;
		qp->qp_context	  = qp_init_attr->qp_context;
		qp->qp_type	  = qp_init_attr->qp_type;
		qp->xrcd	  = qp->qp_type == IB_QPT_XRC ?
			qp_init_attr->xrc_domain : NULL;
		atomic_inc(&pd->usecnt);
		atomic_inc(&qp_init_attr->send_cq->usecnt);
		atomic_inc(&qp_init_attr->recv_cq->usecnt);
		if (qp_init_attr->srq)
			atomic_inc(&qp_init_attr->srq->usecnt);
		if (qp->qp_type == IB_QPT_XRC)
			atomic_inc(&qp->xrcd->usecnt);
	}

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);

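/*
 * A sketch of bringing up an RC QP ("pd", "cq", and the sizing values are
 * assumed to come from the caller).  The QP is created in the RESET state
 * and must then be walked through INIT/RTR/RTS with ib_modify_qp():
 *
 *	struct ib_qp_init_attr attr = {
 *		.send_cq     = cq,
 *		.recv_cq     = cq,
 *		.cap	     = { .max_send_wr  = 16, .max_recv_wr = 16,
 *				 .max_send_sge = 1,  .max_recv_sge = 1 },
 *		.sq_sig_type = IB_SIGNAL_ALL_WR,
 *		.qp_type     = IB_QPT_RC,
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &attr);
 *
 *	if (IS_ERR(qp))
 *		return PTR_ERR(qp);
 */
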
static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_RAW_ETH + 1];
	enum ib_qp_attr_mask	opt_param[IB_QPT_RAW_ETH + 1];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX | IB_QP_PORT |
						IB_QP_QKEY),
				[IB_QPT_RAW_ETH] = IB_QP_PORT,
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX | IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX | IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC] = (IB_QP_PKEY_INDEX | IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_INIT]  = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX | IB_QP_PORT |
						IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_PKEY_INDEX | IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_RC]  = (IB_QP_PKEY_INDEX | IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_XRC] = (IB_QP_PKEY_INDEX | IB_QP_PORT |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UC]  = (IB_QP_AV | IB_QP_PATH_MTU |
						IB_QP_DEST_QPN | IB_QP_RQ_PSN),
				[IB_QPT_RC]  = (IB_QP_AV | IB_QP_PATH_MTU |
						IB_QP_DEST_QPN | IB_QP_RQ_PSN |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC] = (IB_QP_AV | IB_QP_PATH_MTU |
						IB_QP_DEST_QPN | IB_QP_RQ_PSN |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_RC]  = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_XRC] = (IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.req_param = {
				[IB_QPT_UD]  = IB_QP_SQ_PSN,
				[IB_QPT_UC]  = IB_QP_SQ_PSN,
				[IB_QPT_RC]  = (IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_SQ_PSN |
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_XRC] = (IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_SQ_PSN |
						IB_QP_MAX_QP_RD_ATOMIC),
				[IB_QPT_SMI] = IB_QP_SQ_PSN,
				[IB_QPT_GSI] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE | IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_XRC] = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS |
						IB_QP_ALT_PATH |
						IB_QP_PATH_MIG_STATE |
						IB_QP_MIN_RNR_TIMER),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE | IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_XRC] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC] = (IB_QP_CUR_STATE |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE | IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_AV |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_RC]  = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_MAX_QP_RD_ATOMIC |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_XRC] = (IB_QP_PORT |
						IB_QP_AV |
						IB_QP_TIMEOUT |
						IB_QP_RETRY_CNT |
						IB_QP_RNR_RETRY |
						IB_QP_MAX_QP_RD_ATOMIC |
						IB_QP_MAX_DEST_RD_ATOMIC |
						IB_QP_ALT_PATH |
						IB_QP_ACCESS_FLAGS |
						IB_QP_PKEY_INDEX |
						IB_QP_MIN_RNR_TIMER |
						IB_QP_PATH_MIG_STATE),
				[IB_QPT_SMI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_PKEY_INDEX | IB_QP_QKEY)
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 },
		[IB_QPS_RTS]   = {
			.valid = 1,
			.opt_param = {
				[IB_QPT_UD]  = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_UC]  = (IB_QP_CUR_STATE |
						IB_QP_ACCESS_FLAGS),
				[IB_QPT_SMI] = (IB_QP_CUR_STATE | IB_QP_QKEY),
				[IB_QPT_GSI] = (IB_QP_CUR_STATE | IB_QP_QKEY)
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .valid = 1 },
		[IB_QPS_ERR]   = { .valid = 1 }
	}
};

int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	if (mask & IB_QP_CUR_STATE  &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if ((mask & req_param) != req_param)
		return 0;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);

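/*
 * Drivers use ib_modify_qp_is_ok() to vet a requested transition before
 * touching hardware; a modify_qp method might begin roughly like this
 * (sketch; "attr", "attr_mask", and "ibqp" are the method's arguments):
 *
 *	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state
 *						 : qp->state;
 *	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
 *
 *	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type,
 *				attr_mask))
 *		return -EINVAL;
 */
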
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	return qp->device->modify_qp(qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);

int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	struct ib_xrcd *xrcd;
	enum ib_qp_type qp_type = qp->qp_type;
	int ret;

	pd   = qp->pd;
	scq  = qp->send_cq;
	rcq  = qp->recv_cq;
	srq  = qp->srq;
	xrcd = qp->xrcd;

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		atomic_dec(&scq->usecnt);
		atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
		if (qp_type == IB_QPT_XRC)
			atomic_dec(&xrcd->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);

/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context, int cqe, int comp_vector)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cqe, comp_vector, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device        = device;
		cq->uobject       = NULL;
		cq->comp_handler  = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context    = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);

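/*
 * A sketch of creating a CQ and arming it for the next completion
 * ("my_comp_handler" and "my_ctx" are caller-supplied; the handler is
 * responsible for polling and rearming):
 *
 *	struct ib_cq *cq;
 *
 *	cq = ib_create_cq(device, my_comp_handler, NULL, my_ctx, 32, 0);
 *	if (IS_ERR(cq))
 *		return PTR_ERR(cq);
 *	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
 */
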
int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return cq->device->modify_cq ?
		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);

/* Memory regions */

struct ib_mr *ib_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
{
	struct ib_mr *mr;

	mr = pd->device->get_dma_mr(pd, mr_access_flags);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_get_dma_mr);

struct ib_mr *ib_reg_phys_mr(struct ib_pd *pd,
			     struct ib_phys_buf *phys_buf_array,
			     int num_phys_buf,
			     int mr_access_flags,
			     u64 *iova_start)
{
	struct ib_mr *mr;

	if (!pd->device->reg_phys_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf,
				     mr_access_flags, iova_start);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_reg_phys_mr);

int ib_rereg_phys_mr(struct ib_mr *mr,
		     int mr_rereg_mask,
		     struct ib_pd *pd,
		     struct ib_phys_buf *phys_buf_array,
		     int num_phys_buf,
		     int mr_access_flags,
		     u64 *iova_start)
{
	struct ib_pd *old_pd;
	int ret;

	if (!mr->device->rereg_phys_mr)
		return -ENOSYS;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	old_pd = mr->pd;

	ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd,
					phys_buf_array, num_phys_buf,
					mr_access_flags, iova_start);

	if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) {
		atomic_dec(&old_pd->usecnt);
		atomic_inc(&pd->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_rereg_phys_mr);

int ib_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	return mr->device->query_mr ?
		mr->device->query_mr(mr, mr_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_mr);

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd;
	int ret;

	if (atomic_read(&mr->usecnt))
		return -EBUSY;

	pd = mr->pd;
	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

struct ib_mr *ib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len)
{
	struct ib_mr *mr;

	if (!pd->device->alloc_fast_reg_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->alloc_fast_reg_mr(pd, max_page_list_len);

	if (!IS_ERR(mr)) {
		mr->device  = pd->device;
		mr->pd      = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_set(&mr->usecnt, 0);
	}

	return mr;
}
EXPORT_SYMBOL(ib_alloc_fast_reg_mr);

struct ib_fast_reg_page_list *ib_alloc_fast_reg_page_list(struct ib_device *device,
							  int max_page_list_len)
{
	struct ib_fast_reg_page_list *page_list;

	if (!device->alloc_fast_reg_page_list)
		return ERR_PTR(-ENOSYS);

	page_list = device->alloc_fast_reg_page_list(device, max_page_list_len);

	if (!IS_ERR(page_list)) {
		page_list->device	     = device;
		page_list->max_page_list_len = max_page_list_len;
	}

	return page_list;
}
EXPORT_SYMBOL(ib_alloc_fast_reg_page_list);

void ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
{
	page_list->device->free_fast_reg_page_list(page_list);
}
EXPORT_SYMBOL(ib_free_fast_reg_page_list);

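/*
 * Fast registration pairs an ib_alloc_fast_reg_mr() MR with a page list
 * that is filled in and posted through an IB_WR_FAST_REG_MR work request.
 * An allocation sketch ("npages" is caller-chosen):
 *
 *	struct ib_mr *mr = ib_alloc_fast_reg_mr(pd, npages);
 *	struct ib_fast_reg_page_list *pl =
 *		ib_alloc_fast_reg_page_list(pd->device, npages);
 *
 *	if (IS_ERR(mr) || IS_ERR(pl))
 *		...unwind...
 *	...fill pl->page_list[], post IB_WR_FAST_REG_MR on a QP...
 *	ib_free_fast_reg_page_list(pl);
 *	ib_dereg_mr(mr);
 */
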
/* Memory windows */

struct ib_mw *ib_alloc_mw(struct ib_pd *pd)
{
	struct ib_mw *mw;

	if (!pd->device->alloc_mw)
		return ERR_PTR(-ENOSYS);

	mw = pd->device->alloc_mw(pd);
	if (!IS_ERR(mw)) {
		mw->device  = pd->device;
		mw->pd      = pd;
		mw->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return mw;
}
EXPORT_SYMBOL(ib_alloc_mw);

int ib_dealloc_mw(struct ib_mw *mw)
{
	struct ib_pd *pd;
	int ret;

	pd = mw->pd;
	ret = mw->device->dealloc_mw(mw);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_mw);

951 /* "Fast" memory regions */
953 struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
955 struct ib_fmr_attr *fmr_attr)
959 if (!pd->device->alloc_fmr)
960 return ERR_PTR(-ENOSYS);
962 fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
964 fmr->device = pd->device;
966 atomic_inc(&pd->usecnt);
971 EXPORT_SYMBOL(ib_alloc_fmr);
int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);

/* Multicast groups */

int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	if (!qp->device->attach_mcast)
		return -ENOSYS;

	switch (rdma_node_get_transport(qp->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (qp->qp_type == IB_QPT_RAW_ETH) {
			/* In raw Ethernet mgids the 63 msbs should be 0 */
			if (gid->global.subnet_prefix & cpu_to_be64(~1ULL))
				return -EINVAL;
		} else if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
			return -EINVAL;
		break;
	case RDMA_TRANSPORT_IWARP:
		if (qp->qp_type != IB_QPT_RAW_ETH)
			return -EINVAL;
		break;
	}

	return qp->device->attach_mcast(qp, gid, lid);
}
EXPORT_SYMBOL(ib_attach_mcast);

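/*
 * On IB ports, attach is only valid for UD QPs with a multicast GID
 * (raw Ethernet QPs have the separate mgid restriction checked above).
 * A sketch for a UD QP joined to a group ("mgid" and "mlid" normally
 * come from an SA multicast join):
 *
 *	ret = ib_attach_mcast(qp, &mgid, mlid);
 *	...receive on qp...
 *	ret = ib_detach_mcast(qp, &mgid, mlid);
 */
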
int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	if (!qp->device->detach_mcast)
		return -ENOSYS;

	switch (rdma_node_get_transport(qp->device->node_type)) {
	case RDMA_TRANSPORT_IB:
		if (qp->qp_type == IB_QPT_RAW_ETH) {
			/* In raw Ethernet mgids the 63 msbs should be 0 */
			if (gid->global.subnet_prefix & cpu_to_be64(~1ULL))
				return -EINVAL;
		} else if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UD)
			return -EINVAL;
		break;
	case RDMA_TRANSPORT_IWARP:
		if (qp->qp_type != IB_QPT_RAW_ETH)
			return -EINVAL;
		break;
	}

	return qp->device->detach_mcast(qp, gid, lid);
}
EXPORT_SYMBOL(ib_detach_mcast);

int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	if (atomic_read(&xrcd->usecnt))
		return -EBUSY;

	return xrcd->device->dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);

struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
{
	struct ib_xrcd *xrcd;

	if (!device->alloc_xrcd)
		return ERR_PTR(-ENOSYS);

	xrcd = device->alloc_xrcd(device, NULL, NULL);
	if (!IS_ERR(xrcd)) {
		xrcd->device  = device;
		xrcd->uobject = NULL;
		atomic_set(&xrcd->usecnt, 0);
	}

	return xrcd;
}
EXPORT_SYMBOL(ib_alloc_xrcd);