/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
 * Copyright (c) 2004 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/in6.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_addr.h>

#include <netinet/ip.h>
#include <netinet/ip6.h>

#include <machine/in_cksum.h>

#include "core_priv.h"
static const char * const ib_events[] = {
	[IB_EVENT_CQ_ERR] = "CQ error",
	[IB_EVENT_QP_FATAL] = "QP fatal error",
	[IB_EVENT_QP_REQ_ERR] = "QP request error",
	[IB_EVENT_QP_ACCESS_ERR] = "QP access error",
	[IB_EVENT_COMM_EST] = "communication established",
	[IB_EVENT_SQ_DRAINED] = "send queue drained",
	[IB_EVENT_PATH_MIG] = "path migration successful",
	[IB_EVENT_PATH_MIG_ERR] = "path migration error",
	[IB_EVENT_DEVICE_FATAL] = "device fatal error",
	[IB_EVENT_PORT_ACTIVE] = "port active",
	[IB_EVENT_PORT_ERR] = "port error",
	[IB_EVENT_LID_CHANGE] = "LID change",
	[IB_EVENT_PKEY_CHANGE] = "P_key change",
	[IB_EVENT_SM_CHANGE] = "SM change",
	[IB_EVENT_SRQ_ERR] = "SRQ error",
	[IB_EVENT_SRQ_LIMIT_REACHED] = "SRQ limit reached",
	[IB_EVENT_QP_LAST_WQE_REACHED] = "last WQE reached",
	[IB_EVENT_CLIENT_REREGISTER] = "client reregister",
	[IB_EVENT_GID_CHANGE] = "GID changed",
};

const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
{
	size_t index = event;

	return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
			ib_events[index] : "unrecognized event";
}
EXPORT_SYMBOL(ib_event_msg);
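/*
 * Illustrative sketch (not part of this file): a ULP's asynchronous event
 * handler, registered with ib_register_event_handler(), can use
 * ib_event_msg() to log a readable event name.  The function name below is
 * hypothetical and the block is not compiled.
 */
#if 0
static void example_async_event_handler(struct ib_event_handler *handler,
					struct ib_event *event)
{
	pr_info("%s: async event \"%s\"\n", event->device->name,
		ib_event_msg(event->event));
}
#endif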
static const char * const wc_statuses[] = {
	[IB_WC_SUCCESS] = "success",
	[IB_WC_LOC_LEN_ERR] = "local length error",
	[IB_WC_LOC_QP_OP_ERR] = "local QP operation error",
	[IB_WC_LOC_EEC_OP_ERR] = "local EE context operation error",
	[IB_WC_LOC_PROT_ERR] = "local protection error",
	[IB_WC_WR_FLUSH_ERR] = "WR flushed",
	[IB_WC_MW_BIND_ERR] = "memory management operation error",
	[IB_WC_BAD_RESP_ERR] = "bad response error",
	[IB_WC_LOC_ACCESS_ERR] = "local access error",
	[IB_WC_REM_INV_REQ_ERR] = "invalid request error",
	[IB_WC_REM_ACCESS_ERR] = "remote access error",
	[IB_WC_REM_OP_ERR] = "remote operation error",
	[IB_WC_RETRY_EXC_ERR] = "transport retry counter exceeded",
	[IB_WC_RNR_RETRY_EXC_ERR] = "RNR retry counter exceeded",
	[IB_WC_LOC_RDD_VIOL_ERR] = "local RDD violation error",
	[IB_WC_REM_INV_RD_REQ_ERR] = "remote invalid RD request",
	[IB_WC_REM_ABORT_ERR] = "operation aborted",
	[IB_WC_INV_EECN_ERR] = "invalid EE context number",
	[IB_WC_INV_EEC_STATE_ERR] = "invalid EE context state",
	[IB_WC_FATAL_ERR] = "fatal error",
	[IB_WC_RESP_TIMEOUT_ERR] = "response timeout error",
	[IB_WC_GENERAL_ERR] = "general error",
};

const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
{
	size_t index = status;

	return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
			wc_statuses[index] : "unrecognized status";
}
EXPORT_SYMBOL(ib_wc_status_msg);
__attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 1;
	case IB_RATE_5_GBPS:   return 2;
	case IB_RATE_10_GBPS:  return 4;
	case IB_RATE_20_GBPS:  return 8;
	case IB_RATE_30_GBPS:  return 12;
	case IB_RATE_40_GBPS:  return 16;
	case IB_RATE_60_GBPS:  return 24;
	case IB_RATE_80_GBPS:  return 32;
	case IB_RATE_120_GBPS: return 48;
	case IB_RATE_28_GBPS:  return 11;
	case IB_RATE_50_GBPS:  return 20;
	case IB_RATE_400_GBPS: return 160;
	case IB_RATE_600_GBPS: return 240;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mult);
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
{
	switch (mult) {
	case 1:   return IB_RATE_2_5_GBPS;
	case 2:   return IB_RATE_5_GBPS;
	case 4:   return IB_RATE_10_GBPS;
	case 8:   return IB_RATE_20_GBPS;
	case 12:  return IB_RATE_30_GBPS;
	case 16:  return IB_RATE_40_GBPS;
	case 24:  return IB_RATE_60_GBPS;
	case 32:  return IB_RATE_80_GBPS;
	case 48:  return IB_RATE_120_GBPS;
	case 6:   return IB_RATE_14_GBPS;
	case 22:  return IB_RATE_56_GBPS;
	case 45:  return IB_RATE_112_GBPS;
	case 67:  return IB_RATE_168_GBPS;
	case 10:  return IB_RATE_25_GBPS;
	case 40:  return IB_RATE_100_GBPS;
	case 80:  return IB_RATE_200_GBPS;
	case 120: return IB_RATE_300_GBPS;
	case 11:  return IB_RATE_28_GBPS;
	case 20:  return IB_RATE_50_GBPS;
	case 160: return IB_RATE_400_GBPS;
	case 240: return IB_RATE_600_GBPS;
	default:  return IB_RATE_PORT_CURRENT;
	}
}
EXPORT_SYMBOL(mult_to_ib_rate);
__attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
{
	switch (rate) {
	case IB_RATE_2_5_GBPS: return 2500;
	case IB_RATE_5_GBPS:   return 5000;
	case IB_RATE_10_GBPS:  return 10000;
	case IB_RATE_20_GBPS:  return 20000;
	case IB_RATE_30_GBPS:  return 30000;
	case IB_RATE_40_GBPS:  return 40000;
	case IB_RATE_60_GBPS:  return 60000;
	case IB_RATE_80_GBPS:  return 80000;
	case IB_RATE_120_GBPS: return 120000;
	case IB_RATE_14_GBPS:  return 14062;
	case IB_RATE_56_GBPS:  return 56250;
	case IB_RATE_112_GBPS: return 112500;
	case IB_RATE_168_GBPS: return 168750;
	case IB_RATE_25_GBPS:  return 25781;
	case IB_RATE_100_GBPS: return 103125;
	case IB_RATE_200_GBPS: return 206250;
	case IB_RATE_300_GBPS: return 309375;
	case IB_RATE_28_GBPS:  return 28125;
	case IB_RATE_50_GBPS:  return 53125;
	case IB_RATE_400_GBPS: return 425000;
	case IB_RATE_600_GBPS: return 637500;
	default:	       return -1;
	}
}
EXPORT_SYMBOL(ib_rate_to_mbps);
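/*
 * Note (added for clarity): the returned values are effective data rates in
 * Mb/s, which is why several of them are not round numbers.  For example,
 * one EDR lane signals at 25.78125 Gb/s, so IB_RATE_25_GBPS maps to 25781
 * and the 4X aggregate IB_RATE_100_GBPS maps to 4 * 25781.25 = 103125;
 * likewise one FDR lane is 14.0625 Gb/s, giving 14062 and 56250 for the
 * 1X and 4X rates.
 */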
__attribute_const__ enum rdma_transport_type
rdma_node_get_transport(enum rdma_node_type node_type)
{
	switch (node_type) {
	case RDMA_NODE_IB_CA:
	case RDMA_NODE_IB_SWITCH:
	case RDMA_NODE_IB_ROUTER:
		return RDMA_TRANSPORT_IB;
	case RDMA_NODE_RNIC:
		return RDMA_TRANSPORT_IWARP;
	case RDMA_NODE_USNIC:
		return RDMA_TRANSPORT_USNIC;
	case RDMA_NODE_USNIC_UDP:
		return RDMA_TRANSPORT_USNIC_UDP;
	default:
		BUG();
		return 0;
	}
}
EXPORT_SYMBOL(rdma_node_get_transport);
enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
{
	if (device->get_link_layer)
		return device->get_link_layer(device, port_num);

	switch (rdma_node_get_transport(device->node_type)) {
	case RDMA_TRANSPORT_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case RDMA_TRANSPORT_IWARP:
	case RDMA_TRANSPORT_USNIC:
	case RDMA_TRANSPORT_USNIC_UDP:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}
EXPORT_SYMBOL(rdma_port_get_link_layer);
/* Protection domains */

/**
 * ib_alloc_pd - Allocates an unused protection domain.
 * @device: The device on which to allocate the protection domain.
 *
 * A protection domain object provides an association between QPs, shared
 * receive queues, address handles, memory regions, and memory windows.
 *
 * Every PD has a local_dma_lkey which can be used as the lkey value for local
 * memory operations.
 */
struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
			    const char *caller)
{
	struct ib_pd *pd;
	int mr_access_flags = 0;

	pd = device->alloc_pd(device, NULL, NULL);
	if (IS_ERR(pd))
		return pd;

	pd->device = device;
	pd->__internal_mr = NULL;
	atomic_set(&pd->usecnt, 0);

	if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
		pd->local_dma_lkey = device->local_dma_lkey;
	else
		mr_access_flags |= IB_ACCESS_LOCAL_WRITE;

	if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
		pr_warn("%s: enabling unsafe global rkey\n", caller);
		mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
	}

	if (mr_access_flags) {
		struct ib_mr *mr;

		mr = pd->device->get_dma_mr(pd, mr_access_flags);
		if (IS_ERR(mr)) {
			ib_dealloc_pd(pd);
			return ERR_CAST(mr);
		}

		mr->device = pd->device;
		mr->pd = pd;
		mr->need_inval = false;

		pd->__internal_mr = mr;

		if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
			pd->local_dma_lkey = pd->__internal_mr->lkey;

		if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
			pd->unsafe_global_rkey = pd->__internal_mr->rkey;
	}

	return pd;
}
EXPORT_SYMBOL(__ib_alloc_pd);
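/*
 * Illustrative usage sketch (not part of this file): a kernel consumer
 * normally allocates a PD through the ib_alloc_pd() wrapper and releases it
 * with ib_dealloc_pd() once every QP, CQ, MR and AH created on it is gone.
 * The helper name below is hypothetical and the block is not compiled.
 */
#if 0
static struct ib_pd *example_get_pd(struct ib_device *device)
{
	struct ib_pd *pd;

	pd = ib_alloc_pd(device, 0);	/* no IB_PD_UNSAFE_GLOBAL_RKEY */
	if (IS_ERR(pd))
		return pd;		/* caller checks with IS_ERR() */
	return pd;
}
#endif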
/**
 * ib_dealloc_pd - Deallocates a protection domain.
 * @pd: The protection domain to deallocate.
 *
 * It is an error to call this function while any resources in the pd still
 * exist.  The caller is responsible to synchronously destroy them and
 * guarantee no new allocations will happen.
 */
void ib_dealloc_pd(struct ib_pd *pd)
{
	int ret;

	if (pd->__internal_mr) {
		ret = pd->device->dereg_mr(pd->__internal_mr);
		WARN_ON(ret);
		pd->__internal_mr = NULL;
	}

	/* uverbs manipulates usecnt with proper locking, while the kabi
	   requires the caller to guarantee we can't race here. */
	WARN_ON(atomic_read(&pd->usecnt));

	/* Making dealloc_pd a void return is a WIP; no driver should return
	   an error here. */
	ret = pd->device->dealloc_pd(pd);
	WARN_ONCE(ret, "Infiniband HW driver failed dealloc_pd");
}
EXPORT_SYMBOL(ib_dealloc_pd);
/* Address handles */

struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr)
{
	struct ib_ah *ah;

	ah = pd->device->create_ah(pd, ah_attr, NULL);

	if (!IS_ERR(ah)) {
		ah->device  = pd->device;
		ah->pd      = pd;
		ah->uobject = NULL;
		atomic_inc(&pd->usecnt);
	}

	return ah;
}
EXPORT_SYMBOL(ib_create_ah);
static int ib_get_header_version(const union rdma_network_hdr *hdr)
{
	const struct ip *ip4h = (const struct ip *)&hdr->roce4grh;
	struct ip ip4h_checked;
	const struct ip6_hdr *ip6h = (const struct ip6_hdr *)&hdr->ibgrh;

	/* If it's IPv6, the version must be 6, otherwise, the first
	 * 20 bytes (before the IPv4 header) are garbled.
	 */
	if ((ip6h->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION)
		return (ip4h->ip_v == 4) ? 4 : 0;
	/* version may be 6 or 4 because the first 20 bytes could be garbled */

	/* RoCE v2 requires no options, thus header length
	 * must be 5 words
	 */
	if (ip4h->ip_hl != 5)
		return 6;

	/*
	 * We can't write on scattered buffers so we need to copy to
	 * a temp buffer.
	 */
	memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
	ip4h_checked.ip_sum = 0;
#if defined(INET) || defined(INET6)
	ip4h_checked.ip_sum = in_cksum_hdr(&ip4h_checked);
#endif
	/* if IPv4 header checksum is OK, believe it */
	if (ip4h->ip_sum == ip4h_checked.ip_sum)
		return 4;
	return 6;
}
static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
						     u8 port_num,
						     const struct ib_grh *grh)
{
	int grh_version;

	if (rdma_protocol_ib(device, port_num))
		return RDMA_NETWORK_IB;

	grh_version = ib_get_header_version((const union rdma_network_hdr *)grh);

	if (grh_version == 4)
		return RDMA_NETWORK_IPV4;

	if (grh->next_hdr == IPPROTO_UDP)
		return RDMA_NETWORK_IPV6;

	return RDMA_NETWORK_ROCE_V1;
}
struct find_gid_index_context {
	u16 vlan_id;
	enum ib_gid_type gid_type;
};

static bool find_gid_index(const union ib_gid *gid,
			   const struct ib_gid_attr *gid_attr,
			   void *context)
{
	struct find_gid_index_context *ctx =
		(struct find_gid_index_context *)context;

	if (ctx->gid_type != gid_attr->gid_type)
		return false;
	if (rdma_vlan_dev_vlan_id(gid_attr->ndev) != ctx->vlan_id)
		return false;

	return true;
}

static int get_sgid_index_from_eth(struct ib_device *device, u8 port_num,
				   u16 vlan_id, const union ib_gid *sgid,
				   enum ib_gid_type gid_type,
				   u16 *gid_index)
{
	struct find_gid_index_context context = {.vlan_id = vlan_id,
						 .gid_type = gid_type};

	return ib_find_gid_by_filter(device, sgid, port_num, find_gid_index,
				     &context, gid_index);
}
static int get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
				  enum rdma_network_type net_type,
				  union ib_gid *sgid, union ib_gid *dgid)
{
	struct sockaddr_in src_in;
	struct sockaddr_in dst_in;
	__be32 src_saddr, dst_saddr;

	if (!sgid || !dgid)
		return -EINVAL;

	if (net_type == RDMA_NETWORK_IPV4) {
		memcpy(&src_in.sin_addr.s_addr,
		       &hdr->roce4grh.ip_src, 4);
		memcpy(&dst_in.sin_addr.s_addr,
		       &hdr->roce4grh.ip_dst, 4);
		src_saddr = src_in.sin_addr.s_addr;
		dst_saddr = dst_in.sin_addr.s_addr;
		ipv6_addr_set_v4mapped(src_saddr,
				       (struct in6_addr *)sgid);
		ipv6_addr_set_v4mapped(dst_saddr,
				       (struct in6_addr *)dgid);
		return 0;
	} else if (net_type == RDMA_NETWORK_IPV6 ||
		   net_type == RDMA_NETWORK_IB) {
		*dgid = hdr->ibgrh.dgid;
		*sgid = hdr->ibgrh.sgid;
		return 0;
	} else {
		return -EINVAL;
	}
}
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
		       const struct ib_wc *wc, const struct ib_grh *grh,
		       struct ib_ah_attr *ah_attr)
{
	u32 flow_class;
	u16 gid_index;
	int ret;
	enum rdma_network_type net_type = RDMA_NETWORK_IB;
	enum ib_gid_type gid_type = IB_GID_TYPE_IB;
	int hoplimit = 0xff;
	union ib_gid dgid;
	union ib_gid sgid;

	memset(ah_attr, 0, sizeof *ah_attr);
	if (rdma_cap_eth_ah(device, port_num)) {
		if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
			net_type = wc->network_hdr_type;
		else
			net_type = ib_get_net_type_by_grh(device, port_num, grh);
		gid_type = ib_network_to_gid_type(net_type);
	}
	ret = get_gids_from_rdma_hdr((const union rdma_network_hdr *)grh, net_type,
				     &sgid, &dgid);
	if (ret)
		return ret;

	if (rdma_protocol_roce(device, port_num)) {
		struct ib_gid_attr dgid_attr;
		const u16 vlan_id = (wc->wc_flags & IB_WC_WITH_VLAN) ?
				wc->vlan_id : 0xffff;

		if (!(wc->wc_flags & IB_WC_GRH))
			return -EPROTO;

		ret = get_sgid_index_from_eth(device, port_num, vlan_id,
					      &dgid, gid_type, &gid_index);
		if (ret)
			return ret;

		ret = ib_get_cached_gid(device, port_num, gid_index, &dgid, &dgid_attr);
		if (ret)
			return ret;

		if (dgid_attr.ndev == NULL)
			return -ENODEV;

		ret = rdma_addr_find_l2_eth_by_grh(&dgid, &sgid, ah_attr->dmac,
						   dgid_attr.ndev, &hoplimit);

		dev_put(dgid_attr.ndev);
		if (ret)
			return ret;
	}

	ah_attr->dlid = wc->slid;
	ah_attr->sl = wc->sl;
	ah_attr->src_path_bits = wc->dlid_path_bits;
	ah_attr->port_num = port_num;

	if (wc->wc_flags & IB_WC_GRH) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.dgid = sgid;

		if (!rdma_cap_eth_ah(device, port_num)) {
			if (dgid.global.interface_id != cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
				ret = ib_find_cached_gid_by_port(device, &dgid,
								 IB_GID_TYPE_IB,
								 port_num, NULL,
								 &gid_index);
				if (ret)
					return ret;
			} else {
				gid_index = 0;
			}
		}

		ah_attr->grh.sgid_index = (u8) gid_index;
		flow_class = be32_to_cpu(grh->version_tclass_flow);
		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
		ah_attr->grh.hop_limit = hoplimit;
		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
	}
	return 0;
}
EXPORT_SYMBOL(ib_init_ah_from_wc);
struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
				   const struct ib_grh *grh, u8 port_num)
{
	struct ib_ah_attr ah_attr;
	int ret;

	ret = ib_init_ah_from_wc(pd->device, port_num, wc, grh, &ah_attr);
	if (ret)
		return ERR_PTR(ret);

	return ib_create_ah(pd, &ah_attr);
}
EXPORT_SYMBOL(ib_create_ah_from_wc);
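/*
 * Illustrative sketch (not part of this file): a UD service that wants to
 * answer a received datagram typically builds the reply address handle
 * straight from the completion and the received GRH, then destroys it after
 * posting the response.  The helper name below is hypothetical and the block
 * is not compiled.
 */
#if 0
static struct ib_ah *example_reply_ah(struct ib_pd *pd, const struct ib_wc *wc,
				      const struct ib_grh *grh, u8 port_num)
{
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(pd, wc, grh, port_num);
	if (IS_ERR(ah))
		return ah;
	/* ... post the reply using this AH, then ib_destroy_ah(ah) ... */
	return ah;
}
#endif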
int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->modify_ah ?
		ah->device->modify_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_ah);

int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr)
{
	return ah->device->query_ah ?
		ah->device->query_ah(ah, ah_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_ah);

int ib_destroy_ah(struct ib_ah *ah)
{
	struct ib_pd *pd;
	int ret;

	pd = ah->pd;
	ret = ah->device->destroy_ah(ah);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_destroy_ah);
/* Shared receive queues */

struct ib_srq *ib_create_srq(struct ib_pd *pd,
			     struct ib_srq_init_attr *srq_init_attr)
{
	struct ib_srq *srq;

	if (!pd->device->create_srq)
		return ERR_PTR(-ENOSYS);

	srq = pd->device->create_srq(pd, srq_init_attr, NULL);

	if (!IS_ERR(srq)) {
		srq->device = pd->device;
		srq->pd = pd;
		srq->uobject = NULL;
		srq->event_handler = srq_init_attr->event_handler;
		srq->srq_context = srq_init_attr->srq_context;
		srq->srq_type = srq_init_attr->srq_type;
		if (srq->srq_type == IB_SRQT_XRC) {
			srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
			srq->ext.xrc.cq = srq_init_attr->ext.xrc.cq;
			atomic_inc(&srq->ext.xrc.xrcd->usecnt);
			atomic_inc(&srq->ext.xrc.cq->usecnt);
		}
		atomic_inc(&pd->usecnt);
		atomic_set(&srq->usecnt, 0);
	}

	return srq;
}
EXPORT_SYMBOL(ib_create_srq);
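/*
 * Illustrative sketch (not part of this file): minimal attributes for a
 * basic (non-XRC) SRQ.  The sizes are arbitrary example values and the
 * helper name is hypothetical; the block is not compiled.
 */
#if 0
static struct ib_srq *example_create_srq(struct ib_pd *pd)
{
	struct ib_srq_init_attr attr = {};

	attr.srq_type = IB_SRQT_BASIC;
	attr.attr.max_wr = 64;
	attr.attr.max_sge = 1;

	return ib_create_srq(pd, &attr);
}
#endif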
int ib_modify_srq(struct ib_srq *srq,
		  struct ib_srq_attr *srq_attr,
		  enum ib_srq_attr_mask srq_attr_mask)
{
	return srq->device->modify_srq ?
		srq->device->modify_srq(srq, srq_attr, srq_attr_mask, NULL) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_modify_srq);

int ib_query_srq(struct ib_srq *srq,
		 struct ib_srq_attr *srq_attr)
{
	return srq->device->query_srq ?
		srq->device->query_srq(srq, srq_attr) : -ENOSYS;
}
EXPORT_SYMBOL(ib_query_srq);
int ib_destroy_srq(struct ib_srq *srq)
{
	struct ib_pd *pd;
	enum ib_srq_type srq_type;
	struct ib_xrcd *uninitialized_var(xrcd);
	struct ib_cq *uninitialized_var(cq);
	int ret;

	if (atomic_read(&srq->usecnt))
		return -EBUSY;

	pd = srq->pd;
	srq_type = srq->srq_type;
	if (srq_type == IB_SRQT_XRC) {
		xrcd = srq->ext.xrc.xrcd;
		cq = srq->ext.xrc.cq;
	}

	ret = srq->device->destroy_srq(srq);
	if (!ret) {
		atomic_dec(&pd->usecnt);
		if (srq_type == IB_SRQT_XRC) {
			atomic_dec(&xrcd->usecnt);
			atomic_dec(&cq->usecnt);
		}
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_srq);
/* Queue pairs */

static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
{
	struct ib_qp *qp = context;
	unsigned long flags;

	spin_lock_irqsave(&qp->device->event_handler_lock, flags);
	list_for_each_entry(event->element.qp, &qp->open_list, open_list)
		if (event->element.qp->event_handler)
			event->element.qp->event_handler(event, event->element.qp->qp_context);
	spin_unlock_irqrestore(&qp->device->event_handler_lock, flags);
}

static void __ib_insert_xrcd_qp(struct ib_xrcd *xrcd, struct ib_qp *qp)
{
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_add(&qp->xrcd_list, &xrcd->tgt_qp_list);
	mutex_unlock(&xrcd->tgt_qp_mutex);
}
static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
				  void (*event_handler)(struct ib_event *, void *),
				  void *qp_context)
{
	struct ib_qp *qp;
	unsigned long flags;

	qp = kzalloc(sizeof *qp, GFP_KERNEL);
	if (!qp)
		return ERR_PTR(-ENOMEM);

	qp->real_qp = real_qp;
	atomic_inc(&real_qp->usecnt);
	qp->device = real_qp->device;
	qp->event_handler = event_handler;
	qp->qp_context = qp_context;
	qp->qp_num = real_qp->qp_num;
	qp->qp_type = real_qp->qp_type;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_add(&qp->open_list, &real_qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	return qp;
}
struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
			 struct ib_qp_open_attr *qp_open_attr)
{
	struct ib_qp *qp, *real_qp;

	if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
		return ERR_PTR(-EINVAL);

	qp = ERR_PTR(-EINVAL);
	mutex_lock(&xrcd->tgt_qp_mutex);
	list_for_each_entry(real_qp, &xrcd->tgt_qp_list, xrcd_list) {
		if (real_qp->qp_num == qp_open_attr->qp_num) {
			qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
					  qp_open_attr->qp_context);
			break;
		}
	}
	mutex_unlock(&xrcd->tgt_qp_mutex);
	return qp;
}
EXPORT_SYMBOL(ib_open_qp);
static struct ib_qp *ib_create_xrc_qp(struct ib_qp *qp,
				      struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_qp *real_qp = qp;

	qp->event_handler = __ib_shared_qp_event_handler;
	qp->qp_context = qp;
	qp->pd = NULL;
	qp->send_cq = qp->recv_cq = NULL;
	qp->srq = NULL;
	qp->xrcd = qp_init_attr->xrcd;
	atomic_inc(&qp_init_attr->xrcd->usecnt);
	INIT_LIST_HEAD(&qp->open_list);

	qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
			  qp_init_attr->qp_context);
	if (!IS_ERR(qp))
		__ib_insert_xrcd_qp(qp_init_attr->xrcd, real_qp);
	else
		real_qp->device->destroy_qp(real_qp);
	return qp;
}
struct ib_qp *ib_create_qp(struct ib_pd *pd,
			   struct ib_qp_init_attr *qp_init_attr)
{
	struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
	struct ib_qp *qp;

	if (qp_init_attr->rwq_ind_tbl &&
	    (qp_init_attr->recv_cq ||
	    qp_init_attr->srq || qp_init_attr->cap.max_recv_wr ||
	    qp_init_attr->cap.max_recv_sge))
		return ERR_PTR(-EINVAL);

	qp = device->create_qp(pd, qp_init_attr, NULL);
	if (IS_ERR(qp))
		return qp;

	qp->device = device;
	qp->real_qp = qp;
	qp->uobject = NULL;
	qp->qp_type = qp_init_attr->qp_type;
	qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;

	atomic_set(&qp->usecnt, 0);
	spin_lock_init(&qp->mr_lock);

	if (qp_init_attr->qp_type == IB_QPT_XRC_TGT)
		return ib_create_xrc_qp(qp, qp_init_attr);

	qp->event_handler = qp_init_attr->event_handler;
	qp->qp_context = qp_init_attr->qp_context;
	if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
		qp->recv_cq = NULL;
		qp->srq = NULL;
	} else {
		qp->recv_cq = qp_init_attr->recv_cq;
		if (qp_init_attr->recv_cq)
			atomic_inc(&qp_init_attr->recv_cq->usecnt);
		qp->srq = qp_init_attr->srq;
		if (qp->srq)
			atomic_inc(&qp_init_attr->srq->usecnt);
	}

	qp->pd = pd;
	qp->send_cq = qp_init_attr->send_cq;
	qp->xrcd = NULL;

	atomic_inc(&pd->usecnt);
	if (qp_init_attr->send_cq)
		atomic_inc(&qp_init_attr->send_cq->usecnt);
	if (qp_init_attr->rwq_ind_tbl)
		atomic_inc(&qp->rwq_ind_tbl->usecnt);

	/*
	 * Note: all hw drivers guarantee that max_send_sge is lower than
	 * the device RDMA WRITE SGE limit but not all hw drivers ensure that
	 * max_send_sge <= max_sge_rd.
	 */
	qp->max_write_sge = qp_init_attr->cap.max_send_sge;
	qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
				 device->attrs.max_sge_rd);

	return qp;
}
EXPORT_SYMBOL(ib_create_qp);
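/*
 * Illustrative sketch (not part of this file): minimal attributes for an RC
 * QP created on an existing PD and CQ pair.  The sizes are arbitrary example
 * values and the helper name is hypothetical; the block is not compiled.
 */
#if 0
static struct ib_qp *example_create_rc_qp(struct ib_pd *pd, struct ib_cq *cq)
{
	struct ib_qp_init_attr attr = {};

	attr.qp_type = IB_QPT_RC;
	attr.send_cq = cq;
	attr.recv_cq = cq;
	attr.cap.max_send_wr = 16;
	attr.cap.max_recv_wr = 16;
	attr.cap.max_send_sge = 1;
	attr.cap.max_recv_sge = 1;
	attr.sq_sig_type = IB_SIGNAL_REQ_WR;

	return ib_create_qp(pd, &attr);
}
#endif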
static const struct {
	int			valid;
	enum ib_qp_attr_mask	req_param[IB_QPT_MAX];
	enum ib_qp_attr_mask	opt_param[IB_QPT_MAX];
} qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = { .valid = 1 },
	[IB_QPT_UD] = (IB_QP_PKEY_INDEX |
	[IB_QPT_RAW_PACKET] = IB_QP_PORT,
	[IB_QPT_UC] = (IB_QP_PKEY_INDEX |
	[IB_QPT_RC] = (IB_QP_PKEY_INDEX |
	[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
	[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
	[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
	[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
	[IB_QPS_RESET] = { .valid = 1 },
	[IB_QPS_ERR] = { .valid = 1 },
	[IB_QPT_UD] = (IB_QP_PKEY_INDEX |
	[IB_QPT_UC] = (IB_QP_PKEY_INDEX |
	[IB_QPT_RC] = (IB_QP_PKEY_INDEX |
	[IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
	[IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
	[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
	[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
	[IB_QPT_UC] = (IB_QP_AV |
	[IB_QPT_RC] = (IB_QP_AV |
		IB_QP_MAX_DEST_RD_ATOMIC |
		IB_QP_MIN_RNR_TIMER),
	[IB_QPT_XRC_INI] = (IB_QP_AV |
	[IB_QPT_XRC_TGT] = (IB_QP_AV |
		IB_QP_MAX_DEST_RD_ATOMIC |
		IB_QP_MIN_RNR_TIMER),
	[IB_QPT_UD] = (IB_QP_PKEY_INDEX |
	[IB_QPT_UC] = (IB_QP_ALT_PATH |
	[IB_QPT_RC] = (IB_QP_ALT_PATH |
	[IB_QPT_XRC_INI] = (IB_QP_ALT_PATH |
	[IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH |
	[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
	[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
	[IB_QPS_RESET] = { .valid = 1 },
	[IB_QPS_ERR] = { .valid = 1 },
	[IB_QPT_UD] = IB_QP_SQ_PSN,
	[IB_QPT_UC] = IB_QP_SQ_PSN,
	[IB_QPT_RC] = (IB_QP_TIMEOUT |
		IB_QP_MAX_QP_RD_ATOMIC),
	[IB_QPT_XRC_INI] = (IB_QP_TIMEOUT |
		IB_QP_MAX_QP_RD_ATOMIC),
	[IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT |
	[IB_QPT_SMI] = IB_QP_SQ_PSN,
	[IB_QPT_GSI] = IB_QP_SQ_PSN,
	[IB_QPT_UD] = (IB_QP_CUR_STATE |
	[IB_QPT_UC] = (IB_QP_CUR_STATE |
		IB_QP_PATH_MIG_STATE),
	[IB_QPT_RC] = (IB_QP_CUR_STATE |
		IB_QP_MIN_RNR_TIMER |
		IB_QP_PATH_MIG_STATE),
	[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
		IB_QP_PATH_MIG_STATE),
	[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
		IB_QP_MIN_RNR_TIMER |
		IB_QP_PATH_MIG_STATE),
	[IB_QPT_SMI] = (IB_QP_CUR_STATE |
	[IB_QPT_GSI] = (IB_QP_CUR_STATE |
	[IB_QPS_RESET] = { .valid = 1 },
	[IB_QPS_ERR] = { .valid = 1 },
	[IB_QPT_UD] = (IB_QP_CUR_STATE |
	[IB_QPT_UC] = (IB_QP_CUR_STATE |
		IB_QP_ACCESS_FLAGS |
		IB_QP_PATH_MIG_STATE),
	[IB_QPT_RC] = (IB_QP_CUR_STATE |
		IB_QP_ACCESS_FLAGS |
		IB_QP_PATH_MIG_STATE |
		IB_QP_MIN_RNR_TIMER),
	[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
		IB_QP_ACCESS_FLAGS |
		IB_QP_PATH_MIG_STATE),
	[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
		IB_QP_ACCESS_FLAGS |
		IB_QP_PATH_MIG_STATE |
		IB_QP_MIN_RNR_TIMER),
	[IB_QPT_SMI] = (IB_QP_CUR_STATE |
	[IB_QPT_GSI] = (IB_QP_CUR_STATE |
	[IB_QPT_UD] = IB_QP_EN_SQD_ASYNC_NOTIFY,
	[IB_QPT_UC] = IB_QP_EN_SQD_ASYNC_NOTIFY,
	[IB_QPT_RC] = IB_QP_EN_SQD_ASYNC_NOTIFY,
	[IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
	[IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
	[IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
	[IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
	[IB_QPS_RESET] = { .valid = 1 },
	[IB_QPS_ERR] = { .valid = 1 },
	[IB_QPT_UD] = (IB_QP_CUR_STATE |
	[IB_QPT_UC] = (IB_QP_CUR_STATE |
		IB_QP_ACCESS_FLAGS |
		IB_QP_PATH_MIG_STATE),
	[IB_QPT_RC] = (IB_QP_CUR_STATE |
		IB_QP_ACCESS_FLAGS |
		IB_QP_MIN_RNR_TIMER |
		IB_QP_PATH_MIG_STATE),
	[IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
		IB_QP_ACCESS_FLAGS |
		IB_QP_PATH_MIG_STATE),
	[IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
		IB_QP_ACCESS_FLAGS |
		IB_QP_MIN_RNR_TIMER |
		IB_QP_PATH_MIG_STATE),
	[IB_QPT_SMI] = (IB_QP_CUR_STATE |
	[IB_QPT_GSI] = (IB_QP_CUR_STATE |
	[IB_QPT_UD] = (IB_QP_PKEY_INDEX |
	[IB_QPT_UC] = (IB_QP_AV |
		IB_QP_ACCESS_FLAGS |
		IB_QP_PATH_MIG_STATE),
	[IB_QPT_RC] = (IB_QP_PORT |
		IB_QP_MAX_QP_RD_ATOMIC |
		IB_QP_MAX_DEST_RD_ATOMIC |
		IB_QP_ACCESS_FLAGS |
		IB_QP_MIN_RNR_TIMER |
		IB_QP_PATH_MIG_STATE),
	[IB_QPT_XRC_INI] = (IB_QP_PORT |
		IB_QP_MAX_QP_RD_ATOMIC |
		IB_QP_ACCESS_FLAGS |
		IB_QP_PATH_MIG_STATE),
	[IB_QPT_XRC_TGT] = (IB_QP_PORT |
		IB_QP_MAX_DEST_RD_ATOMIC |
		IB_QP_ACCESS_FLAGS |
		IB_QP_MIN_RNR_TIMER |
		IB_QP_PATH_MIG_STATE),
	[IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
	[IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
	[IB_QPS_RESET] = { .valid = 1 },
	[IB_QPS_ERR] = { .valid = 1 },
	[IB_QPT_UD] = (IB_QP_CUR_STATE |
	[IB_QPT_UC] = (IB_QP_CUR_STATE |
		IB_QP_ACCESS_FLAGS),
	[IB_QPT_SMI] = (IB_QP_CUR_STATE |
	[IB_QPT_GSI] = (IB_QP_CUR_STATE |
	[IB_QPS_RESET] = { .valid = 1 },
	[IB_QPS_ERR] = { .valid = 1 }
};
int ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
		       enum ib_qp_type type, enum ib_qp_attr_mask mask,
		       enum rdma_link_layer ll)
{
	enum ib_qp_attr_mask req_param, opt_param;

	if (cur_state  < 0 || cur_state  > IB_QPS_ERR ||
	    next_state < 0 || next_state > IB_QPS_ERR)
		return 0;

	if (mask & IB_QP_CUR_STATE &&
	    cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
	    cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
		return 0;

	if (!qp_state_table[cur_state][next_state].valid)
		return 0;

	req_param = qp_state_table[cur_state][next_state].req_param[type];
	opt_param = qp_state_table[cur_state][next_state].opt_param[type];

	if ((mask & req_param) != req_param)
		return 0;

	if (mask & ~(req_param | opt_param | IB_QP_STATE))
		return 0;

	return 1;
}
EXPORT_SYMBOL(ib_modify_qp_is_ok);
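/*
 * Illustrative sketch (not part of this file): validating the attribute mask
 * for a RESET -> INIT transition of an RC QP before calling ib_modify_qp().
 * On InfiniBand that transition requires at least IB_QP_STATE,
 * IB_QP_PKEY_INDEX, IB_QP_PORT and IB_QP_ACCESS_FLAGS.  The helper name is
 * hypothetical and the block is not compiled.
 */
#if 0
static bool example_reset_to_init_ok(void)
{
	enum ib_qp_attr_mask mask = IB_QP_STATE | IB_QP_PKEY_INDEX |
				    IB_QP_PORT | IB_QP_ACCESS_FLAGS;

	return ib_modify_qp_is_ok(IB_QPS_RESET, IB_QPS_INIT, IB_QPT_RC, mask,
				  IB_LINK_LAYER_INFINIBAND);
}
#endif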
int ib_resolve_eth_dmac(struct ib_device *device,
			struct ib_ah_attr *ah_attr)
{
	struct ib_gid_attr sgid_attr;
	union ib_gid sgid;
	int hop_limit;
	int ret;

	if (ah_attr->port_num < rdma_start_port(device) ||
	    ah_attr->port_num > rdma_end_port(device))
		return -EINVAL;

	if (!rdma_cap_eth_ah(device, ah_attr->port_num))
		return 0;

	if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
		if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
			__be32 addr = 0;

			memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4);
			ip_eth_mc_map(addr, (char *)ah_attr->dmac);
		} else {
			ipv6_eth_mc_map((struct in6_addr *)ah_attr->grh.dgid.raw,
					(char *)ah_attr->dmac);
		}
		return 0;
	}

	ret = ib_query_gid(device,
			   ah_attr->port_num,
			   ah_attr->grh.sgid_index,
			   &sgid, &sgid_attr);
	if (ret)
		return ret;

	if (!sgid_attr.ndev)
		return -ENXIO;

	ret = rdma_addr_find_l2_eth_by_grh(&sgid,
					   &ah_attr->grh.dgid,
					   ah_attr->dmac,
					   sgid_attr.ndev, &hop_limit);
	dev_put(sgid_attr.ndev);

	ah_attr->grh.hop_limit = hop_limit;
	return ret;
}
EXPORT_SYMBOL(ib_resolve_eth_dmac);
int ib_modify_qp(struct ib_qp *qp,
		 struct ib_qp_attr *qp_attr,
		 int qp_attr_mask)
{
	if (qp_attr_mask & IB_QP_AV) {
		int ret;

		ret = ib_resolve_eth_dmac(qp->device, &qp_attr->ah_attr);
		if (ret)
			return ret;
	}

	return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
}
EXPORT_SYMBOL(ib_modify_qp);
int ib_query_qp(struct ib_qp *qp,
		struct ib_qp_attr *qp_attr,
		int qp_attr_mask,
		struct ib_qp_init_attr *qp_init_attr)
{
	return qp->device->query_qp ?
		qp->device->query_qp(qp->real_qp, qp_attr, qp_attr_mask, qp_init_attr) :
		-ENOSYS;
}
EXPORT_SYMBOL(ib_query_qp);

int ib_close_qp(struct ib_qp *qp)
{
	struct ib_qp *real_qp;
	unsigned long flags;

	real_qp = qp->real_qp;
	if (real_qp == qp)
		return -EINVAL;

	spin_lock_irqsave(&real_qp->device->event_handler_lock, flags);
	list_del(&qp->open_list);
	spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags);

	atomic_dec(&real_qp->usecnt);
	kfree(qp);

	return 0;
}
EXPORT_SYMBOL(ib_close_qp);
static int __ib_destroy_shared_qp(struct ib_qp *qp)
{
	struct ib_xrcd *xrcd;
	struct ib_qp *real_qp;
	int ret;

	real_qp = qp->real_qp;
	xrcd = real_qp->xrcd;

	mutex_lock(&xrcd->tgt_qp_mutex);
	ib_close_qp(qp);
	if (atomic_read(&real_qp->usecnt) == 0)
		list_del(&real_qp->xrcd_list);
	else
		real_qp = NULL;
	mutex_unlock(&xrcd->tgt_qp_mutex);

	if (real_qp) {
		ret = ib_destroy_qp(real_qp);
		if (!ret)
			atomic_dec(&xrcd->usecnt);
		else
			__ib_insert_xrcd_qp(xrcd, real_qp);
	}

	return 0;
}

int ib_destroy_qp(struct ib_qp *qp)
{
	struct ib_pd *pd;
	struct ib_cq *scq, *rcq;
	struct ib_srq *srq;
	struct ib_rwq_ind_table *ind_tbl;
	int ret;

	if (atomic_read(&qp->usecnt))
		return -EBUSY;

	if (qp->real_qp != qp)
		return __ib_destroy_shared_qp(qp);

	pd = qp->pd;
	scq = qp->send_cq;
	rcq = qp->recv_cq;
	srq = qp->srq;
	ind_tbl = qp->rwq_ind_tbl;

	ret = qp->device->destroy_qp(qp);
	if (!ret) {
		if (pd)
			atomic_dec(&pd->usecnt);
		if (scq)
			atomic_dec(&scq->usecnt);
		if (rcq)
			atomic_dec(&rcq->usecnt);
		if (srq)
			atomic_dec(&srq->usecnt);
		if (ind_tbl)
			atomic_dec(&ind_tbl->usecnt);
	}

	return ret;
}
EXPORT_SYMBOL(ib_destroy_qp);
/* Completion queues */

struct ib_cq *ib_create_cq(struct ib_device *device,
			   ib_comp_handler comp_handler,
			   void (*event_handler)(struct ib_event *, void *),
			   void *cq_context,
			   const struct ib_cq_init_attr *cq_attr)
{
	struct ib_cq *cq;

	cq = device->create_cq(device, cq_attr, NULL, NULL);

	if (!IS_ERR(cq)) {
		cq->device = device;
		cq->uobject = NULL;
		cq->comp_handler = comp_handler;
		cq->event_handler = event_handler;
		cq->cq_context = cq_context;
		atomic_set(&cq->usecnt, 0);
	}

	return cq;
}
EXPORT_SYMBOL(ib_create_cq);

int ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
	return cq->device->modify_cq ?
		cq->device->modify_cq(cq, cq_count, cq_period) : -ENOSYS;
}
EXPORT_SYMBOL(ib_modify_cq);

int ib_destroy_cq(struct ib_cq *cq)
{
	if (atomic_read(&cq->usecnt))
		return -EBUSY;

	return cq->device->destroy_cq(cq);
}
EXPORT_SYMBOL(ib_destroy_cq);

int ib_resize_cq(struct ib_cq *cq, int cqe)
{
	return cq->device->resize_cq ?
		cq->device->resize_cq(cq, cqe, NULL) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);
/* Memory regions */

int ib_dereg_mr(struct ib_mr *mr)
{
	struct ib_pd *pd = mr->pd;
	int ret;

	ret = mr->device->dereg_mr(mr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dereg_mr);

/**
 * ib_alloc_mr() - Allocates a memory region
 * @pd:            protection domain associated with the region
 * @mr_type:       memory region type
 * @max_num_sg:    maximum sg entries available for registration.
 *
 * Notes:
 * Memory registration page/sg lists must not exceed max_num_sg.
 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
 * max_num_sg * used_page_size.
 */
struct ib_mr *ib_alloc_mr(struct ib_pd *pd,
			  enum ib_mr_type mr_type,
			  u32 max_num_sg)
{
	struct ib_mr *mr;

	if (!pd->device->alloc_mr)
		return ERR_PTR(-ENOSYS);

	mr = pd->device->alloc_mr(pd, mr_type, max_num_sg);
	if (!IS_ERR(mr)) {
		mr->device = pd->device;
		mr->pd = pd;
		mr->uobject = NULL;
		atomic_inc(&pd->usecnt);
		mr->need_inval = false;
	}

	return mr;
}
EXPORT_SYMBOL(ib_alloc_mr);
/* "Fast" memory regions */

struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
			    int mr_access_flags,
			    struct ib_fmr_attr *fmr_attr)
{
	struct ib_fmr *fmr;

	if (!pd->device->alloc_fmr)
		return ERR_PTR(-ENOSYS);

	fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr);
	if (!IS_ERR(fmr)) {
		fmr->device = pd->device;
		fmr->pd = pd;
		atomic_inc(&pd->usecnt);
	}

	return fmr;
}
EXPORT_SYMBOL(ib_alloc_fmr);

int ib_unmap_fmr(struct list_head *fmr_list)
{
	struct ib_fmr *fmr;

	if (list_empty(fmr_list))
		return 0;

	fmr = list_entry(fmr_list->next, struct ib_fmr, list);
	return fmr->device->unmap_fmr(fmr_list);
}
EXPORT_SYMBOL(ib_unmap_fmr);

int ib_dealloc_fmr(struct ib_fmr *fmr)
{
	struct ib_pd *pd;
	int ret;

	pd = fmr->pd;
	ret = fmr->device->dealloc_fmr(fmr);
	if (!ret)
		atomic_dec(&pd->usecnt);

	return ret;
}
EXPORT_SYMBOL(ib_dealloc_fmr);
/* Multicast groups */

static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
{
	struct ib_qp_init_attr init_attr = {};
	struct ib_qp_attr attr = {};
	int num_eth_ports = 0;
	int port;

	/* If QP state >= init, it is assigned to a port and we can check this
	 * port only.
	 */
	if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
		if (attr.qp_state >= IB_QPS_INIT) {
			if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
			    IB_LINK_LAYER_INFINIBAND)
				return true;
			goto lid_check;
		}
	}

	/* Can't get a quick answer, iterate over all ports */
	for (port = 0; port < qp->device->phys_port_cnt; port++)
		if (rdma_port_get_link_layer(qp->device, port) !=
		    IB_LINK_LAYER_INFINIBAND)
			num_eth_ports++;

	/* If we have at least one Ethernet port, the RoCE annex declares that
	 * the multicast LID should be ignored. We can't tell at this step if
	 * the QP belongs to an IB or Ethernet port.
	 */
	if (num_eth_ports)
		return true;

	/* If all the ports are IB, we can check according to IB spec. */
lid_check:
	return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
		 lid == be16_to_cpu(IB_LID_PERMISSIVE));
}
int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->attach_mcast)
		return -ENOSYS;

	if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
	    qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
		return -EINVAL;

	ret = qp->device->attach_mcast(qp, gid, lid);
	if (!ret)
		atomic_inc(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_attach_mcast);

int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
{
	int ret;

	if (!qp->device->detach_mcast)
		return -ENOSYS;

	if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
	    qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
		return -EINVAL;

	ret = qp->device->detach_mcast(qp, gid, lid);
	if (!ret)
		atomic_dec(&qp->usecnt);
	return ret;
}
EXPORT_SYMBOL(ib_detach_mcast);
struct ib_xrcd *ib_alloc_xrcd(struct ib_device *device)
{
	struct ib_xrcd *xrcd;

	if (!device->alloc_xrcd)
		return ERR_PTR(-ENOSYS);

	xrcd = device->alloc_xrcd(device, NULL, NULL);
	if (!IS_ERR(xrcd)) {
		xrcd->device = device;
		atomic_set(&xrcd->usecnt, 0);
		mutex_init(&xrcd->tgt_qp_mutex);
		INIT_LIST_HEAD(&xrcd->tgt_qp_list);
	}

	return xrcd;
}
EXPORT_SYMBOL(ib_alloc_xrcd);

int ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct ib_qp *qp;
	int ret;

	if (atomic_read(&xrcd->usecnt))
		return -EBUSY;

	while (!list_empty(&xrcd->tgt_qp_list)) {
		qp = list_entry(xrcd->tgt_qp_list.next, struct ib_qp, xrcd_list);
		ret = ib_destroy_qp(qp);
		if (ret)
			return ret;
	}

	return xrcd->device->dealloc_xrcd(xrcd);
}
EXPORT_SYMBOL(ib_dealloc_xrcd);
/**
 * ib_create_wq - Creates a WQ associated with the specified protection
 * domain.
 * @pd: The protection domain associated with the WQ.
 * @wq_init_attr: A list of initial attributes required to create the
 * WQ. If WQ creation succeeds, then the attributes are updated to
 * the actual capabilities of the created WQ.
 *
 * wq_init_attr->max_wr and wq_init_attr->max_sge determine
 * the requested size of the WQ, and set to the actual values allocated
 * on return.
 * If ib_create_wq() succeeds, then max_wr and max_sge will always be
 * at least as large as the requested values.
 */
struct ib_wq *ib_create_wq(struct ib_pd *pd,
			   struct ib_wq_init_attr *wq_attr)
{
	struct ib_wq *wq;

	if (!pd->device->create_wq)
		return ERR_PTR(-ENOSYS);

	wq = pd->device->create_wq(pd, wq_attr, NULL);
	if (!IS_ERR(wq)) {
		wq->event_handler = wq_attr->event_handler;
		wq->wq_context = wq_attr->wq_context;
		wq->wq_type = wq_attr->wq_type;
		wq->cq = wq_attr->cq;
		wq->device = pd->device;
		wq->pd = pd;
		wq->uobject = NULL;
		atomic_inc(&pd->usecnt);
		atomic_inc(&wq_attr->cq->usecnt);
		atomic_set(&wq->usecnt, 0);
	}
	return wq;
}
EXPORT_SYMBOL(ib_create_wq);
/**
 * ib_destroy_wq - Destroys the specified WQ.
 * @wq: The WQ to destroy.
 */
int ib_destroy_wq(struct ib_wq *wq)
{
	int err;
	struct ib_cq *cq = wq->cq;
	struct ib_pd *pd = wq->pd;

	if (atomic_read(&wq->usecnt))
		return -EBUSY;

	err = wq->device->destroy_wq(wq);
	if (!err) {
		atomic_dec(&pd->usecnt);
		atomic_dec(&cq->usecnt);
	}
	return err;
}
EXPORT_SYMBOL(ib_destroy_wq);

/**
 * ib_modify_wq - Modifies the specified WQ.
 * @wq: The WQ to modify.
 * @wq_attr: On input, specifies the WQ attributes to modify.
 * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ
 *   are being modified.
 * On output, the current values of selected WQ attributes are returned.
 */
int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		 u32 wq_attr_mask)
{
	int err;

	if (!wq->device->modify_wq)
		return -ENOSYS;

	err = wq->device->modify_wq(wq, wq_attr, wq_attr_mask, NULL);
	return err;
}
EXPORT_SYMBOL(ib_modify_wq);
/**
 * ib_create_rwq_ind_table - Creates a RQ Indirection Table.
 * @device: The device on which to create the rwq indirection table.
 * @ib_rwq_ind_table_init_attr: A list of initial attributes required to
 * create the Indirection Table.
 *
 * Note: The lifetime of ib_rwq_ind_table_init_attr->ind_tbl is not less
 * than the created ib_rwq_ind_table object and the caller is responsible
 * for its memory allocation/free.
 */
struct ib_rwq_ind_table *ib_create_rwq_ind_table(struct ib_device *device,
						 struct ib_rwq_ind_table_init_attr *init_attr)
{
	struct ib_rwq_ind_table *rwq_ind_table;
	int i;
	u32 table_size;

	if (!device->create_rwq_ind_table)
		return ERR_PTR(-ENOSYS);

	table_size = (1 << init_attr->log_ind_tbl_size);
	rwq_ind_table = device->create_rwq_ind_table(device,
						     init_attr, NULL);
	if (IS_ERR(rwq_ind_table))
		return rwq_ind_table;

	rwq_ind_table->ind_tbl = init_attr->ind_tbl;
	rwq_ind_table->log_ind_tbl_size = init_attr->log_ind_tbl_size;
	rwq_ind_table->device = device;
	rwq_ind_table->uobject = NULL;
	atomic_set(&rwq_ind_table->usecnt, 0);

	for (i = 0; i < table_size; i++)
		atomic_inc(&rwq_ind_table->ind_tbl[i]->usecnt);

	return rwq_ind_table;
}
EXPORT_SYMBOL(ib_create_rwq_ind_table);

/**
 * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table.
 * @wq_ind_table: The Indirection Table to destroy.
 */
int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
{
	int err, i;
	u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size);
	struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl;

	if (atomic_read(&rwq_ind_table->usecnt))
		return -EBUSY;

	err = rwq_ind_table->device->destroy_rwq_ind_table(rwq_ind_table);
	if (!err) {
		for (i = 0; i < table_size; i++)
			atomic_dec(&ind_tbl[i]->usecnt);
	}

	return err;
}
EXPORT_SYMBOL(ib_destroy_rwq_ind_table);
struct ib_flow *ib_create_flow(struct ib_qp *qp,
			       struct ib_flow_attr *flow_attr,
			       int domain)
{
	struct ib_flow *flow_id;

	if (!qp->device->create_flow)
		return ERR_PTR(-ENOSYS);

	flow_id = qp->device->create_flow(qp, flow_attr, domain);
	if (!IS_ERR(flow_id))
		atomic_inc(&qp->usecnt);
	return flow_id;
}
EXPORT_SYMBOL(ib_create_flow);

int ib_destroy_flow(struct ib_flow *flow_id)
{
	int err;
	struct ib_qp *qp = flow_id->qp;

	err = qp->device->destroy_flow(flow_id);
	if (!err)
		atomic_dec(&qp->usecnt);
	return err;
}
EXPORT_SYMBOL(ib_destroy_flow);

int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status)
{
	return mr->device->check_mr_status ?
		mr->device->check_mr_status(mr, check_mask, mr_status) : -ENOSYS;
}
EXPORT_SYMBOL(ib_check_mr_status);
int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
			 int state)
{
	if (!device->set_vf_link_state)
		return -ENOSYS;

	return device->set_vf_link_state(device, vf, port, state);
}
EXPORT_SYMBOL(ib_set_vf_link_state);

int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
		     struct ifla_vf_info *info)
{
	if (!device->get_vf_config)
		return -ENOSYS;

	return device->get_vf_config(device, vf, port, info);
}
EXPORT_SYMBOL(ib_get_vf_config);

int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
		    struct ifla_vf_stats *stats)
{
	if (!device->get_vf_stats)
		return -ENOSYS;

	return device->get_vf_stats(device, vf, port, stats);
}
EXPORT_SYMBOL(ib_get_vf_stats);

int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
		   int type)
{
	if (!device->set_vf_guid)
		return -ENOSYS;

	return device->set_vf_guid(device, vf, port, guid, type);
}
EXPORT_SYMBOL(ib_set_vf_guid);
/**
 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
 *     and set it on the memory region.
 * @mr:            memory region
 * @sg:            dma mapped scatterlist
 * @sg_nents:      number of entries in sg
 * @sg_offset:     offset in bytes into sg
 * @page_size:     page vector desired page size
 *
 * Constraints:
 * - The first sg element is allowed to have an offset.
 * - Each sg element must either be aligned to page_size or virtually
 *   contiguous to the previous element. In case an sg element has a
 *   non-contiguous offset, the mapping prefix will not include it.
 * - The last sg element is allowed to have length less than page_size.
 * - If sg_nents total byte length exceeds the mr max_num_sg * page_size
 *   then only max_num_sg entries will be mapped.
 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
 *   constraints hold and the page_size argument is ignored.
 *
 * Returns the number of sg elements that were mapped to the memory region.
 *
 * After this completes successfully, the memory region
 * is ready for registration.
 */
int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		 unsigned int *sg_offset, unsigned int page_size)
{
	if (unlikely(!mr->device->map_mr_sg))
		return -ENOSYS;

	mr->page_size = page_size;

	return mr->device->map_mr_sg(mr, sg, sg_nents, sg_offset);
}
EXPORT_SYMBOL(ib_map_mr_sg);
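/*
 * Illustrative sketch (not part of this file): a typical fast-registration
 * sequence maps a DMA-mapped scatterlist into an MR allocated with
 * ib_alloc_mr() and then posts a registration work request built from the
 * result.  The helper name is hypothetical and the block is not compiled.
 */
#if 0
static int example_map_mr(struct ib_mr *mr, struct scatterlist *sg, int nents)
{
	int n;

	n = ib_map_mr_sg(mr, sg, nents, NULL, PAGE_SIZE);
	if (n <= 0)
		return n < 0 ? n : -EINVAL;
	/* n entries were mapped; mr->iova, mr->length and mr->rkey are now
	 * valid and can be used to build the registration WR. */
	return n;
}
#endif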
/**
 * ib_sg_to_pages() - Convert the largest prefix of a sg list
 *     to a page vector
 * @mr:            memory region
 * @sgl:           dma mapped scatterlist
 * @sg_nents:      number of entries in sg
 * @sg_offset_p:   IN:  start offset in bytes into sg
 *                 OUT: offset in bytes for element n of the sg of the first
 *                      byte that has not been processed where n is the return
 *                      value of this function.
 * @set_page:      driver page assignment function pointer
 *
 * Core service helper for drivers to convert the largest
 * prefix of given sg list to a page vector. The sg list
 * prefix converted is the prefix that meets the requirements
 * of ib_map_mr_sg.
 *
 * Returns the number of sg elements that were assigned to
 * a page vector.
 */
int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
		unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
{
	struct scatterlist *sg;
	u64 last_end_dma_addr = 0;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	unsigned int last_page_off = 0;
	u64 page_mask = ~((u64)mr->page_size - 1);
	int i, ret;

	if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
		return -EINVAL;

	mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
	mr->length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		u64 dma_addr = sg_dma_address(sg) + sg_offset;
		u64 prev_addr = dma_addr;
		unsigned int dma_len = sg_dma_len(sg) - sg_offset;
		u64 end_dma_addr = dma_addr + dma_len;
		u64 page_addr = dma_addr & page_mask;

		/*
		 * For the second and later elements, check whether either the
		 * end of element i-1 or the start of element i is not aligned
		 * on a page boundary.
		 */
		if (i && (last_page_off != 0 || page_addr != dma_addr)) {
			/* Stop mapping if there is a gap. */
			if (last_end_dma_addr != dma_addr)
				break;

			/*
			 * Coalesce this element with the last. If it is small
			 * enough just update mr->length. Otherwise start
			 * mapping from the next page.
			 */
			goto next_page;
		}

		do {
			ret = set_page(mr, page_addr);
			if (unlikely(ret < 0)) {
				sg_offset = prev_addr - sg_dma_address(sg);
				mr->length += prev_addr - dma_addr;
				if (sg_offset_p)
					*sg_offset_p = sg_offset;
				return i || sg_offset ? i : ret;
			}
			prev_addr = page_addr;
next_page:
			page_addr += mr->page_size;
		} while (page_addr < end_dma_addr);

		mr->length += dma_len;
		last_end_dma_addr = end_dma_addr;
		last_page_off = end_dma_addr & ~page_mask;

		sg_offset = 0;
	}

	if (sg_offset_p)
		*sg_offset_p = 0;
	return i;
}
EXPORT_SYMBOL(ib_sg_to_pages);
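/*
 * Illustrative sketch (not part of this file): a driver-side set_page()
 * callback as consumed by ib_sg_to_pages().  A real driver would store each
 * page address into its hardware-specific page list; the structure and field
 * names below are hypothetical and the block is not compiled.
 */
#if 0
struct example_mr {
	struct ib_mr	ibmr;
	u64		*pages;
	int		npages;
	int		max_pages;
};

static int example_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct example_mr *emr = container_of(ibmr, struct example_mr, ibmr);

	if (emr->npages == emr->max_pages)
		return -ENOMEM;
	emr->pages[emr->npages++] = addr;
	return 0;
}
#endif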
struct ib_drain_cqe {
	struct ib_cqe cqe;
	struct completion done;
};

static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
						cqe);

	complete(&cqe->done);
}
/*
 * Post a WR and block until its completion is reaped for the SQ.
 */
static void __ib_drain_sq(struct ib_qp *qp)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_drain_cqe sdrain;
	struct ib_send_wr *bad_swr;
	struct ib_rdma_wr swr = {
		.wr = {
			.opcode = IB_WR_RDMA_WRITE,
			.wr_cqe = &sdrain.cqe,
		},
	};
	int ret;

	if (qp->send_cq->poll_ctx == IB_POLL_DIRECT) {
		WARN_ONCE(qp->send_cq->poll_ctx == IB_POLL_DIRECT,
			  "IB_POLL_DIRECT poll_ctx not supported for drain\n");
		return;
	}

	sdrain.cqe.done = ib_drain_qp_done;
	init_completion(&sdrain.done);

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	ret = ib_post_send(qp, &swr.wr, &bad_swr);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	wait_for_completion(&sdrain.done);
}
/*
 * Post a WR and block until its completion is reaped for the RQ.
 */
static void __ib_drain_rq(struct ib_qp *qp)
{
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_drain_cqe rdrain;
	struct ib_recv_wr rwr = {}, *bad_rwr;
	int ret;

	if (qp->recv_cq->poll_ctx == IB_POLL_DIRECT) {
		WARN_ONCE(qp->recv_cq->poll_ctx == IB_POLL_DIRECT,
			  "IB_POLL_DIRECT poll_ctx not supported for drain\n");
		return;
	}

	rwr.wr_cqe = &rdrain.cqe;
	rdrain.cqe.done = ib_drain_qp_done;
	init_completion(&rdrain.done);

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	ret = ib_post_recv(qp, &rwr, &bad_rwr);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	wait_for_completion(&rdrain.done);
}
/**
 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
 *		   application.
 * @qp:            queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that.  Otherwise call the generic drain function
 * __ib_drain_sq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and SQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
 * IB_POLL_DIRECT.
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_sq(struct ib_qp *qp)
{
	if (qp->device->drain_sq)
		qp->device->drain_sq(qp);
	else
		__ib_drain_sq(qp);
}
EXPORT_SYMBOL(ib_drain_sq);
/**
 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
 *		   application.
 * @qp:            queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that.  Otherwise call the generic drain function
 * __ib_drain_rq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and RQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq() and the CQ poll context cannot be
 * IB_POLL_DIRECT.
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_rq(struct ib_qp *qp)
{
	if (qp->device->drain_rq)
		qp->device->drain_rq(qp);
	else
		__ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_rq);
/**
 * ib_drain_qp() - Block until all CQEs have been consumed by the
 *		   application on both the RQ and SQ.
 * @qp:            queue pair to drain
 *
 * The caller must:
 *
 * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
 * and completions.
 *
 * allocate the CQs using ib_alloc_cq() and the CQ poll context cannot be
 * IB_POLL_DIRECT.
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_qp(struct ib_qp *qp)
{
	ib_drain_sq(qp);
	if (!qp->srq)
		ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_qp);
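/*
 * Illustrative sketch (not part of this file): typical teardown order for a
 * ULP connection, assuming the CQs were allocated with ib_alloc_cq() and no
 * other context is still posting work requests.  The helper name is
 * hypothetical and the block is not compiled.
 */
#if 0
static void example_teardown_qp(struct ib_qp *qp)
{
	ib_drain_qp(qp);	/* flush and reap all outstanding SQ/RQ CQEs */
	ib_destroy_qp(qp);	/* now it is safe to free the QP */
}
#endif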