 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 * Copyright (c) 2005 Voltaire Inc. All rights reserved.
 * Copyright (c) 2002-2005, Network Appliance, Inc. All rights reserved.
 * Copyright (c) 1999-2005, Mellanox Technologies, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * - Redistributions of source code must retain the above
 * copyright notice, this list of conditions and the following
 * - Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following
 * disclaimer in the documentation and/or other materials
 * provided with the distribution.
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#define LINUXKPI_PARAM_PREFIX ibcore_

#include <linux/completion.h>
#include <linux/in6.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/idr.h>
#include <linux/inetdevice.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <net/route.h>
#include <net/route/nhop.h>

#include <netinet/in_fib.h>

#include <netinet6/in6_fib.h>
#include <netinet6/scope6_var.h>
#include <netinet6/ip6_var.h>

#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/rdma_sdp.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_sa.h>
#include <rdma/iw_cm.h>

#include "core_priv.h"

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("Generic RDMA CM Agent");
MODULE_LICENSE("Dual BSD/GPL");

#define CMA_CM_RESPONSE_TIMEOUT 20
#define CMA_QUERY_CLASSPORT_INFO_TIMEOUT 3000
#define CMA_MAX_CM_RETRIES 15
#define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24)
#define CMA_IBOE_PACKET_LIFETIME 18

static const char * const cma_events[] = {
	[RDMA_CM_EVENT_ADDR_RESOLVED] = "address resolved",
	[RDMA_CM_EVENT_ADDR_ERROR] = "address error",
	[RDMA_CM_EVENT_ROUTE_RESOLVED] = "route resolved ",
	[RDMA_CM_EVENT_ROUTE_ERROR] = "route error",
	[RDMA_CM_EVENT_CONNECT_REQUEST] = "connect request",
	[RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response",
	[RDMA_CM_EVENT_CONNECT_ERROR] = "connect error",
	[RDMA_CM_EVENT_UNREACHABLE] = "unreachable",
	[RDMA_CM_EVENT_REJECTED] = "rejected",
	[RDMA_CM_EVENT_ESTABLISHED] = "established",
	[RDMA_CM_EVENT_DISCONNECTED] = "disconnected",
	[RDMA_CM_EVENT_DEVICE_REMOVAL] = "device removal",
	[RDMA_CM_EVENT_MULTICAST_JOIN] = "multicast join",
	[RDMA_CM_EVENT_MULTICAST_ERROR] = "multicast error",
	[RDMA_CM_EVENT_ADDR_CHANGE] = "address change",
	[RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit",

const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event)
	size_t index = event;

	return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ?
		cma_events[index] : "unrecognized event";
EXPORT_SYMBOL(rdma_event_msg);
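
/*
 * Example (sketch, not part of this file): a ULP event handler can use
 * rdma_event_msg() to log events it does not handle explicitly.  The
 * handler name and its context below are hypothetical.
 *
 *	static int
 *	my_cm_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
 *	{
 *		printf("cm event %s, status %d\n",
 *		    rdma_event_msg(event->event), event->status);
 *		return (0);
 *	}
 */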
static int cma_check_linklocal(struct rdma_dev_addr *, struct sockaddr *);
static void cma_add_one(struct ib_device *device);
static void cma_remove_one(struct ib_device *device, void *client_data);
static enum rdma_port_space rdma_ps_from_service_id(__be64 service_id);

static struct ib_client cma_client = {
	.remove = cma_remove_one

static struct ib_sa_client sa_client;
static struct rdma_addr_client addr_client;
static LIST_HEAD(dev_list);
static LIST_HEAD(listen_any_list);
static DEFINE_MUTEX(lock);
static struct workqueue_struct *cma_wq;

VNET_DEFINE(struct cma_pernet, cma_pernet);

static struct cma_pernet *cma_pernet_ptr(struct vnet *vnet)
	struct cma_pernet *retval;

	CURVNET_SET_QUIET(vnet);
	retval = &VNET(cma_pernet);

static struct idr *cma_pernet_idr(struct vnet *net, enum rdma_port_space ps)
	struct cma_pernet *pernet = cma_pernet_ptr(net);

		return &pernet->tcp_ps;
		return &pernet->udp_ps;
		return &pernet->ipoib_ps;
		return &pernet->ib_ps;
		return &pernet->sdp_ps;

	struct list_head list;
	struct ib_device *device;
	struct completion comp;
	struct list_head id_list;
	struct sysctl_ctx_list sysctl_ctx;
	enum ib_gid_type *default_gid_type;

struct rdma_bind_list {
	enum rdma_port_space ps;
	struct hlist_head owners;

struct class_port_info_context {
	struct ib_class_port_info *class_port_info;
	struct ib_device *device;
	struct completion done;
	struct ib_sa_query *sa_query;

static int cma_ps_alloc(struct vnet *vnet, enum rdma_port_space ps,
			struct rdma_bind_list *bind_list, int snum)
	struct idr *idr = cma_pernet_idr(vnet, ps);

	return idr_alloc(idr, bind_list, snum, snum + 1, GFP_KERNEL);

static struct rdma_bind_list *cma_ps_find(struct vnet *net,
					  enum rdma_port_space ps, int snum)
	struct idr *idr = cma_pernet_idr(net, ps);

	return idr_find(idr, snum);

static void cma_ps_remove(struct vnet *net, enum rdma_port_space ps, int snum)
	struct idr *idr = cma_pernet_idr(net, ps);

	idr_remove(idr, snum);

void cma_ref_dev(struct cma_device *cma_dev)
	atomic_inc(&cma_dev->refcount);

struct cma_device *cma_enum_devices_by_ibdev(cma_device_filter filter,
	struct cma_device *cma_dev;
	struct cma_device *found_cma_dev = NULL;

	list_for_each_entry(cma_dev, &dev_list, list)
		if (filter(cma_dev->device, cookie)) {
			found_cma_dev = cma_dev;

		cma_ref_dev(found_cma_dev);

	return found_cma_dev;

int cma_get_default_gid_type(struct cma_device *cma_dev,
	if (port < rdma_start_port(cma_dev->device) ||
	    port > rdma_end_port(cma_dev->device))

	return cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)];

int cma_set_default_gid_type(struct cma_device *cma_dev,
			     enum ib_gid_type default_gid_type)
	unsigned long supported_gids;

	if (port < rdma_start_port(cma_dev->device) ||
	    port > rdma_end_port(cma_dev->device))

	supported_gids = roce_gid_type_mask_support(cma_dev->device, port);

	if (!(supported_gids & 1 << default_gid_type))

	cma_dev->default_gid_type[port - rdma_start_port(cma_dev->device)] =

struct ib_device *cma_get_ib_dev(struct cma_device *cma_dev)
	return cma_dev->device;

 * Device removal can occur at anytime, so we need extra handling to
 * serialize notifying the user of device removal with other callbacks.
 * We do this by disabling removal notification while a callback is in process,
 * and reporting it after the callback completes.

struct rdma_id_private {
	struct rdma_cm_id id;

	struct rdma_bind_list *bind_list;
	struct hlist_node node;
	struct list_head list; /* listen_any_list or cma_device.list */
	struct list_head listen_list; /* per device listens */
	struct cma_device *cma_dev;
	struct list_head mc_list;

	enum rdma_cm_state state;
	struct mutex qp_mutex;
	struct completion comp;
	struct mutex handler_mutex;
	struct ib_sa_query *query;
	enum ib_gid_type gid_type;

struct cma_multicast {
	struct rdma_id_private *id_priv;
	struct ib_sa_multicast *ib;
	struct list_head list;
	struct sockaddr_storage addr;

	struct work_struct work;
	struct rdma_id_private *id;
	enum rdma_cm_state old_state;
	enum rdma_cm_state new_state;
	struct rdma_cm_event event;

struct cma_ndev_work {
	struct work_struct work;
	struct rdma_id_private *id;
	struct rdma_cm_event event;

struct iboe_mcast_work {
	struct work_struct work;
	struct rdma_id_private *id;
	struct cma_multicast *mc;

	u8 ip_version; /* IP version: 7:4 */
	union cma_ip_addr src_addr;
	union cma_ip_addr dst_addr;

#define CMA_VERSION 0x00
#define SDP_MAJ_VERSION 0x2

struct cma_req_info {
	struct ib_device *device;
	union ib_gid local_gid;

static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
	spin_lock_irqsave(&id_priv->lock, flags);
	ret = (id_priv->state == comp);
	spin_unlock_irqrestore(&id_priv->lock, flags);

static int cma_comp_exch(struct rdma_id_private *id_priv,
			 enum rdma_cm_state comp, enum rdma_cm_state exch)
	spin_lock_irqsave(&id_priv->lock, flags);
	if ((ret = (id_priv->state == comp)))
		id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);

static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
				   enum rdma_cm_state exch)
	enum rdma_cm_state old;

	spin_lock_irqsave(&id_priv->lock, flags);
	old = id_priv->state;
	id_priv->state = exch;
	spin_unlock_irqrestore(&id_priv->lock, flags);

static inline u8 cma_get_ip_ver(const struct cma_hdr *hdr)
	return hdr->ip_version >> 4;

static inline void cma_set_ip_ver(struct cma_hdr *hdr, u8 ip_ver)
	hdr->ip_version = (ip_ver << 4) | (hdr->ip_version & 0xF);

static inline u8 sdp_get_majv(u8 sdp_version)
	return sdp_version >> 4;

static inline u8 sdp_get_ip_ver(const struct sdp_hh *hh)
	return hh->ipv_cap >> 4;

static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
	hh->ipv_cap = (ip_ver << 4) | (hh->ipv_cap & 0xF);
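
/*
 * Both the CMA and SDP private-data headers keep the IP version in the
 * upper nibble of one byte and leave the lower nibble untouched.  A
 * minimal sketch of the encoding, assuming a zeroed header:
 *
 *	struct cma_hdr hdr = { 0 };
 *
 *	cma_set_ip_ver(&hdr, 4);	// hdr.ip_version == 0x40
 *	cma_get_ip_ver(&hdr);		// returns 4
 */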
static int cma_igmp_send(struct net_device *ndev, const union ib_gid *mgid, bool join)
	union rdma_sockaddr addr;

	rdma_gid2ip(&addr._sockaddr, mgid);

	CURVNET_SET_QUIET(ndev->if_vnet);
		retval = -if_addmulti(ndev, &addr._sockaddr, NULL);
		retval = -if_delmulti(ndev, &addr._sockaddr);

static void _cma_attach_to_dev(struct rdma_id_private *id_priv,
			       struct cma_device *cma_dev)
	cma_ref_dev(cma_dev);
	id_priv->cma_dev = cma_dev;
	id_priv->gid_type = 0;
	id_priv->id.device = cma_dev->device;
	id_priv->id.route.addr.dev_addr.transport =
		rdma_node_get_transport(cma_dev->device->node_type);
	list_add_tail(&id_priv->list, &cma_dev->id_list);

static void cma_attach_to_dev(struct rdma_id_private *id_priv,
			      struct cma_device *cma_dev)
	_cma_attach_to_dev(id_priv, cma_dev);
		cma_dev->default_gid_type[id_priv->id.port_num -
					  rdma_start_port(cma_dev->device)];

void cma_deref_dev(struct cma_device *cma_dev)
	if (atomic_dec_and_test(&cma_dev->refcount))
		complete(&cma_dev->comp);

static inline void release_mc(struct kref *kref)
	struct cma_multicast *mc = container_of(kref, struct cma_multicast, mcref);

	kfree(mc->multicast.ib);

static void cma_release_dev(struct rdma_id_private *id_priv)
	list_del(&id_priv->list);
	cma_deref_dev(id_priv->cma_dev);
	id_priv->cma_dev = NULL;

static inline struct sockaddr *cma_src_addr(struct rdma_id_private *id_priv)
	return (struct sockaddr *) &id_priv->id.route.addr.src_addr;

static inline struct sockaddr *cma_dst_addr(struct rdma_id_private *id_priv)
	return (struct sockaddr *) &id_priv->id.route.addr.dst_addr;

static inline unsigned short cma_family(struct rdma_id_private *id_priv)
	return id_priv->id.route.addr.src_addr.ss_family;

static int cma_set_qkey(struct rdma_id_private *id_priv, u32 qkey)
	struct ib_sa_mcmember_rec rec;

	if (qkey && id_priv->qkey != qkey)
		id_priv->qkey = qkey;

	switch (id_priv->id.ps) {
		id_priv->qkey = RDMA_UDP_QKEY;
		ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid);
		ret = ib_sa_get_mcmember_rec(id_priv->id.device,
					     id_priv->id.port_num, &rec.mgid,
			id_priv->qkey = be32_to_cpu(rec.qkey);

static void cma_translate_ib(struct sockaddr_ib *sib, struct rdma_dev_addr *dev_addr)
	dev_addr->dev_type = ARPHRD_INFINIBAND;
	rdma_addr_set_sgid(dev_addr, (union ib_gid *) &sib->sib_addr);
	ib_addr_set_pkey(dev_addr, ntohs(sib->sib_pkey));

static int cma_translate_addr(struct sockaddr *addr, struct rdma_dev_addr *dev_addr)
	if (addr->sa_family != AF_IB) {
		ret = rdma_translate_ip(addr, dev_addr);
		cma_translate_ib((struct sockaddr_ib *) addr, dev_addr);

static inline int cma_validate_port(struct ib_device *device, u8 port,
				    enum ib_gid_type gid_type,
				    const struct rdma_dev_addr *dev_addr)
	const int dev_type = dev_addr->dev_type;
	struct net_device *ndev;

	if ((dev_type == ARPHRD_INFINIBAND) && !rdma_protocol_ib(device, port))

	if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port))

	if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) {
		ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
		gid_type = IB_GID_TYPE_IB;

	ret = ib_find_cached_gid_by_port(device, gid, gid_type, port,

static int cma_acquire_dev(struct rdma_id_private *id_priv,
			   struct rdma_id_private *listen_id_priv)
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
	struct cma_device *cma_dev;
	union ib_gid gid, iboe_gid, *gidp;

	if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
	    id_priv->id.ps == RDMA_PS_IPOIB)

	rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
	memcpy(&gid, dev_addr->src_dev_addr +
	       rdma_addr_gid_offset(dev_addr), sizeof gid);

	if (listen_id_priv) {
		cma_dev = listen_id_priv->cma_dev;
		port = listen_id_priv->id.port_num;

		if (rdma_is_port_valid(cma_dev->device, port)) {
			gidp = rdma_protocol_roce(cma_dev->device, port) ?
			ret = cma_validate_port(cma_dev->device, port,
						rdma_protocol_ib(cma_dev->device, port) ?
						listen_id_priv->gid_type, gidp, dev_addr);
				id_priv->id.port_num = port;

	list_for_each_entry(cma_dev, &dev_list, list) {
		for (port = 1; port <= cma_dev->device->phys_port_cnt; ++port) {
			if (listen_id_priv &&
			    listen_id_priv->cma_dev == cma_dev &&
			    listen_id_priv->id.port_num == port)

			gidp = rdma_protocol_roce(cma_dev->device, port) ?
			ret = cma_validate_port(cma_dev->device, port,
						rdma_protocol_ib(cma_dev->device, port) ?
						cma_dev->default_gid_type[port - 1],
				id_priv->id.port_num = port;

		cma_attach_to_dev(id_priv, cma_dev);

 * Select the source IB device and address to reach the destination IB address.

static int cma_resolve_ib_dev(struct rdma_id_private *id_priv)
	struct cma_device *cma_dev, *cur_dev;
	struct sockaddr_ib *addr;
	union ib_gid gid, sgid, *dgid;

	addr = (struct sockaddr_ib *) cma_dst_addr(id_priv);
	dgid = (union ib_gid *) &addr->sib_addr;
	pkey = ntohs(addr->sib_pkey);

	list_for_each_entry(cur_dev, &dev_list, list) {
		for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
			if (!rdma_cap_af_ib(cur_dev->device, p))

			if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index))

			for (i = 0; !ib_get_cached_gid(cur_dev->device, p, i,
				if (!memcmp(&gid, dgid, sizeof(gid))) {
					id_priv->id.port_num = p;

				if (!cma_dev && (gid.global.subnet_prefix ==
				    dgid->global.subnet_prefix)) {
					id_priv->id.port_num = p;

	cma_attach_to_dev(id_priv, cma_dev);
	addr = (struct sockaddr_ib *) cma_src_addr(id_priv);
	memcpy(&addr->sib_addr, &sgid, sizeof sgid);
	cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr);

static void cma_deref_id(struct rdma_id_private *id_priv)
	if (atomic_dec_and_test(&id_priv->refcount))
		complete(&id_priv->comp);

struct rdma_cm_id *rdma_create_id(struct vnet *net,
				  rdma_cm_event_handler event_handler,
				  void *context, enum rdma_port_space ps,
				  enum ib_qp_type qp_type)
	struct rdma_id_private *id_priv;

		return ERR_PTR(-EINVAL);

	id_priv = kzalloc(sizeof *id_priv, GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	id_priv->owner = task_pid_nr(current);
	id_priv->state = RDMA_CM_IDLE;
	id_priv->id.context = context;
	id_priv->id.event_handler = event_handler;
	id_priv->id.qp_type = qp_type;
	spin_lock_init(&id_priv->lock);
	mutex_init(&id_priv->qp_mutex);
	init_completion(&id_priv->comp);
	atomic_set(&id_priv->refcount, 1);
	mutex_init(&id_priv->handler_mutex);
	INIT_LIST_HEAD(&id_priv->listen_list);
	INIT_LIST_HEAD(&id_priv->mc_list);
	get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num);
	id_priv->id.route.addr.dev_addr.net = net;
EXPORT_SYMBOL(rdma_create_id);
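
/*
 * Usage sketch (caller side, not part of this file): a consumer creates
 * an id in the current vnet and destroys it when done.  "my_handler" and
 * "my_ctx" are hypothetical.
 *
 *	struct rdma_cm_id *id;
 *
 *	id = rdma_create_id(TD_TO_VNET(curthread), my_handler, my_ctx,
 *	    RDMA_PS_TCP, IB_QPT_RC);
 *	if (IS_ERR(id))
 *		return (PTR_ERR(id));
 *	(bind, resolve and connect using the id here)
 *	rdma_destroy_id(id);
 */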
static int cma_init_ud_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE);

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN);

static int cma_init_conn_qp(struct rdma_id_private *id_priv, struct ib_qp *qp)
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);

	return ib_modify_qp(qp, &qp_attr, qp_attr_mask);

int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
		   struct ib_qp_init_attr *qp_init_attr)
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (id->device != pd->device)

	qp_init_attr->port_num = id->port_num;
	qp = ib_create_qp(pd, qp_init_attr);

	if (id->qp_type == IB_QPT_UD)
		ret = cma_init_ud_qp(id_priv, qp);
		ret = cma_init_conn_qp(id_priv, qp);

	id_priv->qp_num = qp->qp_num;
	id_priv->srq = (qp->srq != NULL);
EXPORT_SYMBOL(rdma_create_qp);

void rdma_destroy_qp(struct rdma_cm_id *id)
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	mutex_lock(&id_priv->qp_mutex);
	ib_destroy_qp(id_priv->id.qp);
	id_priv->id.qp = NULL;
	mutex_unlock(&id_priv->qp_mutex);
EXPORT_SYMBOL(rdma_destroy_qp);
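
/*
 * Sketch of pairing rdma_create_qp() with rdma_destroy_qp() (caller side,
 * assumed context; "pd" and "cq" are resources the caller already owns):
 *
 *	struct ib_qp_init_attr attr;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.qp_type = id->qp_type;
 *	attr.send_cq = attr.recv_cq = cq;
 *	attr.cap.max_send_wr = attr.cap.max_recv_wr = 16;
 *	attr.cap.max_send_sge = attr.cap.max_recv_sge = 1;
 *	if (rdma_create_qp(id, pd, &attr) == 0)
 *		rdma_destroy_qp(id);
 */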
static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {

	/* Need to update QP attributes from default values. */
	qp_attr.qp_state = IB_QPS_INIT;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);

	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);

	qp_attr.qp_state = IB_QPS_RTR;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);

	ret = ib_query_gid(id_priv->id.device, id_priv->id.port_num,
			   qp_attr.ah_attr.grh.sgid_index, &sgid, NULL);

	BUG_ON(id_priv->cma_dev->device != id_priv->id.device);

		qp_attr.max_dest_rd_atomic = conn_param->responder_resources;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	mutex_unlock(&id_priv->qp_mutex);

static int cma_modify_qp_rts(struct rdma_id_private *id_priv,
			     struct rdma_conn_param *conn_param)
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {

	qp_attr.qp_state = IB_QPS_RTS;
	ret = rdma_init_qp_attr(&id_priv->id, &qp_attr, &qp_attr_mask);

		qp_attr.max_rd_atomic = conn_param->initiator_depth;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, qp_attr_mask);
	mutex_unlock(&id_priv->qp_mutex);

static int cma_modify_qp_err(struct rdma_id_private *id_priv)
	struct ib_qp_attr qp_attr;

	mutex_lock(&id_priv->qp_mutex);
	if (!id_priv->id.qp) {

	qp_attr.qp_state = IB_QPS_ERR;
	ret = ib_modify_qp(id_priv->id.qp, &qp_attr, IB_QP_STATE);
	mutex_unlock(&id_priv->qp_mutex);

static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
			       struct ib_qp_attr *qp_attr, int *qp_attr_mask)
	struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;

	if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num))
		pkey = ib_addr_get_pkey(dev_addr);

	ret = ib_find_cached_pkey(id_priv->id.device, id_priv->id.port_num,
				  pkey, &qp_attr->pkey_index);

	qp_attr->port_num = id_priv->id.port_num;
	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;

	if (id_priv->id.qp_type == IB_QPT_UD) {
		ret = cma_set_qkey(id_priv, 0);

		qp_attr->qkey = id_priv->qkey;
		*qp_attr_mask |= IB_QP_QKEY;
		qp_attr->qp_access_flags = 0;
		*qp_attr_mask |= IB_QP_ACCESS_FLAGS;

int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
	struct rdma_id_private *id_priv;

	id_priv = container_of(id, struct rdma_id_private, id);
	if (rdma_cap_ib_cm(id->device, id->port_num)) {
		if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,

		if (qp_attr->qp_state == IB_QPS_RTR)
			qp_attr->rq_psn = id_priv->seq_num;
	} else if (rdma_cap_iw_cm(id->device, id->port_num)) {
		if (!id_priv->cm_id.iw) {
			qp_attr->qp_access_flags = 0;
			*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
			ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr,
		qp_attr->port_num = id_priv->id.port_num;
		*qp_attr_mask |= IB_QP_PORT;
EXPORT_SYMBOL(rdma_init_qp_attr);

static inline int cma_zero_addr(struct sockaddr *addr)
	switch (addr->sa_family) {
		return ipv4_is_zeronet(((struct sockaddr_in *)addr)->sin_addr.s_addr);
		return ipv6_addr_any(&((struct sockaddr_in6 *) addr)->sin6_addr);
		return ib_addr_any(&((struct sockaddr_ib *) addr)->sib_addr);

static inline int cma_loopback_addr(struct sockaddr *addr)
	switch (addr->sa_family) {
		return ipv4_is_loopback(((struct sockaddr_in *) addr)->sin_addr.s_addr);
		return ipv6_addr_loopback(&((struct sockaddr_in6 *) addr)->sin6_addr);
		return ib_addr_loopback(&((struct sockaddr_ib *) addr)->sib_addr);

static inline int cma_any_addr(struct sockaddr *addr)
	return cma_zero_addr(addr) || cma_loopback_addr(addr);

static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
	if (src->sa_family != dst->sa_family)

	switch (src->sa_family) {
		return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
		       ((struct sockaddr_in *) dst)->sin_addr.s_addr;
		return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
				     &((struct sockaddr_in6 *) dst)->sin6_addr);
		return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
				   &((struct sockaddr_ib *) dst)->sib_addr);

static __be16 cma_port(struct sockaddr *addr)
	struct sockaddr_ib *sib;

	switch (addr->sa_family) {
		return ((struct sockaddr_in *) addr)->sin_port;
		return ((struct sockaddr_in6 *) addr)->sin6_port;
		sib = (struct sockaddr_ib *) addr;
		return htons((u16) (be64_to_cpu(sib->sib_sid) &
				    be64_to_cpu(sib->sib_sid_mask)));

static inline int cma_any_port(struct sockaddr *addr)
	return !cma_port(addr);
static void cma_save_ib_info(struct sockaddr *src_addr,
			     struct sockaddr *dst_addr,
			     struct rdma_cm_id *listen_id,
			     struct ib_sa_path_rec *path)
	struct sockaddr_ib *listen_ib, *ib;

	listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
		ib = (struct sockaddr_ib *)src_addr;
		ib->sib_family = AF_IB;
			ib->sib_pkey = path->pkey;
			ib->sib_flowinfo = path->flow_label;
			memcpy(&ib->sib_addr, &path->sgid, 16);
			ib->sib_sid = path->service_id;
			ib->sib_scope_id = 0;
			ib->sib_pkey = listen_ib->sib_pkey;
			ib->sib_flowinfo = listen_ib->sib_flowinfo;
			ib->sib_addr = listen_ib->sib_addr;
			ib->sib_sid = listen_ib->sib_sid;
			ib->sib_scope_id = listen_ib->sib_scope_id;
		ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);

		ib = (struct sockaddr_ib *)dst_addr;
		ib->sib_family = AF_IB;
			ib->sib_pkey = path->pkey;
			ib->sib_flowinfo = path->flow_label;
			memcpy(&ib->sib_addr, &path->dgid, 16);

static void cma_save_ip4_info(struct sockaddr_in *src_addr,
			      struct sockaddr_in *dst_addr,
			      struct cma_hdr *hdr,
		*src_addr = (struct sockaddr_in) {
			.sin_len = sizeof(struct sockaddr_in),
			.sin_family = AF_INET,
			.sin_addr.s_addr = hdr->dst_addr.ip4.addr,
			.sin_port = local_port,

		*dst_addr = (struct sockaddr_in) {
			.sin_len = sizeof(struct sockaddr_in),
			.sin_family = AF_INET,
			.sin_addr.s_addr = hdr->src_addr.ip4.addr,
			.sin_port = hdr->port,

static void cma_ip6_clear_scope_id(struct in6_addr *addr)
	/* make sure link local scope ID gets zeroed */
	if (IN6_IS_SCOPE_LINKLOCAL(addr) ||
	    IN6_IS_ADDR_MC_INTFACELOCAL(addr)) {
		/* use byte-access to be alignment safe */
		addr->s6_addr[2] = 0;
		addr->s6_addr[3] = 0;

static void cma_save_ip6_info(struct sockaddr_in6 *src_addr,
			      struct sockaddr_in6 *dst_addr,
			      struct cma_hdr *hdr,
		*src_addr = (struct sockaddr_in6) {
			.sin6_len = sizeof(struct sockaddr_in6),
			.sin6_family = AF_INET6,
			.sin6_addr = hdr->dst_addr.ip6,
			.sin6_port = local_port,
		cma_ip6_clear_scope_id(&src_addr->sin6_addr);

		*dst_addr = (struct sockaddr_in6) {
			.sin6_len = sizeof(struct sockaddr_in6),
			.sin6_family = AF_INET6,
			.sin6_addr = hdr->src_addr.ip6,
			.sin6_port = hdr->port,
		cma_ip6_clear_scope_id(&dst_addr->sin6_addr);

static u16 cma_port_from_service_id(__be64 service_id)
	return (u16)be64_to_cpu(service_id);

static int sdp_save_ip_info(struct sockaddr *src_addr,
			    struct sockaddr *dst_addr,
			    const struct sdp_hh *hdr,
	BUG_ON(src_addr == NULL || dst_addr == NULL);

	if (sdp_get_majv(hdr->majv_minv) != SDP_MAJ_VERSION)

	local_port = htons(cma_port_from_service_id(service_id));

	switch (sdp_get_ip_ver(hdr)) {
		struct sockaddr_in *s4, *d4;

		s4 = (void *)src_addr;
		d4 = (void *)dst_addr;

		*s4 = (struct sockaddr_in) {
			.sin_len = sizeof(*s4),
			.sin_family = AF_INET,
			.sin_addr.s_addr = hdr->dst_addr.ip4.addr,
			.sin_port = local_port,
		*d4 = (struct sockaddr_in) {
			.sin_len = sizeof(*d4),
			.sin_family = AF_INET,
			.sin_addr.s_addr = hdr->src_addr.ip4.addr,
			.sin_port = hdr->port,

		struct sockaddr_in6 *s6, *d6;

		s6 = (void *)src_addr;
		d6 = (void *)dst_addr;

		*s6 = (struct sockaddr_in6) {
			.sin6_len = sizeof(*s6),
			.sin6_family = AF_INET6,
			.sin6_addr = hdr->dst_addr.ip6,
			.sin6_port = local_port,
		*d6 = (struct sockaddr_in6) {
			.sin6_len = sizeof(*d6),
			.sin6_family = AF_INET6,
			.sin6_addr = hdr->src_addr.ip6,
			.sin6_port = hdr->port,
		cma_ip6_clear_scope_id(&s6->sin6_addr);
		cma_ip6_clear_scope_id(&d6->sin6_addr);

		return -EAFNOSUPPORT;

static int cma_save_ip_info(struct sockaddr *src_addr,
			    struct sockaddr *dst_addr,
			    struct ib_cm_event *ib_event,
	struct cma_hdr *hdr;

	if (rdma_ps_from_service_id(service_id) == RDMA_PS_SDP)
		return sdp_save_ip_info(src_addr, dst_addr,
		    ib_event->private_data, service_id);

	hdr = ib_event->private_data;
	if (hdr->cma_version != CMA_VERSION)

	port = htons(cma_port_from_service_id(service_id));

	switch (cma_get_ip_ver(hdr)) {
		cma_save_ip4_info((struct sockaddr_in *)src_addr,
				  (struct sockaddr_in *)dst_addr, hdr, port);
		cma_save_ip6_info((struct sockaddr_in6 *)src_addr,
				  (struct sockaddr_in6 *)dst_addr, hdr, port);
		return -EAFNOSUPPORT;

static int cma_save_net_info(struct sockaddr *src_addr,
			     struct sockaddr *dst_addr,
			     struct rdma_cm_id *listen_id,
			     struct ib_cm_event *ib_event,
			     sa_family_t sa_family, __be64 service_id)
	if (sa_family == AF_IB) {
		if (ib_event->event == IB_CM_REQ_RECEIVED)
			cma_save_ib_info(src_addr, dst_addr, listen_id,
					 ib_event->param.req_rcvd.primary_path);
		else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
			cma_save_ib_info(src_addr, dst_addr, listen_id, NULL);

	return cma_save_ip_info(src_addr, dst_addr, ib_event, service_id);

static int cma_save_req_info(const struct ib_cm_event *ib_event,
			     struct cma_req_info *req)
	const struct ib_cm_req_event_param *req_param =
		&ib_event->param.req_rcvd;
	const struct ib_cm_sidr_req_event_param *sidr_param =
		&ib_event->param.sidr_req_rcvd;

	switch (ib_event->event) {
	case IB_CM_REQ_RECEIVED:
		req->device = req_param->listen_id->device;
		req->port = req_param->port;
		memcpy(&req->local_gid, &req_param->primary_path->sgid,
		       sizeof(req->local_gid));
		req->has_gid = true;
		req->service_id = req_param->primary_path->service_id;
		req->pkey = be16_to_cpu(req_param->primary_path->pkey);
		if (req->pkey != req_param->bth_pkey)
			pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
					    "RDMA CMA: in the future this may cause the request to be dropped\n",
					    req_param->bth_pkey, req->pkey);
	case IB_CM_SIDR_REQ_RECEIVED:
		req->device = sidr_param->listen_id->device;
		req->port = sidr_param->port;
		req->has_gid = false;
		req->service_id = sidr_param->service_id;
		req->pkey = sidr_param->pkey;
		if (req->pkey != sidr_param->bth_pkey)
			pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and SIDR request payload P_Key (0x%x)\n"
					    "RDMA CMA: in the future this may cause the request to be dropped\n",
					    sidr_param->bth_pkey, req->pkey);
static bool validate_ipv4_net_dev(struct net_device *net_dev,
				  const struct sockaddr_in *dst_addr,
				  const struct sockaddr_in *src_addr)
	__be32 daddr = dst_addr->sin_addr.s_addr,
	       saddr = src_addr->sin_addr.s_addr;
	struct net_device *dst_dev;
	struct nhop_object *nh;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    ipv4_is_lbcast(daddr) || ipv4_is_zeronet(saddr) ||
	    ipv4_is_zeronet(daddr) || ipv4_is_loopback(daddr) ||
	    ipv4_is_loopback(saddr))

	dst_dev = ip_dev_find(net_dev->if_vnet, daddr);
	if (dst_dev != net_dev) {
		if (dst_dev != NULL)

	 * Check for loopback.

	CURVNET_SET(net_dev->if_vnet);
	nh = fib4_lookup(RT_DEFAULT_FIB, src_addr->sin_addr, 0, NHR_NONE, 0);
		ret = (nh->nh_ifp == net_dev);

static bool validate_ipv6_net_dev(struct net_device *net_dev,
				  const struct sockaddr_in6 *dst_addr,
				  const struct sockaddr_in6 *src_addr)
	struct sockaddr_in6 src_tmp = *src_addr;
	struct sockaddr_in6 dst_tmp = *dst_addr;
	struct net_device *dst_dev;
	struct nhop_object *nh;

	dst_dev = ip6_dev_find(net_dev->if_vnet, dst_tmp.sin6_addr,
	if (dst_dev != net_dev) {
		if (dst_dev != NULL)

	CURVNET_SET(net_dev->if_vnet);

	 * Make sure the scope ID gets embedded.

	src_tmp.sin6_scope_id = net_dev->if_index;
	sa6_embedscope(&src_tmp, 0);

	dst_tmp.sin6_scope_id = net_dev->if_index;
	sa6_embedscope(&dst_tmp, 0);

	 * Check for loopback after scope ID
	 * has been embedded:

	if (memcmp(&src_tmp.sin6_addr, &dst_tmp.sin6_addr,
	    sizeof(dst_tmp.sin6_addr)) == 0) {
		/* non-loopback case */
		nh = fib6_lookup(RT_DEFAULT_FIB, &src_addr->sin6_addr,
		    net_dev->if_index, NHR_NONE, 0);
			ret = (nh->nh_ifp == net_dev);

static bool validate_net_dev(struct net_device *net_dev,
			     const struct sockaddr *daddr,
			     const struct sockaddr *saddr)
	const struct sockaddr_in *daddr4 = (const struct sockaddr_in *)daddr;
	const struct sockaddr_in *saddr4 = (const struct sockaddr_in *)saddr;
	const struct sockaddr_in6 *daddr6 = (const struct sockaddr_in6 *)daddr;
	const struct sockaddr_in6 *saddr6 = (const struct sockaddr_in6 *)saddr;

	switch (daddr->sa_family) {
		return saddr->sa_family == AF_INET &&
		       validate_ipv4_net_dev(net_dev, daddr4, saddr4);
		return saddr->sa_family == AF_INET6 &&
		       validate_ipv6_net_dev(net_dev, daddr6, saddr6);

static struct net_device *
roce_get_net_dev_by_cm_event(struct ib_device *device, u8 port_num,
			     const struct ib_cm_event *ib_event)
	struct ib_gid_attr sgid_attr;

	if (ib_event->event == IB_CM_REQ_RECEIVED) {
		err = ib_get_cached_gid(device, port_num,
		    ib_event->param.req_rcvd.ppath_sgid_index, &sgid, &sgid_attr);
	} else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
		err = ib_get_cached_gid(device, port_num,
		    ib_event->param.sidr_req_rcvd.sgid_index, &sgid, &sgid_attr);

	return (sgid_attr.ndev);

static struct net_device *cma_get_net_dev(struct ib_cm_event *ib_event,
					  const struct cma_req_info *req)
	struct sockaddr_storage listen_addr_storage, src_addr_storage;
	struct sockaddr *listen_addr = (struct sockaddr *)&listen_addr_storage,
			*src_addr = (struct sockaddr *)&src_addr_storage;
	struct net_device *net_dev;
	const union ib_gid *gid = req->has_gid ? &req->local_gid : NULL;
	struct epoch_tracker et;

	err = cma_save_ip_info(listen_addr, src_addr, ib_event,
		return ERR_PTR(err);

	if (rdma_protocol_roce(req->device, req->port)) {
		net_dev = roce_get_net_dev_by_cm_event(req->device, req->port,
		net_dev = ib_get_net_dev_by_params(req->device, req->port,
		return ERR_PTR(-ENODEV);

	NET_EPOCH_ENTER(et);
	if (!validate_net_dev(net_dev, listen_addr, src_addr)) {
		return ERR_PTR(-EHOSTUNREACH);

static enum rdma_port_space rdma_ps_from_service_id(__be64 service_id)
	return (be64_to_cpu(service_id) >> 16) & 0xffff;
static bool sdp_match_private_data(struct rdma_id_private *id_priv,
				   const struct sdp_hh *hdr,
				   struct sockaddr *addr)
	struct in6_addr ip6_addr;

	switch (addr->sa_family) {
		ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
		if (sdp_get_ip_ver(hdr) != 4)
		if (!cma_any_addr(addr) &&
		    hdr->dst_addr.ip4.addr != ip4_addr)
		ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr;
		if (sdp_get_ip_ver(hdr) != 6)
		cma_ip6_clear_scope_id(&ip6_addr);
		if (!cma_any_addr(addr) &&
		    memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr)))

static bool cma_match_private_data(struct rdma_id_private *id_priv,
	const struct cma_hdr *hdr = vhdr;
	struct sockaddr *addr = cma_src_addr(id_priv);
	struct in6_addr ip6_addr;

	if (cma_any_addr(addr) && !id_priv->afonly)

	if (id_priv->id.ps == RDMA_PS_SDP)
		return sdp_match_private_data(id_priv, vhdr, addr);

	switch (addr->sa_family) {
		ip4_addr = ((struct sockaddr_in *)addr)->sin_addr.s_addr;
		if (cma_get_ip_ver(hdr) != 4)
		if (!cma_any_addr(addr) &&
		    hdr->dst_addr.ip4.addr != ip4_addr)
		ip6_addr = ((struct sockaddr_in6 *)addr)->sin6_addr;
		if (cma_get_ip_ver(hdr) != 6)
		cma_ip6_clear_scope_id(&ip6_addr);
		if (!cma_any_addr(addr) &&
		    memcmp(&hdr->dst_addr.ip6, &ip6_addr, sizeof(ip6_addr)))

static bool cma_protocol_roce_dev_port(struct ib_device *device, int port_num)
	enum rdma_link_layer ll = rdma_port_get_link_layer(device, port_num);
	enum rdma_transport_type transport =
		rdma_node_get_transport(device->node_type);

	return ll == IB_LINK_LAYER_ETHERNET && transport == RDMA_TRANSPORT_IB;

static bool cma_protocol_roce(const struct rdma_cm_id *id)
	struct ib_device *device = id->device;
	const int port_num = id->port_num ?: rdma_start_port(device);

	return cma_protocol_roce_dev_port(device, port_num);

static bool cma_match_net_dev(const struct rdma_cm_id *id,
			      const struct net_device *net_dev,
	const struct rdma_addr *addr = &id->route.addr;

	if (id->port_num && id->port_num != port_num)

	if (id->ps == RDMA_PS_SDP) {
		if (addr->src_addr.ss_family == AF_INET ||
		    addr->src_addr.ss_family == AF_INET6)

		/* This request is an AF_IB request or a RoCE request */
		return addr->src_addr.ss_family == AF_IB ||
		       cma_protocol_roce_dev_port(id->device, port_num);

	return !addr->dev_addr.bound_dev_if ||
	       (net_eq(dev_net(net_dev), addr->dev_addr.net) &&
		addr->dev_addr.bound_dev_if == net_dev->if_index);

static struct rdma_id_private *cma_find_listener(
		const struct rdma_bind_list *bind_list,
		const struct ib_cm_id *cm_id,
		const struct ib_cm_event *ib_event,
		const struct cma_req_info *req,
		const struct net_device *net_dev)
	struct rdma_id_private *id_priv, *id_priv_dev;

		return ERR_PTR(-EINVAL);

	hlist_for_each_entry(id_priv, &bind_list->owners, node) {
		if (cma_match_private_data(id_priv, ib_event->private_data)) {
			if (id_priv->id.device == cm_id->device &&
			    cma_match_net_dev(&id_priv->id, net_dev, req->port))
			list_for_each_entry(id_priv_dev,
					    &id_priv->listen_list,
				if (id_priv_dev->id.device == cm_id->device &&
				    cma_match_net_dev(&id_priv_dev->id, net_dev, req->port))

	return ERR_PTR(-EINVAL);

static struct rdma_id_private *cma_id_from_event(struct ib_cm_id *cm_id,
						 struct ib_cm_event *ib_event,
						 struct net_device **net_dev)
	struct cma_req_info req;
	struct rdma_bind_list *bind_list;
	struct rdma_id_private *id_priv;

	err = cma_save_req_info(ib_event, &req);
		return ERR_PTR(err);

	if (rdma_ps_from_service_id(cm_id->service_id) == RDMA_PS_SDP) {
		goto there_is_no_net_dev;

	*net_dev = cma_get_net_dev(ib_event, &req);
	if (IS_ERR(*net_dev)) {
		if (PTR_ERR(*net_dev) == -EAFNOSUPPORT) {
			/* Assuming the protocol is AF_IB */
			return ERR_CAST(*net_dev);

there_is_no_net_dev:
	bind_list = cma_ps_find(*net_dev ? dev_net(*net_dev) : &init_net,
				rdma_ps_from_service_id(req.service_id),
				cma_port_from_service_id(req.service_id));
	id_priv = cma_find_listener(bind_list, cm_id, ib_event, &req, *net_dev);
	if (IS_ERR(id_priv) && *net_dev) {
static inline int cma_user_data_offset(struct rdma_id_private *id_priv)
	if (cma_family(id_priv) == AF_IB)
	if (id_priv->id.ps == RDMA_PS_SDP)
	return sizeof(struct cma_hdr);

static void cma_cancel_route(struct rdma_id_private *id_priv)
	if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) {
			ib_sa_cancel_query(id_priv->query_id, id_priv->query);

static void cma_cancel_listens(struct rdma_id_private *id_priv)
	struct rdma_id_private *dev_id_priv;

	 * Remove from listen_any_list to prevent added devices from spawning
	 * additional listen requests.

	list_del(&id_priv->list);

	while (!list_empty(&id_priv->listen_list)) {
		dev_id_priv = list_entry(id_priv->listen_list.next,
					 struct rdma_id_private, listen_list);
		/* sync with device removal to avoid duplicate destruction */
		list_del_init(&dev_id_priv->list);
		list_del(&dev_id_priv->listen_list);
		mutex_unlock(&lock);

		rdma_destroy_id(&dev_id_priv->id);
	mutex_unlock(&lock);

static void cma_cancel_operation(struct rdma_id_private *id_priv,
				 enum rdma_cm_state state)
	case RDMA_CM_ADDR_QUERY:
		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
	case RDMA_CM_ROUTE_QUERY:
		cma_cancel_route(id_priv);
	case RDMA_CM_LISTEN:
		if (cma_any_addr(cma_src_addr(id_priv)) && !id_priv->cma_dev)
			cma_cancel_listens(id_priv);

static void cma_release_port(struct rdma_id_private *id_priv)
	struct rdma_bind_list *bind_list = id_priv->bind_list;
	struct vnet *net = id_priv->id.route.addr.dev_addr.net;

	hlist_del(&id_priv->node);
	if (hlist_empty(&bind_list->owners)) {
		cma_ps_remove(net, bind_list->ps, bind_list->port);
	mutex_unlock(&lock);

static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
	struct cma_multicast *mc;

	while (!list_empty(&id_priv->mc_list)) {
		mc = container_of(id_priv->mc_list.next,
				  struct cma_multicast, list);
		list_del(&mc->list);
		if (rdma_cap_ib_mcast(id_priv->cma_dev->device,
				      id_priv->id.port_num)) {
			ib_sa_free_multicast(mc->multicast.ib);
			if (mc->igmp_joined) {
				struct rdma_dev_addr *dev_addr =
					&id_priv->id.route.addr.dev_addr;
				struct net_device *ndev = NULL;

				if (dev_addr->bound_dev_if)
					ndev = dev_get_by_index(dev_addr->net,
								dev_addr->bound_dev_if);
					    &mc->multicast.ib->rec.mgid,
			kref_put(&mc->mcref, release_mc);

void rdma_destroy_id(struct rdma_cm_id *id)
	struct rdma_id_private *id_priv;
	enum rdma_cm_state state;

	id_priv = container_of(id, struct rdma_id_private, id);
	state = cma_exch(id_priv, RDMA_CM_DESTROYING);
	cma_cancel_operation(id_priv, state);

	 * Wait for any active callback to finish. New callbacks will find
	 * the id_priv state set to destroying and abort.

	mutex_lock(&id_priv->handler_mutex);
	mutex_unlock(&id_priv->handler_mutex);

	if (id_priv->cma_dev) {
		if (rdma_cap_ib_cm(id_priv->id.device, 1)) {
			if (id_priv->cm_id.ib)
				ib_destroy_cm_id(id_priv->cm_id.ib);
		} else if (rdma_cap_iw_cm(id_priv->id.device, 1)) {
			if (id_priv->cm_id.iw)
				iw_destroy_cm_id(id_priv->cm_id.iw);
		cma_leave_mc_groups(id_priv);
		cma_release_dev(id_priv);

	cma_release_port(id_priv);
	cma_deref_id(id_priv);
	wait_for_completion(&id_priv->comp);

	if (id_priv->internal_id)
		cma_deref_id(id_priv->id.context);

	kfree(id_priv->id.route.path_rec);
EXPORT_SYMBOL(rdma_destroy_id);
static int cma_rep_recv(struct rdma_id_private *id_priv)
	ret = cma_modify_qp_rtr(id_priv, NULL);

	ret = cma_modify_qp_rts(id_priv, NULL);

	ret = ib_send_cm_rtu(id_priv->cm_id.ib, NULL, 0);

	cma_modify_qp_err(id_priv);
	ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED,

static int sdp_verify_rep(const struct sdp_hah *data)
	if (sdp_get_majv(data->majv_minv) != SDP_MAJ_VERSION)

static void cma_set_rep_event_data(struct rdma_cm_event *event,
				   struct ib_cm_rep_event_param *rep_data,
	event->param.conn.private_data = private_data;
	event->param.conn.private_data_len = IB_CM_REP_PRIVATE_DATA_SIZE;
	event->param.conn.responder_resources = rep_data->responder_resources;
	event->param.conn.initiator_depth = rep_data->initiator_depth;
	event->param.conn.flow_control = rep_data->flow_control;
	event->param.conn.rnr_retry_count = rep_data->rnr_retry_count;
	event->param.conn.srq = rep_data->srq;
	event->param.conn.qp_num = rep_data->remote_qpn;

static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
	struct rdma_id_private *id_priv = cm_id->context;
	struct rdma_cm_event event;

	mutex_lock(&id_priv->handler_mutex);
	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
	     id_priv->state != RDMA_CM_CONNECT) ||
	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
	     id_priv->state != RDMA_CM_DISCONNECT))

	memset(&event, 0, sizeof event);
	switch (ib_event->event) {
	case IB_CM_REQ_ERROR:
	case IB_CM_REP_ERROR:
		event.event = RDMA_CM_EVENT_UNREACHABLE;
		event.status = -ETIMEDOUT;
	case IB_CM_REP_RECEIVED:
		if (id_priv->id.ps == RDMA_PS_SDP) {
			event.status = sdp_verify_rep(ib_event->private_data);
				event.event = RDMA_CM_EVENT_CONNECT_ERROR;
				event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		if (id_priv->id.qp) {
			event.status = cma_rep_recv(id_priv);
			event.event = event.status ? RDMA_CM_EVENT_CONNECT_ERROR :
						     RDMA_CM_EVENT_ESTABLISHED;
			event.event = RDMA_CM_EVENT_CONNECT_RESPONSE;
		cma_set_rep_event_data(&event, &ib_event->param.rep_rcvd,
				       ib_event->private_data);
	case IB_CM_RTU_RECEIVED:
	case IB_CM_USER_ESTABLISHED:
		event.event = RDMA_CM_EVENT_ESTABLISHED;
	case IB_CM_DREQ_ERROR:
		event.status = -ETIMEDOUT; /* fall through */
	case IB_CM_DREQ_RECEIVED:
	case IB_CM_DREP_RECEIVED:
		if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
				   RDMA_CM_DISCONNECT))
		event.event = RDMA_CM_EVENT_DISCONNECTED;
	case IB_CM_TIMEWAIT_EXIT:
		event.event = RDMA_CM_EVENT_TIMEWAIT_EXIT;
	case IB_CM_MRA_RECEIVED:
	case IB_CM_REJ_RECEIVED:
		cma_modify_qp_err(id_priv);
		event.status = ib_event->param.rej_rcvd.reason;
		event.event = RDMA_CM_EVENT_REJECTED;
		event.param.conn.private_data = ib_event->private_data;
		event.param.conn.private_data_len = IB_CM_REJ_PRIVATE_DATA_SIZE;
		pr_err("RDMA CMA: unexpected IB CM event: %d\n",

	ret = id_priv->id.event_handler(&id_priv->id, &event);
		/* Destroy the CM ID by returning a non-zero value. */
		id_priv->cm_id.ib = NULL;
		cma_exch(id_priv, RDMA_CM_DESTROYING);
		mutex_unlock(&id_priv->handler_mutex);
		rdma_destroy_id(&id_priv->id);

	mutex_unlock(&id_priv->handler_mutex);

static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
					       struct ib_cm_event *ib_event,
					       struct net_device *net_dev)
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	struct rdma_route *rt;
	const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
	const __be64 service_id =
		ib_event->param.req_rcvd.primary_path->service_id;

	id = rdma_create_id(listen_id->route.addr.dev_addr.net,
			    listen_id->event_handler, listen_id->context,
			    listen_id->ps, ib_event->param.req_rcvd.qp_type);

	id_priv = container_of(id, struct rdma_id_private, id);
	if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
			      (struct sockaddr *)&id->route.addr.dst_addr,
			      listen_id, ib_event, ss_family, service_id))

	rt->num_paths = ib_event->param.req_rcvd.alternate_path ? 2 : 1;
	rt->path_rec = kmalloc(sizeof *rt->path_rec * rt->num_paths,

	rt->path_rec[0] = *ib_event->param.req_rcvd.primary_path;
	if (rt->num_paths == 2)
		rt->path_rec[1] = *ib_event->param.req_rcvd.alternate_path;

		ret = rdma_copy_addr(&rt->addr.dev_addr, net_dev, NULL);

		if (!cma_protocol_roce(listen_id) &&
		    cma_any_addr(cma_src_addr(id_priv))) {
			rt->addr.dev_addr.dev_type = ARPHRD_INFINIBAND;
			rdma_addr_set_sgid(&rt->addr.dev_addr, &rt->path_rec[0].sgid);
			ib_addr_set_pkey(&rt->addr.dev_addr, be16_to_cpu(rt->path_rec[0].pkey));
		} else if (!cma_any_addr(cma_src_addr(id_priv))) {
			ret = cma_translate_addr(cma_src_addr(id_priv), &rt->addr.dev_addr);

	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);

	id_priv->state = RDMA_CM_CONNECT;

	rdma_destroy_id(id);

static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
					      struct ib_cm_event *ib_event,
					      struct net_device *net_dev)
	struct rdma_id_private *id_priv;
	struct rdma_cm_id *id;
	const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
	struct vnet *net = listen_id->route.addr.dev_addr.net;

	id = rdma_create_id(net, listen_id->event_handler, listen_id->context,
			    listen_id->ps, IB_QPT_UD);

	id_priv = container_of(id, struct rdma_id_private, id);
	if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr,
			      (struct sockaddr *)&id->route.addr.dst_addr,
			      listen_id, ib_event, ss_family,
			      ib_event->param.sidr_req_rcvd.service_id))

		ret = rdma_copy_addr(&id->route.addr.dev_addr, net_dev, NULL);

		if (!cma_any_addr(cma_src_addr(id_priv))) {
			ret = cma_translate_addr(cma_src_addr(id_priv),
						 &id->route.addr.dev_addr);

	id_priv->state = RDMA_CM_CONNECT;

	rdma_destroy_id(id);

static void cma_set_req_event_data(struct rdma_cm_event *event,
				   struct ib_cm_req_event_param *req_data,
				   void *private_data, int offset)
	event->param.conn.private_data = (char *)private_data + offset;
	event->param.conn.private_data_len = IB_CM_REQ_PRIVATE_DATA_SIZE - offset;
	event->param.conn.responder_resources = req_data->responder_resources;
	event->param.conn.initiator_depth = req_data->initiator_depth;
	event->param.conn.flow_control = req_data->flow_control;
	event->param.conn.retry_count = req_data->retry_count;
	event->param.conn.rnr_retry_count = req_data->rnr_retry_count;
	event->param.conn.srq = req_data->srq;
	event->param.conn.qp_num = req_data->remote_qpn;

static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
	return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
		 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
		((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
		 (id->qp_type == IB_QPT_UD)) ||

static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
	struct rdma_id_private *listen_id, *conn_id = NULL;
	struct rdma_cm_event event;
	struct net_device *net_dev;

	listen_id = cma_id_from_event(cm_id, ib_event, &net_dev);
	if (IS_ERR(listen_id))
		return PTR_ERR(listen_id);

	if (!cma_check_req_qp_type(&listen_id->id, ib_event)) {

	mutex_lock(&listen_id->handler_mutex);
	if (listen_id->state != RDMA_CM_LISTEN) {
		ret = -ECONNABORTED;

	memset(&event, 0, sizeof event);
	offset = cma_user_data_offset(listen_id);
	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
	if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED) {
		conn_id = cma_new_udp_id(&listen_id->id, ib_event, net_dev);
		event.param.ud.private_data = (char *)ib_event->private_data + offset;
		event.param.ud.private_data_len =
			IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE - offset;
		conn_id = cma_new_conn_id(&listen_id->id, ib_event, net_dev);
		cma_set_req_event_data(&event, &ib_event->param.req_rcvd,
				       ib_event->private_data, offset);

	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
	ret = cma_acquire_dev(conn_id, listen_id);

	conn_id->cm_id.ib = cm_id;
	cm_id->context = conn_id;
	cm_id->cm_handler = cma_ib_handler;

	 * Protect against the user destroying conn_id from another thread
	 * until we're done accessing it.

	atomic_inc(&conn_id->refcount);
	ret = conn_id->id.event_handler(&conn_id->id, &event);

	 * Acquire mutex to prevent user executing rdma_destroy_id()
	 * while we're accessing the cm_id.

	if (cma_comp(conn_id, RDMA_CM_CONNECT) &&
	    (conn_id->id.qp_type != IB_QPT_UD))
		ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
	mutex_unlock(&lock);
	mutex_unlock(&conn_id->handler_mutex);
	mutex_unlock(&listen_id->handler_mutex);
	cma_deref_id(conn_id);

	cma_deref_id(conn_id);
	/* Destroy the CM ID by returning a non-zero value. */
	conn_id->cm_id.ib = NULL;

	cma_exch(conn_id, RDMA_CM_DESTROYING);
	mutex_unlock(&conn_id->handler_mutex);

	mutex_unlock(&listen_id->handler_mutex);

	rdma_destroy_id(&conn_id->id);

__be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr)
	if (addr->sa_family == AF_IB)
		return ((struct sockaddr_ib *) addr)->sib_sid;

	return cpu_to_be64(((u64)id->ps << 16) + be16_to_cpu(cma_port(addr)));
EXPORT_SYMBOL(rdma_get_service_id);
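
/*
 * For non-AF_IB addresses the service ID packs the port space into the
 * upper bits and the port into the low 16 bits, matching
 * cma_port_from_service_id() and rdma_ps_from_service_id() above.
 * Worked example (sketch): RDMA_PS_TCP (0x0106) with port 18515 gives
 * cpu_to_be64(((u64)0x0106 << 16) + 18515) == cpu_to_be64(0x01064853).
 */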
2232 static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
2234 struct rdma_id_private *id_priv = iw_id->context;
2235 struct rdma_cm_event event;
2237 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
2238 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
2240 mutex_lock(&id_priv->handler_mutex);
2241 if (id_priv->state != RDMA_CM_CONNECT)
2244 memset(&event, 0, sizeof event);
2245 switch (iw_event->event) {
2246 case IW_CM_EVENT_CLOSE:
2247 event.event = RDMA_CM_EVENT_DISCONNECTED;
2249 case IW_CM_EVENT_CONNECT_REPLY:
2250 memcpy(cma_src_addr(id_priv), laddr,
2251 rdma_addr_size(laddr));
2252 memcpy(cma_dst_addr(id_priv), raddr,
2253 rdma_addr_size(raddr));
2254 switch (iw_event->status) {
2256 event.event = RDMA_CM_EVENT_ESTABLISHED;
2257 event.param.conn.initiator_depth = iw_event->ird;
2258 event.param.conn.responder_resources = iw_event->ord;
2262 event.event = RDMA_CM_EVENT_REJECTED;
2265 event.event = RDMA_CM_EVENT_UNREACHABLE;
2268 event.event = RDMA_CM_EVENT_CONNECT_ERROR;
2272 case IW_CM_EVENT_ESTABLISHED:
2273 event.event = RDMA_CM_EVENT_ESTABLISHED;
2274 event.param.conn.initiator_depth = iw_event->ird;
2275 event.param.conn.responder_resources = iw_event->ord;
2281 event.status = iw_event->status;
2282 event.param.conn.private_data = iw_event->private_data;
2283 event.param.conn.private_data_len = iw_event->private_data_len;
2284 ret = id_priv->id.event_handler(&id_priv->id, &event);
2286 /* Destroy the CM ID by returning a non-zero value. */
2287 id_priv->cm_id.iw = NULL;
2288 cma_exch(id_priv, RDMA_CM_DESTROYING);
2289 mutex_unlock(&id_priv->handler_mutex);
2290 rdma_destroy_id(&id_priv->id);
2295 mutex_unlock(&id_priv->handler_mutex);
2299 static int iw_conn_req_handler(struct iw_cm_id *cm_id,
2300 struct iw_cm_event *iw_event)
2302 struct rdma_cm_id *new_cm_id;
2303 struct rdma_id_private *listen_id, *conn_id;
2304 struct rdma_cm_event event;
2305 int ret = -ECONNABORTED;
2306 struct sockaddr *laddr = (struct sockaddr *)&iw_event->local_addr;
2307 struct sockaddr *raddr = (struct sockaddr *)&iw_event->remote_addr;
2309 listen_id = cm_id->context;
2311 mutex_lock(&listen_id->handler_mutex);
2312 if (listen_id->state != RDMA_CM_LISTEN)
2315 /* Create a new RDMA id for the new IW CM ID */
2316 new_cm_id = rdma_create_id(listen_id->id.route.addr.dev_addr.net,
2317 listen_id->id.event_handler,
2318 listen_id->id.context,
2319 RDMA_PS_TCP, IB_QPT_RC);
2320 if (IS_ERR(new_cm_id)) {
2324 conn_id = container_of(new_cm_id, struct rdma_id_private, id);
2325 mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
2326 conn_id->state = RDMA_CM_CONNECT;
2328 ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr);
2330 mutex_unlock(&conn_id->handler_mutex);
2331 rdma_destroy_id(new_cm_id);
2335 ret = cma_acquire_dev(conn_id, listen_id);
2337 mutex_unlock(&conn_id->handler_mutex);
2338 rdma_destroy_id(new_cm_id);
2342 conn_id->cm_id.iw = cm_id;
2343 cm_id->context = conn_id;
2344 cm_id->cm_handler = cma_iw_handler;
2346 memcpy(cma_src_addr(conn_id), laddr, rdma_addr_size(laddr));
2347 memcpy(cma_dst_addr(conn_id), raddr, rdma_addr_size(raddr));
2349 memset(&event, 0, sizeof event);
2350 event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
2351 event.param.conn.private_data = iw_event->private_data;
2352 event.param.conn.private_data_len = iw_event->private_data_len;
2353 event.param.conn.initiator_depth = iw_event->ird;
2354 event.param.conn.responder_resources = iw_event->ord;
2357 * Protect against the user destroying conn_id from another thread
2358 * until we're done accessing it.
2360 atomic_inc(&conn_id->refcount);
2361 ret = conn_id->id.event_handler(&conn_id->id, &event);
2363 /* User wants to destroy the CM ID */
2364 conn_id->cm_id.iw = NULL;
2365 cma_exch(conn_id, RDMA_CM_DESTROYING);
2366 mutex_unlock(&conn_id->handler_mutex);
2367 cma_deref_id(conn_id);
2368 rdma_destroy_id(&conn_id->id);
2372 mutex_unlock(&conn_id->handler_mutex);
2373 cma_deref_id(conn_id);
2376 mutex_unlock(&listen_id->handler_mutex);
2380 static int cma_ib_listen(struct rdma_id_private *id_priv)
2382 struct sockaddr *addr;
2383 struct ib_cm_id *id;
2386 addr = cma_src_addr(id_priv);
2387 svc_id = rdma_get_service_id(&id_priv->id, addr);
2388 id = ib_cm_insert_listen(id_priv->id.device, cma_req_handler, svc_id);
2391 id_priv->cm_id.ib = id;
2396 static int cma_iw_listen(struct rdma_id_private *id_priv, int backlog)
2399 struct iw_cm_id *id;
2401 id = iw_create_cm_id(id_priv->id.device,
2402 iw_conn_req_handler,
2407 id->tos = id_priv->tos;
2408 id_priv->cm_id.iw = id;
2410 memcpy(&id_priv->cm_id.iw->local_addr, cma_src_addr(id_priv),
2411 rdma_addr_size(cma_src_addr(id_priv)));
2413 ret = iw_cm_listen(id_priv->cm_id.iw, backlog);
2416 iw_destroy_cm_id(id_priv->cm_id.iw);
2417 id_priv->cm_id.iw = NULL;
2423 static int cma_listen_handler(struct rdma_cm_id *id,
2424 struct rdma_cm_event *event)
2426 struct rdma_id_private *id_priv = id->context;
2428 id->context = id_priv->id.context;
2429 id->event_handler = id_priv->id.event_handler;
2430 return id_priv->id.event_handler(id, event);
2433 static void cma_listen_on_dev(struct rdma_id_private *id_priv,
2434 struct cma_device *cma_dev)
2436 struct rdma_id_private *dev_id_priv;
2437 struct rdma_cm_id *id;
2438 struct vnet *net = id_priv->id.route.addr.dev_addr.net;
2441 if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1))
2444 id = rdma_create_id(net, cma_listen_handler, id_priv, id_priv->id.ps,
2445 id_priv->id.qp_type);
2449 dev_id_priv = container_of(id, struct rdma_id_private, id);
2451 dev_id_priv->state = RDMA_CM_ADDR_BOUND;
2452 memcpy(cma_src_addr(dev_id_priv), cma_src_addr(id_priv),
2453 rdma_addr_size(cma_src_addr(id_priv)));
2455 _cma_attach_to_dev(dev_id_priv, cma_dev);
2456 list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list);
2457 atomic_inc(&id_priv->refcount);
2458 dev_id_priv->internal_id = 1;
2459 dev_id_priv->afonly = id_priv->afonly;
2461 ret = rdma_listen(id, id_priv->backlog);
2463 pr_warn("RDMA CMA: cma_listen_on_dev, error %d, listening on device %s\n",
2464 ret, cma_dev->device->name);
2467 static void cma_listen_on_all(struct rdma_id_private *id_priv)
2469 struct cma_device *cma_dev;
2472 list_add_tail(&id_priv->list, &listen_any_list);
2473 list_for_each_entry(cma_dev, &dev_list, list)
2474 cma_listen_on_dev(id_priv, cma_dev);
2475 mutex_unlock(&lock);
2478 void rdma_set_service_type(struct rdma_cm_id *id, int tos)
2480 struct rdma_id_private *id_priv;
2482 id_priv = container_of(id, struct rdma_id_private, id);
2483 id_priv->tos = (u8) tos;
2485 EXPORT_SYMBOL(rdma_set_service_type);
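/*
 * Illustrative sketch (not part of this file's logic): rdma_set_service_type()
 * is normally called before route resolution so the TOS value can feed the
 * QoS class / traffic class fields filled in by cma_query_ib_route() and
 * cma_resolve_iboe_route() below.  "id" is any already-created rdma_cm_id and
 * the TOS value and timeout are hypothetical.
 *
 *	rdma_set_service_type(id, 0x18);
 *	ret = rdma_resolve_route(id, 2000);	// TOS now influences SL/TC
 */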
2487 static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
2490 struct cma_work *work = context;
2491 struct rdma_route *route;
2493 route = &work->id->id.route;
2496 route->num_paths = 1;
2497 *route->path_rec = *path_rec;
2499 work->old_state = RDMA_CM_ROUTE_QUERY;
2500 work->new_state = RDMA_CM_ADDR_RESOLVED;
2501 work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
2502 work->event.status = status;
2505 queue_work(cma_wq, &work->work);
2508 static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
2509 struct cma_work *work)
2511 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
2512 struct ib_sa_path_rec path_rec;
2513 ib_sa_comp_mask comp_mask;
2514 struct sockaddr_in6 *sin6;
2515 struct sockaddr_ib *sib;
2517 memset(&path_rec, 0, sizeof path_rec);
2518 rdma_addr_get_sgid(dev_addr, &path_rec.sgid);
2519 rdma_addr_get_dgid(dev_addr, &path_rec.dgid);
2520 path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
2521 path_rec.numb_path = 1;
2522 path_rec.reversible = 1;
2523 path_rec.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
2525 comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
2526 IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
2527 IB_SA_PATH_REC_REVERSIBLE | IB_SA_PATH_REC_SERVICE_ID;
2529 switch (cma_family(id_priv)) {
2531 path_rec.qos_class = cpu_to_be16((u16) id_priv->tos);
2532 comp_mask |= IB_SA_PATH_REC_QOS_CLASS;
2535 sin6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
2536 path_rec.traffic_class = (u8) (be32_to_cpu(sin6->sin6_flowinfo) >> 20);
2537 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
2540 sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
2541 path_rec.traffic_class = (u8) (be32_to_cpu(sib->sib_flowinfo) >> 20);
2542 comp_mask |= IB_SA_PATH_REC_TRAFFIC_CLASS;
2546 id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
2547 id_priv->id.port_num, &path_rec,
2548 comp_mask, timeout_ms,
2549 GFP_KERNEL, cma_query_handler,
2550 work, &id_priv->query);
2552 return (id_priv->query_id < 0) ? id_priv->query_id : 0;
2555 static void cma_work_handler(struct work_struct *_work)
2557 struct cma_work *work = container_of(_work, struct cma_work, work);
2558 struct rdma_id_private *id_priv = work->id;
2561 mutex_lock(&id_priv->handler_mutex);
2562 if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
2565 if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
2566 cma_exch(id_priv, RDMA_CM_DESTROYING);
2570 mutex_unlock(&id_priv->handler_mutex);
2571 cma_deref_id(id_priv);
2573 rdma_destroy_id(&id_priv->id);
2577 static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
2579 struct rdma_route *route = &id_priv->id.route;
2580 struct cma_work *work;
2583 work = kzalloc(sizeof *work, GFP_KERNEL);
2588 INIT_WORK(&work->work, cma_work_handler);
2589 work->old_state = RDMA_CM_ROUTE_QUERY;
2590 work->new_state = RDMA_CM_ROUTE_RESOLVED;
2591 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
2593 route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
2594 if (!route->path_rec) {
2599 ret = cma_query_ib_route(id_priv, timeout_ms, work);
2605 kfree(route->path_rec);
2606 route->path_rec = NULL;
2612 int rdma_set_ib_paths(struct rdma_cm_id *id,
2613 struct ib_sa_path_rec *path_rec, int num_paths)
2615 struct rdma_id_private *id_priv;
2618 id_priv = container_of(id, struct rdma_id_private, id);
2619 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
2620 RDMA_CM_ROUTE_RESOLVED))
2623 id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
2625 if (!id->route.path_rec) {
2630 id->route.num_paths = num_paths;
2633 cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
2636 EXPORT_SYMBOL(rdma_set_ib_paths);
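/*
 * Illustrative sketch (not part of this file's logic): a consumer that
 * already holds a path record, for instance one cached from an earlier
 * connection, can install it with rdma_set_ib_paths() instead of waiting for
 * the SA query issued by rdma_resolve_route().  "cached_rec" and "conn_param"
 * are hypothetical caller-owned objects.
 *
 *	ret = rdma_set_ib_paths(id, &cached_rec, 1);
 *	if (ret == 0)
 *		ret = rdma_connect(id, &conn_param);
 */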
2638 static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
2640 struct cma_work *work;
2642 work = kzalloc(sizeof *work, GFP_KERNEL);
2647 INIT_WORK(&work->work, cma_work_handler);
2648 work->old_state = RDMA_CM_ROUTE_QUERY;
2649 work->new_state = RDMA_CM_ROUTE_RESOLVED;
2650 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
2651 queue_work(cma_wq, &work->work);
2655 static int iboe_tos_to_sl(struct net_device *ndev, int tos)
2657 /* get the service level (SL) from the IPv4 type of service (TOS) */
2658 int sl = (tos >> 5) & 0x7;
2660 /* final mappings are done by the vendor-specific drivers */
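/*
 * Example: a TOS of 0xa0 has precedence bits 101b, so the default mapping
 * above yields SL 5; only the top three TOS bits take part in this mapping.
 */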
2664 static enum ib_gid_type cma_route_gid_type(enum rdma_network_type network_type,
2665 unsigned long supported_gids,
2666 enum ib_gid_type default_gid)
2668 if ((network_type == RDMA_NETWORK_IPV4 ||
2669 network_type == RDMA_NETWORK_IPV6) &&
2670 test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids))
2671 return IB_GID_TYPE_ROCE_UDP_ENCAP;
2676 static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
2678 struct rdma_route *route = &id_priv->id.route;
2679 struct rdma_addr *addr = &route->addr;
2680 struct cma_work *work;
2682 struct net_device *ndev = NULL;
2685 work = kzalloc(sizeof *work, GFP_KERNEL);
2690 INIT_WORK(&work->work, cma_work_handler);
2692 route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL);
2693 if (!route->path_rec) {
2698 route->num_paths = 1;
2700 if (addr->dev_addr.bound_dev_if) {
2701 unsigned long supported_gids;
2703 ndev = dev_get_by_index(addr->dev_addr.net,
2704 addr->dev_addr.bound_dev_if);
2710 route->path_rec->net = ndev->if_vnet;
2711 route->path_rec->ifindex = ndev->if_index;
2712 supported_gids = roce_gid_type_mask_support(id_priv->id.device,
2713 id_priv->id.port_num);
2714 route->path_rec->gid_type =
2715 cma_route_gid_type(addr->dev_addr.network,
2724 memcpy(route->path_rec->dmac, addr->dev_addr.dst_dev_addr, ETH_ALEN);
2726 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
2727 &route->path_rec->sgid);
2728 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr,
2729 &route->path_rec->dgid);
2731 /* Use the hint from the IP stack to select the GID type */
2732 if (route->path_rec->gid_type < ib_network_to_gid_type(addr->dev_addr.network))
2733 route->path_rec->gid_type = ib_network_to_gid_type(addr->dev_addr.network);
2734 if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB)
2735 /* TODO: get the hoplimit from the inet/inet6 device */
2736 route->path_rec->hop_limit = addr->dev_addr.hoplimit;
2738 route->path_rec->hop_limit = 1;
2739 route->path_rec->reversible = 1;
2740 route->path_rec->pkey = cpu_to_be16(0xffff);
2741 route->path_rec->mtu_selector = IB_SA_EQ;
2742 route->path_rec->sl = iboe_tos_to_sl(ndev, id_priv->tos);
2743 route->path_rec->traffic_class = id_priv->tos;
2744 route->path_rec->mtu = iboe_get_mtu(ndev->if_mtu);
2745 route->path_rec->rate_selector = IB_SA_EQ;
2746 route->path_rec->rate = iboe_get_rate(ndev);
2748 route->path_rec->packet_life_time_selector = IB_SA_EQ;
2749 route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME;
2750 if (!route->path_rec->mtu) {
2755 work->old_state = RDMA_CM_ROUTE_QUERY;
2756 work->new_state = RDMA_CM_ROUTE_RESOLVED;
2757 work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
2758 work->event.status = 0;
2760 queue_work(cma_wq, &work->work);
2765 kfree(route->path_rec);
2766 route->path_rec = NULL;
2772 int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
2774 struct rdma_id_private *id_priv;
2777 id_priv = container_of(id, struct rdma_id_private, id);
2778 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
2781 atomic_inc(&id_priv->refcount);
2782 if (rdma_cap_ib_sa(id->device, id->port_num))
2783 ret = cma_resolve_ib_route(id_priv, timeout_ms);
2784 else if (rdma_protocol_roce(id->device, id->port_num))
2785 ret = cma_resolve_iboe_route(id_priv);
2786 else if (rdma_protocol_iwarp(id->device, id->port_num))
2787 ret = cma_resolve_iw_route(id_priv, timeout_ms);
2796 cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
2797 cma_deref_id(id_priv);
2800 EXPORT_SYMBOL(rdma_resolve_route);
2802 static void cma_set_loopback(struct sockaddr *addr)
2804 switch (addr->sa_family) {
2806 ((struct sockaddr_in *) addr)->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
2809 ipv6_addr_set(&((struct sockaddr_in6 *) addr)->sin6_addr,
2813 ib_addr_set(&((struct sockaddr_ib *) addr)->sib_addr,
2819 static int cma_bind_loopback(struct rdma_id_private *id_priv)
2821 struct cma_device *cma_dev, *cur_dev;
2822 struct ib_port_attr port_attr;
2830 list_for_each_entry(cur_dev, &dev_list, list) {
2831 if (cma_family(id_priv) == AF_IB &&
2832 !rdma_cap_ib_cm(cur_dev->device, 1))
2838 for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) {
2839 if (!ib_query_port(cur_dev->device, p, &port_attr) &&
2840 port_attr.state == IB_PORT_ACTIVE) {
2855 ret = ib_get_cached_gid(cma_dev->device, p, 0, &gid, NULL);
2859 ret = ib_get_cached_pkey(cma_dev->device, p, 0, &pkey);
2863 id_priv->id.route.addr.dev_addr.dev_type =
2864 (rdma_protocol_ib(cma_dev->device, p)) ?
2865 ARPHRD_INFINIBAND : ARPHRD_ETHER;
2867 rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid);
2868 ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey);
2869 id_priv->id.port_num = p;
2870 cma_attach_to_dev(id_priv, cma_dev);
2871 cma_set_loopback(cma_src_addr(id_priv));
2873 mutex_unlock(&lock);
2877 static void addr_handler(int status, struct sockaddr *src_addr,
2878 struct rdma_dev_addr *dev_addr, void *context)
2880 struct rdma_id_private *id_priv = context;
2881 struct rdma_cm_event event;
2883 memset(&event, 0, sizeof event);
2884 mutex_lock(&id_priv->handler_mutex);
2885 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
2886 RDMA_CM_ADDR_RESOLVED))
2889 memcpy(cma_src_addr(id_priv), src_addr, rdma_addr_size(src_addr));
2890 if (!status && !id_priv->cma_dev)
2891 status = cma_acquire_dev(id_priv, NULL);
2894 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
2895 RDMA_CM_ADDR_BOUND))
2897 event.event = RDMA_CM_EVENT_ADDR_ERROR;
2898 event.status = status;
2900 event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
2902 if (id_priv->id.event_handler(&id_priv->id, &event)) {
2903 cma_exch(id_priv, RDMA_CM_DESTROYING);
2904 mutex_unlock(&id_priv->handler_mutex);
2905 cma_deref_id(id_priv);
2906 rdma_destroy_id(&id_priv->id);
2910 mutex_unlock(&id_priv->handler_mutex);
2911 cma_deref_id(id_priv);
2914 static int cma_resolve_loopback(struct rdma_id_private *id_priv)
2916 struct cma_work *work;
2920 work = kzalloc(sizeof *work, GFP_KERNEL);
2924 if (!id_priv->cma_dev) {
2925 ret = cma_bind_loopback(id_priv);
2930 rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid);
2931 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid);
2934 INIT_WORK(&work->work, cma_work_handler);
2935 work->old_state = RDMA_CM_ADDR_QUERY;
2936 work->new_state = RDMA_CM_ADDR_RESOLVED;
2937 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
2938 queue_work(cma_wq, &work->work);
2945 static int cma_resolve_ib_addr(struct rdma_id_private *id_priv)
2947 struct cma_work *work;
2950 work = kzalloc(sizeof *work, GFP_KERNEL);
2954 if (!id_priv->cma_dev) {
2955 ret = cma_resolve_ib_dev(id_priv);
2960 rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *)
2961 &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr));
2964 INIT_WORK(&work->work, cma_work_handler);
2965 work->old_state = RDMA_CM_ADDR_QUERY;
2966 work->new_state = RDMA_CM_ADDR_RESOLVED;
2967 work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
2968 queue_work(cma_wq, &work->work);
2975 static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
2976 struct sockaddr *dst_addr)
2978 if (!src_addr || !src_addr->sa_family) {
2979 src_addr = (struct sockaddr *) &id->route.addr.src_addr;
2980 src_addr->sa_family = dst_addr->sa_family;
2981 if (dst_addr->sa_family == AF_INET6) {
2982 struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
2983 struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
2984 src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
2985 if (IN6_IS_SCOPE_LINKLOCAL(&dst_addr6->sin6_addr) ||
2986 IN6_IS_ADDR_MC_INTFACELOCAL(&dst_addr6->sin6_addr))
2987 id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id;
2988 } else if (dst_addr->sa_family == AF_IB) {
2989 ((struct sockaddr_ib *) src_addr)->sib_pkey =
2990 ((struct sockaddr_ib *) dst_addr)->sib_pkey;
2993 return rdma_bind_addr(id, src_addr);
2996 int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
2997 struct sockaddr *dst_addr, int timeout_ms)
2999 struct rdma_id_private *id_priv;
3002 id_priv = container_of(id, struct rdma_id_private, id);
3003 if (id_priv->state == RDMA_CM_IDLE) {
3004 ret = cma_bind_addr(id, src_addr, dst_addr);
3009 if (cma_family(id_priv) != dst_addr->sa_family)
3012 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
3015 atomic_inc(&id_priv->refcount);
3016 memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
3017 if (cma_any_addr(dst_addr)) {
3018 ret = cma_resolve_loopback(id_priv);
3020 if (dst_addr->sa_family == AF_IB) {
3021 ret = cma_resolve_ib_addr(id_priv);
3023 ret = cma_check_linklocal(&id->route.addr.dev_addr, dst_addr);
3027 ret = rdma_resolve_ip(&addr_client, cma_src_addr(id_priv),
3028 dst_addr, &id->route.addr.dev_addr,
3029 timeout_ms, addr_handler, id_priv);
3037 cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
3038 cma_deref_id(id_priv);
3041 EXPORT_SYMBOL(rdma_resolve_addr);
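/*
 * Illustrative sketch (not part of this file's logic): the usual active-side
 * flow drives rdma_resolve_addr() and rdma_resolve_route() from the CM event
 * handler.  All names below (example_handler, example_dial, "net", "dst") are
 * hypothetical, the 2000 ms timeouts are arbitrary, and error handling is
 * abbreviated.
 */
#if 0
static int example_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		/* address is known; kick off route resolution */
		return rdma_resolve_route(id, 2000);
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		/* connection parameters would be filled in and sent here */
		break;
	default:
		break;
	}
	return 0;
}

static int example_dial(struct vnet *net, struct sockaddr *dst)
{
	struct rdma_cm_id *id;

	id = rdma_create_id(net, example_handler, NULL, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return PTR_ERR(id);
	/* a NULL source address lets cma_bind_addr() pick one for us */
	return rdma_resolve_addr(id, NULL, dst, 2000);
}
#endif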
3043 int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
3045 struct rdma_id_private *id_priv;
3046 unsigned long flags;
3049 id_priv = container_of(id, struct rdma_id_private, id);
3050 spin_lock_irqsave(&id_priv->lock, flags);
3051 if (reuse || id_priv->state == RDMA_CM_IDLE) {
3052 id_priv->reuseaddr = reuse;
3057 spin_unlock_irqrestore(&id_priv->lock, flags);
3060 EXPORT_SYMBOL(rdma_set_reuseaddr);
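/*
 * Illustrative sketch (not part of this file's logic): address reuse is
 * typically enabled before rdma_bind_addr() so that cma_use_port() and
 * cma_check_port() below honor it; clearing the flag is only permitted while
 * the id is still in the IDLE state.  "id" and "laddr" are hypothetical.
 *
 *	rdma_set_reuseaddr(id, 1);
 *	ret = rdma_bind_addr(id, laddr);
 */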
3062 int rdma_set_afonly(struct rdma_cm_id *id, int afonly)
3064 struct rdma_id_private *id_priv;
3065 unsigned long flags;
3068 id_priv = container_of(id, struct rdma_id_private, id);
3069 spin_lock_irqsave(&id_priv->lock, flags);
3070 if (id_priv->state == RDMA_CM_IDLE || id_priv->state == RDMA_CM_ADDR_BOUND) {
3071 id_priv->options |= (1 << CMA_OPTION_AFONLY);
3072 id_priv->afonly = afonly;
3077 spin_unlock_irqrestore(&id_priv->lock, flags);
3080 EXPORT_SYMBOL(rdma_set_afonly);
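/*
 * Illustrative sketch (not part of this file's logic): restricting an IPv6
 * listener to IPv6-only traffic before it is bound; the call is accepted in
 * the IDLE and ADDR_BOUND states.  "id", "laddr6" and the backlog of 16 are
 * hypothetical.
 *
 *	rdma_set_afonly(id, 1);
 *	ret = rdma_bind_addr(id, (struct sockaddr *)laddr6);
 *	if (ret == 0)
 *		ret = rdma_listen(id, 16);
 */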
3082 static void cma_bind_port(struct rdma_bind_list *bind_list,
3083 struct rdma_id_private *id_priv)
3085 struct sockaddr *addr;
3086 struct sockaddr_ib *sib;
3090 addr = cma_src_addr(id_priv);
3091 port = htons(bind_list->port);
3093 switch (addr->sa_family) {
3095 ((struct sockaddr_in *) addr)->sin_port = port;
3098 ((struct sockaddr_in6 *) addr)->sin6_port = port;
3101 sib = (struct sockaddr_ib *) addr;
3102 sid = be64_to_cpu(sib->sib_sid);
3103 mask = be64_to_cpu(sib->sib_sid_mask);
3104 sib->sib_sid = cpu_to_be64((sid & mask) | (u64) ntohs(port));
3105 sib->sib_sid_mask = cpu_to_be64(~0ULL);
3108 id_priv->bind_list = bind_list;
3109 hlist_add_head(&id_priv->node, &bind_list->owners);
3112 static int cma_alloc_port(enum rdma_port_space ps,
3113 struct rdma_id_private *id_priv, unsigned short snum)
3115 struct rdma_bind_list *bind_list;
3118 bind_list = kzalloc(sizeof *bind_list, GFP_KERNEL);
3122 ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list,
3128 bind_list->port = (unsigned short)ret;
3129 cma_bind_port(bind_list, id_priv);
3133 return ret == -ENOSPC ? -EADDRNOTAVAIL : ret;
3136 static int cma_alloc_any_port(enum rdma_port_space ps,
3137 struct rdma_id_private *id_priv)
3139 static unsigned int last_used_port;
3140 int low, high, remaining;
3142 struct vnet *net = id_priv->id.route.addr.dev_addr.net;
3145 inet_get_local_port_range(net, &low, &high);
3146 remaining = (high - low) + 1;
3147 get_random_bytes(&rand, sizeof(rand));
3148 rover = rand % remaining + low;
3150 if (last_used_port != rover &&
3151 !cma_ps_find(net, ps, (unsigned short)rover)) {
3152 int ret = cma_alloc_port(ps, id_priv, rover);
3154 * Remember the previously used port number in order to avoid
3155 * re-using the same port immediately after it is closed.
3158 last_used_port = rover;
3159 if (ret != -EADDRNOTAVAIL)
3164 if ((rover < low) || (rover > high))
3168 return -EADDRNOTAVAIL;
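/*
 * Example of the rotation above: with an ephemeral range of, say, low = 10000
 * and high = 10999, remaining is 1000 and the initial rover is
 * rand % 1000 + 10000, i.e. somewhere in [10000, 10999].  The search then
 * advances the rover, wrapping back to "low" when it runs past "high", until
 * a free port is found or the whole range has been visited, in which case
 * -EADDRNOTAVAIL is returned.
 */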
3172 * Check that the requested port is available. This is called when trying to
3173 * bind to a specific port, or when trying to listen on a bound port. In
3174 * the latter case, the provided id_priv may already be on the bind_list, but
3175 * we still need to check that it's okay to start listening.
3177 static int cma_check_port(struct rdma_bind_list *bind_list,
3178 struct rdma_id_private *id_priv, uint8_t reuseaddr)
3180 struct rdma_id_private *cur_id;
3181 struct sockaddr *addr, *cur_addr;
3183 addr = cma_src_addr(id_priv);
3184 hlist_for_each_entry(cur_id, &bind_list->owners, node) {
3185 if (id_priv == cur_id)
3188 if ((cur_id->state != RDMA_CM_LISTEN) && reuseaddr &&
3192 cur_addr = cma_src_addr(cur_id);
3193 if (id_priv->afonly && cur_id->afonly &&
3194 (addr->sa_family != cur_addr->sa_family))
3197 if (cma_any_addr(addr) || cma_any_addr(cur_addr))
3198 return -EADDRNOTAVAIL;
3200 if (!cma_addr_cmp(addr, cur_addr))
3206 static int cma_use_port(enum rdma_port_space ps,
3207 struct rdma_id_private *id_priv)
3209 struct rdma_bind_list *bind_list;
3210 unsigned short snum;
3213 snum = ntohs(cma_port(cma_src_addr(id_priv)));
3214 if (snum < IPPORT_RESERVED &&
3215 priv_check(curthread, PRIV_NETINET_BINDANY) != 0)
3218 bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum);
3220 ret = cma_alloc_port(ps, id_priv, snum);
3222 ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
3224 cma_bind_port(bind_list, id_priv);
3229 static int cma_bind_listen(struct rdma_id_private *id_priv)
3231 struct rdma_bind_list *bind_list = id_priv->bind_list;
3235 if (bind_list->owners.first->next)
3236 ret = cma_check_port(bind_list, id_priv, 0);
3237 mutex_unlock(&lock);
3241 static enum rdma_port_space cma_select_inet_ps(
3242 struct rdma_id_private *id_priv)
3244 switch (id_priv->id.ps) {
3250 return id_priv->id.ps;
3257 static enum rdma_port_space cma_select_ib_ps(struct rdma_id_private *id_priv)
3259 enum rdma_port_space ps = 0;
3260 struct sockaddr_ib *sib;
3261 u64 sid_ps, mask, sid;
3263 sib = (struct sockaddr_ib *) cma_src_addr(id_priv);
3264 mask = be64_to_cpu(sib->sib_sid_mask) & RDMA_IB_IP_PS_MASK;
3265 sid = be64_to_cpu(sib->sib_sid) & mask;
3267 if ((id_priv->id.ps == RDMA_PS_IB) && (sid == (RDMA_IB_IP_PS_IB & mask))) {
3268 sid_ps = RDMA_IB_IP_PS_IB;
3270 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_TCP)) &&
3271 (sid == (RDMA_IB_IP_PS_TCP & mask))) {
3272 sid_ps = RDMA_IB_IP_PS_TCP;
3274 } else if (((id_priv->id.ps == RDMA_PS_IB) || (id_priv->id.ps == RDMA_PS_UDP)) &&
3275 (sid == (RDMA_IB_IP_PS_UDP & mask))) {
3276 sid_ps = RDMA_IB_IP_PS_UDP;
3281 sib->sib_sid = cpu_to_be64(sid_ps | ntohs(cma_port((struct sockaddr *) sib)));
3282 sib->sib_sid_mask = cpu_to_be64(RDMA_IB_IP_PS_MASK |
3283 be64_to_cpu(sib->sib_sid_mask));
3288 static int cma_get_port(struct rdma_id_private *id_priv)
3290 enum rdma_port_space ps;
3293 if (cma_family(id_priv) != AF_IB)
3294 ps = cma_select_inet_ps(id_priv);
3296 ps = cma_select_ib_ps(id_priv);
3298 return -EPROTONOSUPPORT;
3301 if (cma_any_port(cma_src_addr(id_priv)))
3302 ret = cma_alloc_any_port(ps, id_priv);
3304 ret = cma_use_port(ps, id_priv);
3305 mutex_unlock(&lock);
3310 static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
3311 struct sockaddr *addr)
3314 struct sockaddr_in6 sin6;
3316 if (addr->sa_family != AF_INET6)
3319 sin6 = *(struct sockaddr_in6 *)addr;
3321 if (IN6_IS_SCOPE_LINKLOCAL(&sin6.sin6_addr) ||
3322 IN6_IS_ADDR_MC_INTFACELOCAL(&sin6.sin6_addr)) {
3325 CURVNET_SET_QUIET(dev_addr->net);
3326 failure = sa6_recoverscope(&sin6) || sin6.sin6_scope_id == 0;
3329 /* fail if the IPv6 scope ID could not be determined */
3332 dev_addr->bound_dev_if = sin6.sin6_scope_id;
3338 int rdma_listen(struct rdma_cm_id *id, int backlog)
3340 struct rdma_id_private *id_priv;
3343 id_priv = container_of(id, struct rdma_id_private, id);
3344 if (id_priv->state == RDMA_CM_IDLE) {
3345 id->route.addr.src_addr.ss_family = AF_INET;
3346 ret = rdma_bind_addr(id, cma_src_addr(id_priv));
3351 if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
3354 if (id_priv->reuseaddr) {
3355 ret = cma_bind_listen(id_priv);
3360 id_priv->backlog = backlog;
3362 if (rdma_cap_ib_cm(id->device, 1)) {
3363 ret = cma_ib_listen(id_priv);
3366 } else if (rdma_cap_iw_cm(id->device, 1)) {
3367 ret = cma_iw_listen(id_priv, backlog);
3375 cma_listen_on_all(id_priv);
3379 id_priv->backlog = 0;
3380 cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
3383 EXPORT_SYMBOL(rdma_listen);
3385 int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
3387 struct rdma_id_private *id_priv;
3390 if (addr->sa_family != AF_INET && addr->sa_family != AF_INET6 &&
3391 addr->sa_family != AF_IB)
3392 return -EAFNOSUPPORT;
3394 id_priv = container_of(id, struct rdma_id_private, id);
3395 if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
3398 ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
3402 memcpy(cma_src_addr(id_priv), addr, rdma_addr_size(addr));
3403 if (!cma_any_addr(addr)) {
3404 ret = cma_translate_addr(addr, &id->route.addr.dev_addr);
3408 ret = cma_acquire_dev(id_priv, NULL);
3413 if (!(id_priv->options & (1 << CMA_OPTION_AFONLY))) {
3414 if (addr->sa_family == AF_INET)
3415 id_priv->afonly = 1;
3417 else if (addr->sa_family == AF_INET6) {
3418 CURVNET_SET_QUIET(id_priv->id.route.addr.dev_addr.net);
3419 id_priv->afonly = V_ip6_v6only;
3424 ret = cma_get_port(id_priv);
3430 if (id_priv->cma_dev)
3431 cma_release_dev(id_priv);
3433 cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
3436 EXPORT_SYMBOL(rdma_bind_addr);
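/*
 * Illustrative sketch (not part of this file's logic): a minimal passive side
 * binds a local address and listens; incoming requests are delivered as
 * RDMA_CM_EVENT_CONNECT_REQUEST to the handler passed to rdma_create_id().
 * The names (example_listen, "net", "laddr") and the backlog of 16 are
 * hypothetical.
 */
#if 0
static int example_listen(struct vnet *net, struct sockaddr *laddr,
    rdma_cm_event_handler handler)
{
	struct rdma_cm_id *id;
	int ret;

	id = rdma_create_id(net, handler, NULL, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id))
		return PTR_ERR(id);
	ret = rdma_bind_addr(id, laddr);
	if (ret == 0)
		ret = rdma_listen(id, 16);
	if (ret != 0)
		rdma_destroy_id(id);
	return ret;
}
#endif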
3438 static int sdp_format_hdr(struct sdp_hh *sdp_hdr, struct rdma_id_private *id_priv)
3441 * XXXCEM: CMA just sets the version itself rather than relying on
3442 * the passed-in packet to have the major version set. Should we?
3444 if (sdp_get_majv(sdp_hdr->majv_minv) != SDP_MAJ_VERSION)
3447 if (cma_family(id_priv) == AF_INET) {
3448 struct sockaddr_in *src4, *dst4;
3450 src4 = (struct sockaddr_in *) cma_src_addr(id_priv);
3451 dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv);
3453 sdp_set_ip_ver(sdp_hdr, 4);
3454 sdp_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
3455 sdp_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
3456 sdp_hdr->port = src4->sin_port;
3457 } else if (cma_family(id_priv) == AF_INET6) {
3458 struct sockaddr_in6 *src6, *dst6;
3460 src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
3461 dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv);
3463 sdp_set_ip_ver(sdp_hdr, 6);
3464 sdp_hdr->src_addr.ip6 = src6->sin6_addr;
3465 sdp_hdr->dst_addr.ip6 = dst6->sin6_addr;
3466 sdp_hdr->port = src6->sin6_port;
3467 cma_ip6_clear_scope_id(&sdp_hdr->src_addr.ip6);
3468 cma_ip6_clear_scope_id(&sdp_hdr->dst_addr.ip6);
3470 return -EAFNOSUPPORT;
3474 static int cma_format_hdr(void *hdr, struct rdma_id_private *id_priv)
3476 struct cma_hdr *cma_hdr;
3478 if (id_priv->id.ps == RDMA_PS_SDP)
3479 return sdp_format_hdr(hdr, id_priv);
3482 cma_hdr->cma_version = CMA_VERSION;
3483 if (cma_family(id_priv) == AF_INET) {
3484 struct sockaddr_in *src4, *dst4;
3486 src4 = (struct sockaddr_in *) cma_src_addr(id_priv);
3487 dst4 = (struct sockaddr_in *) cma_dst_addr(id_priv);
3489 cma_set_ip_ver(cma_hdr, 4);
3490 cma_hdr->src_addr.ip4.addr = src4->sin_addr.s_addr;
3491 cma_hdr->dst_addr.ip4.addr = dst4->sin_addr.s_addr;
3492 cma_hdr->port = src4->sin_port;
3493 } else if (cma_family(id_priv) == AF_INET6) {
3494 struct sockaddr_in6 *src6, *dst6;
3496 src6 = (struct sockaddr_in6 *) cma_src_addr(id_priv);
3497 dst6 = (struct sockaddr_in6 *) cma_dst_addr(id_priv);
3499 cma_set_ip_ver(cma_hdr, 6);
3500 cma_hdr->src_addr.ip6 = src6->sin6_addr;
3501 cma_hdr->dst_addr.ip6 = dst6->sin6_addr;
3502 cma_hdr->port = src6->sin6_port;
3503 cma_ip6_clear_scope_id(&cma_hdr->src_addr.ip6);
3504 cma_ip6_clear_scope_id(&cma_hdr->dst_addr.ip6);
3509 static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
3510 struct ib_cm_event *ib_event)
3512 struct rdma_id_private *id_priv = cm_id->context;
3513 struct rdma_cm_event event;
3514 struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
3517 mutex_lock(&id_priv->handler_mutex);
3518 if (id_priv->state != RDMA_CM_CONNECT)
3521 memset(&event, 0, sizeof event);
3522 switch (ib_event->event) {
3523 case IB_CM_SIDR_REQ_ERROR:
3524 event.event = RDMA_CM_EVENT_UNREACHABLE;
3525 event.status = -ETIMEDOUT;
3527 case IB_CM_SIDR_REP_RECEIVED:
3528 event.param.ud.private_data = ib_event->private_data;
3529 event.param.ud.private_data_len = IB_CM_SIDR_REP_PRIVATE_DATA_SIZE;
3530 if (rep->status != IB_SIDR_SUCCESS) {
3531 event.event = RDMA_CM_EVENT_UNREACHABLE;
3532 event.status = ib_event->param.sidr_rep_rcvd.status;
3535 ret = cma_set_qkey(id_priv, rep->qkey);
3537 event.event = RDMA_CM_EVENT_ADDR_ERROR;
3541 ret = ib_init_ah_from_path(id_priv->id.device,
3542 id_priv->id.port_num,
3543 id_priv->id.route.path_rec,
3544 &event.param.ud.ah_attr);
3546 event.event = RDMA_CM_EVENT_ADDR_ERROR;
3550 event.param.ud.qp_num = rep->qpn;
3551 event.param.ud.qkey = rep->qkey;
3552 event.event = RDMA_CM_EVENT_ESTABLISHED;
3556 pr_err("RDMA CMA: unexpected IB CM event: %d\n",
3561 ret = id_priv->id.event_handler(&id_priv->id, &event);
3563 /* Destroy the CM ID by returning a non-zero value. */
3564 id_priv->cm_id.ib = NULL;
3565 cma_exch(id_priv, RDMA_CM_DESTROYING);
3566 mutex_unlock(&id_priv->handler_mutex);
3567 rdma_destroy_id(&id_priv->id);
3571 mutex_unlock(&id_priv->handler_mutex);
3575 static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
3576 struct rdma_conn_param *conn_param)
3578 struct ib_cm_sidr_req_param req;
3579 struct ib_cm_id *id;
3583 memset(&req, 0, sizeof req);
3584 offset = cma_user_data_offset(id_priv);
3585 req.private_data_len = offset + conn_param->private_data_len;
3586 if (req.private_data_len < conn_param->private_data_len)
3589 if (req.private_data_len) {
3590 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
3594 private_data = NULL;
3597 if (conn_param->private_data && conn_param->private_data_len)
3598 memcpy((char *)private_data + offset, conn_param->private_data,
3599 conn_param->private_data_len);
3602 ret = cma_format_hdr(private_data, id_priv);
3605 req.private_data = private_data;
3608 id = ib_create_cm_id(id_priv->id.device, cma_sidr_rep_handler,
3614 id_priv->cm_id.ib = id;
3616 req.path = id_priv->id.route.path_rec;
3617 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
3618 req.timeout_ms = 1 << (CMA_CM_RESPONSE_TIMEOUT - 8);
3619 req.max_cm_retries = CMA_MAX_CM_RETRIES;
3621 ret = ib_send_cm_sidr_req(id_priv->cm_id.ib, &req);
3623 ib_destroy_cm_id(id_priv->cm_id.ib);
3624 id_priv->cm_id.ib = NULL;
3627 kfree(private_data);
3631 static int cma_connect_ib(struct rdma_id_private *id_priv,
3632 struct rdma_conn_param *conn_param)
3634 struct ib_cm_req_param req;
3635 struct rdma_route *route;
3637 struct ib_cm_id *id;
3640 memset(&req, 0, sizeof req);
3641 offset = cma_user_data_offset(id_priv);
3642 req.private_data_len = offset + conn_param->private_data_len;
3643 if (req.private_data_len < conn_param->private_data_len)
3646 if (req.private_data_len) {
3647 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
3651 private_data = NULL;
3654 if (conn_param->private_data && conn_param->private_data_len)
3655 memcpy((char *)private_data + offset, conn_param->private_data,
3656 conn_param->private_data_len);
3658 id = ib_create_cm_id(id_priv->id.device, cma_ib_handler, id_priv);
3663 id_priv->cm_id.ib = id;
3665 route = &id_priv->id.route;
3667 ret = cma_format_hdr(private_data, id_priv);
3670 req.private_data = private_data;
3673 req.primary_path = &route->path_rec[0];
3674 if (route->num_paths == 2)
3675 req.alternate_path = &route->path_rec[1];
3677 req.service_id = rdma_get_service_id(&id_priv->id, cma_dst_addr(id_priv));
3678 req.qp_num = id_priv->qp_num;
3679 req.qp_type = id_priv->id.qp_type;
3680 req.starting_psn = id_priv->seq_num;
3681 req.responder_resources = conn_param->responder_resources;
3682 req.initiator_depth = conn_param->initiator_depth;
3683 req.flow_control = conn_param->flow_control;
3684 req.retry_count = min_t(u8, 7, conn_param->retry_count);
3685 req.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
3686 req.remote_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
3687 req.local_cm_response_timeout = CMA_CM_RESPONSE_TIMEOUT;
3688 req.max_cm_retries = CMA_MAX_CM_RETRIES;
3689 req.srq = id_priv->srq ? 1 : 0;
3691 ret = ib_send_cm_req(id_priv->cm_id.ib, &req);
3693 if (ret && !IS_ERR(id)) {
3694 ib_destroy_cm_id(id);
3695 id_priv->cm_id.ib = NULL;
3698 kfree(private_data);
3702 static int cma_connect_iw(struct rdma_id_private *id_priv,
3703 struct rdma_conn_param *conn_param)
3705 struct iw_cm_id *cm_id;
3707 struct iw_cm_conn_param iw_param;
3709 cm_id = iw_create_cm_id(id_priv->id.device, cma_iw_handler, id_priv);
3711 return PTR_ERR(cm_id);
3713 cm_id->tos = id_priv->tos;
3714 id_priv->cm_id.iw = cm_id;
3716 memcpy(&cm_id->local_addr, cma_src_addr(id_priv),
3717 rdma_addr_size(cma_src_addr(id_priv)));
3718 memcpy(&cm_id->remote_addr, cma_dst_addr(id_priv),
3719 rdma_addr_size(cma_dst_addr(id_priv)));
3721 ret = cma_modify_qp_rtr(id_priv, conn_param);
3726 iw_param.ord = conn_param->initiator_depth;
3727 iw_param.ird = conn_param->responder_resources;
3728 iw_param.private_data = conn_param->private_data;
3729 iw_param.private_data_len = conn_param->private_data_len;
3730 iw_param.qpn = id_priv->id.qp ? id_priv->qp_num : conn_param->qp_num;
3732 memset(&iw_param, 0, sizeof iw_param);
3733 iw_param.qpn = id_priv->qp_num;
3735 ret = iw_cm_connect(cm_id, &iw_param);
3738 iw_destroy_cm_id(cm_id);
3739 id_priv->cm_id.iw = NULL;
3744 int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
3746 struct rdma_id_private *id_priv;
3749 id_priv = container_of(id, struct rdma_id_private, id);
3750 if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
3754 id_priv->qp_num = conn_param->qp_num;
3755 id_priv->srq = conn_param->srq;
3758 if (rdma_cap_ib_cm(id->device, id->port_num)) {
3759 if (id->qp_type == IB_QPT_UD)
3760 ret = cma_resolve_ib_udp(id_priv, conn_param);
3762 ret = cma_connect_ib(id_priv, conn_param);
3763 } else if (rdma_cap_iw_cm(id->device, id->port_num))
3764 ret = cma_connect_iw(id_priv, conn_param);
3772 cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
3775 EXPORT_SYMBOL(rdma_connect);
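/*
 * Illustrative sketch (not part of this file's logic): filling a connection
 * parameter block and connecting once the route is resolved, typically from
 * the RDMA_CM_EVENT_ROUTE_RESOLVED callback.  The literal values are
 * hypothetical; retry counts above 7 are clamped by cma_connect_ib().
 *
 *	struct rdma_conn_param conn_param;
 *
 *	memset(&conn_param, 0, sizeof(conn_param));
 *	conn_param.responder_resources = 1;
 *	conn_param.initiator_depth = 1;
 *	conn_param.retry_count = 7;
 *	conn_param.rnr_retry_count = 7;
 *	ret = rdma_connect(id, &conn_param);
 */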
3777 static int cma_accept_ib(struct rdma_id_private *id_priv,
3778 struct rdma_conn_param *conn_param)
3780 struct ib_cm_rep_param rep;
3783 ret = cma_modify_qp_rtr(id_priv, conn_param);
3787 ret = cma_modify_qp_rts(id_priv, conn_param);
3791 memset(&rep, 0, sizeof rep);
3792 rep.qp_num = id_priv->qp_num;
3793 rep.starting_psn = id_priv->seq_num;
3794 rep.private_data = conn_param->private_data;
3795 rep.private_data_len = conn_param->private_data_len;
3796 rep.responder_resources = conn_param->responder_resources;
3797 rep.initiator_depth = conn_param->initiator_depth;
3798 rep.failover_accepted = 0;
3799 rep.flow_control = conn_param->flow_control;
3800 rep.rnr_retry_count = min_t(u8, 7, conn_param->rnr_retry_count);
3801 rep.srq = id_priv->srq ? 1 : 0;
3803 ret = ib_send_cm_rep(id_priv->cm_id.ib, &rep);
3808 static int cma_accept_iw(struct rdma_id_private *id_priv,
3809 struct rdma_conn_param *conn_param)
3811 struct iw_cm_conn_param iw_param;
3814 ret = cma_modify_qp_rtr(id_priv, conn_param);
3818 iw_param.ord = conn_param->initiator_depth;
3819 iw_param.ird = conn_param->responder_resources;
3820 iw_param.private_data = conn_param->private_data;
3821 iw_param.private_data_len = conn_param->private_data_len;
3822 if (id_priv->id.qp) {
3823 iw_param.qpn = id_priv->qp_num;
3825 iw_param.qpn = conn_param->qp_num;
3827 return iw_cm_accept(id_priv->cm_id.iw, &iw_param);
3830 static int cma_send_sidr_rep(struct rdma_id_private *id_priv,
3831 enum ib_cm_sidr_status status, u32 qkey,
3832 const void *private_data, int private_data_len)
3834 struct ib_cm_sidr_rep_param rep;
3837 memset(&rep, 0, sizeof rep);
3838 rep.status = status;
3839 if (status == IB_SIDR_SUCCESS) {
3840 ret = cma_set_qkey(id_priv, qkey);
3843 rep.qp_num = id_priv->qp_num;
3844 rep.qkey = id_priv->qkey;
3846 rep.private_data = private_data;
3847 rep.private_data_len = private_data_len;
3849 return ib_send_cm_sidr_rep(id_priv->cm_id.ib, &rep);
3852 int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
3854 struct rdma_id_private *id_priv;
3857 id_priv = container_of(id, struct rdma_id_private, id);
3859 id_priv->owner = task_pid_nr(current);
3861 if (!cma_comp(id_priv, RDMA_CM_CONNECT))
3864 if (!id->qp && conn_param) {
3865 id_priv->qp_num = conn_param->qp_num;
3866 id_priv->srq = conn_param->srq;
3869 if (rdma_cap_ib_cm(id->device, id->port_num)) {
3870 if (id->qp_type == IB_QPT_UD) {
3872 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
3874 conn_param->private_data,
3875 conn_param->private_data_len);
3877 ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
3881 ret = cma_accept_ib(id_priv, conn_param);
3883 ret = cma_rep_recv(id_priv);
3885 } else if (rdma_cap_iw_cm(id->device, id->port_num))
3886 ret = cma_accept_iw(id_priv, conn_param);
3895 cma_modify_qp_err(id_priv);
3896 rdma_reject(id, NULL, 0);
3899 EXPORT_SYMBOL(rdma_accept);
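/*
 * Illustrative sketch (not part of this file's logic): accepting from the
 * RDMA_CM_EVENT_CONNECT_REQUEST callback.  The peer's limits arrive in
 * event->param.conn and are commonly echoed back.  On failure rdma_accept()
 * already moves the QP to the error state and rejects the request, so the
 * caller only needs to propagate the error.
 *
 *	struct rdma_conn_param conn_param;
 *
 *	memset(&conn_param, 0, sizeof(conn_param));
 *	conn_param.responder_resources = event->param.conn.responder_resources;
 *	conn_param.initiator_depth = event->param.conn.initiator_depth;
 *	ret = rdma_accept(id, &conn_param);
 */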
3901 int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event)
3903 struct rdma_id_private *id_priv;
3906 id_priv = container_of(id, struct rdma_id_private, id);
3907 if (!id_priv->cm_id.ib)
3910 switch (id->device->node_type) {
3911 case RDMA_NODE_IB_CA:
3912 ret = ib_cm_notify(id_priv->cm_id.ib, event);
3920 EXPORT_SYMBOL(rdma_notify);
3922 int rdma_reject(struct rdma_cm_id *id, const void *private_data,
3923 u8 private_data_len)
3925 struct rdma_id_private *id_priv;
3928 id_priv = container_of(id, struct rdma_id_private, id);
3929 if (!id_priv->cm_id.ib)
3932 if (rdma_cap_ib_cm(id->device, id->port_num)) {
3933 if (id->qp_type == IB_QPT_UD)
3934 ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0,
3935 private_data, private_data_len);
3937 ret = ib_send_cm_rej(id_priv->cm_id.ib,
3938 IB_CM_REJ_CONSUMER_DEFINED, NULL,
3939 0, private_data, private_data_len);
3940 } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
3941 ret = iw_cm_reject(id_priv->cm_id.iw,
3942 private_data, private_data_len);
3948 EXPORT_SYMBOL(rdma_reject);
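/*
 * Illustrative sketch (not part of this file's logic): declining a request
 * from the RDMA_CM_EVENT_CONNECT_REQUEST callback with a short, hypothetical
 * reason blob carried as private data.
 *
 *	static const char reason[] = "busy";
 *
 *	ret = rdma_reject(id, reason, sizeof(reason));
 */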
3950 int rdma_disconnect(struct rdma_cm_id *id)
3952 struct rdma_id_private *id_priv;
3955 id_priv = container_of(id, struct rdma_id_private, id);
3956 if (!id_priv->cm_id.ib)
3959 if (rdma_cap_ib_cm(id->device, id->port_num)) {
3960 ret = cma_modify_qp_err(id_priv);
3963 /* Initiate or respond to a disconnect. */
3964 if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0))
3965 ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0);
3966 } else if (rdma_cap_iw_cm(id->device, id->port_num)) {
3967 ret = iw_cm_disconnect(id_priv->cm_id.iw, 0);
3974 EXPORT_SYMBOL(rdma_disconnect);
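/*
 * Illustrative sketch (not part of this file's logic): orderly teardown from
 * process context.  Note that rdma_destroy_id() must not be called from
 * within the id's own event handler; returning non-zero from the handler
 * lets the CMA destroy the id instead, as the handlers above do.
 *
 *	rdma_disconnect(id);
 *	rdma_destroy_id(id);
 */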
3976 static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
3978 struct rdma_id_private *id_priv;
3979 struct cma_multicast *mc = multicast->context;
3980 struct rdma_cm_event event;
3983 id_priv = mc->id_priv;
3984 mutex_lock(&id_priv->handler_mutex);
3985 if (id_priv->state != RDMA_CM_ADDR_BOUND &&
3986 id_priv->state != RDMA_CM_ADDR_RESOLVED)
3990 status = cma_set_qkey(id_priv, be32_to_cpu(multicast->rec.qkey));
3991 mutex_lock(&id_priv->qp_mutex);
3992 if (!status && id_priv->id.qp)
3993 status = ib_attach_mcast(id_priv->id.qp, &multicast->rec.mgid,
3994 be16_to_cpu(multicast->rec.mlid));
3995 mutex_unlock(&id_priv->qp_mutex);
3997 memset(&event, 0, sizeof event);
3998 event.status = status;
3999 event.param.ud.private_data = mc->context;
4001 struct rdma_dev_addr *dev_addr =
4002 &id_priv->id.route.addr.dev_addr;
4003 struct net_device *ndev =
4004 dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
4005 enum ib_gid_type gid_type =
4006 id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
4007 rdma_start_port(id_priv->cma_dev->device)];
4009 event.event = RDMA_CM_EVENT_MULTICAST_JOIN;
4010 ret = ib_init_ah_from_mcmember(id_priv->id.device,
4011 id_priv->id.port_num,
4014 &event.param.ud.ah_attr);
4016 event.event = RDMA_CM_EVENT_MULTICAST_ERROR;
4018 event.param.ud.qp_num = 0xFFFFFF;
4019 event.param.ud.qkey = be32_to_cpu(multicast->rec.qkey);
4023 event.event = RDMA_CM_EVENT_MULTICAST_ERROR;
4025 ret = id_priv->id.event_handler(&id_priv->id, &event);
4027 cma_exch(id_priv, RDMA_CM_DESTROYING);
4028 mutex_unlock(&id_priv->handler_mutex);
4029 rdma_destroy_id(&id_priv->id);
4034 mutex_unlock(&id_priv->handler_mutex);
4038 static void cma_set_mgid(struct rdma_id_private *id_priv,
4039 struct sockaddr *addr, union ib_gid *mgid)
4041 unsigned char mc_map[MAX_ADDR_LEN];
4042 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
4043 struct sockaddr_in *sin = (struct sockaddr_in *) addr;
4044 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) addr;
4046 if (cma_any_addr(addr)) {
4047 memset(mgid, 0, sizeof *mgid);
4048 } else if ((addr->sa_family == AF_INET6) &&
4049 ((be32_to_cpu(sin6->sin6_addr.s6_addr32[0]) & 0xFFF0FFFF) ==
4051 /* IPv6 address is an SA-assigned MGID. */
4052 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
4053 } else if (addr->sa_family == AF_IB) {
4054 memcpy(mgid, &((struct sockaddr_ib *) addr)->sib_addr, sizeof *mgid);
4055 } else if (addr->sa_family == AF_INET6) {
4056 ipv6_ib_mc_map(&sin6->sin6_addr, dev_addr->broadcast, mc_map);
4057 if (id_priv->id.ps == RDMA_PS_UDP)
4058 mc_map[7] = 0x01; /* Use RDMA CM signature */
4059 *mgid = *(union ib_gid *) (mc_map + 4);
4061 ip_ib_mc_map(sin->sin_addr.s_addr, dev_addr->broadcast, mc_map);
4062 if (id_priv->id.ps == RDMA_PS_UDP)
4063 mc_map[7] = 0x01; /* Use RDMA CM signature */
4064 *mgid = *(union ib_gid *) (mc_map + 4);
4068 static void cma_query_sa_classport_info_cb(int status,
4069 struct ib_class_port_info *rec,
4072 struct class_port_info_context *cb_ctx = context;
4076 if (status || !rec) {
4077 pr_debug("RDMA CM: %s port %u failed query ClassPortInfo status: %d\n",
4078 cb_ctx->device->name, cb_ctx->port_num, status);
4082 memcpy(cb_ctx->class_port_info, rec, sizeof(struct ib_class_port_info));
4085 complete(&cb_ctx->done);
4088 static int cma_query_sa_classport_info(struct ib_device *device, u8 port_num,
4089 struct ib_class_port_info *class_port_info)
4091 struct class_port_info_context *cb_ctx;
4094 cb_ctx = kmalloc(sizeof(*cb_ctx), GFP_KERNEL);
4098 cb_ctx->device = device;
4099 cb_ctx->class_port_info = class_port_info;
4100 cb_ctx->port_num = port_num;
4101 init_completion(&cb_ctx->done);
4103 ret = ib_sa_classport_info_rec_query(&sa_client, device, port_num,
4104 CMA_QUERY_CLASSPORT_INFO_TIMEOUT,
4105 GFP_KERNEL, cma_query_sa_classport_info_cb,
4106 cb_ctx, &cb_ctx->sa_query);
4108 pr_err("RDMA CM: %s port %u failed to send ClassPortInfo query, ret: %d\n",
4109 device->name, port_num, ret);
4113 wait_for_completion(&cb_ctx->done);
4120 static int cma_join_ib_multicast(struct rdma_id_private *id_priv,
4121 struct cma_multicast *mc)
4123 struct ib_sa_mcmember_rec rec;
4124 struct ib_class_port_info class_port_info;
4125 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
4126 ib_sa_comp_mask comp_mask;
4129 ib_addr_get_mgid(dev_addr, &rec.mgid);
4130 ret = ib_sa_get_mcmember_rec(id_priv->id.device, id_priv->id.port_num,
4135 ret = cma_set_qkey(id_priv, 0);
4139 cma_set_mgid(id_priv, (struct sockaddr *) &mc->addr, &rec.mgid);
4140 rec.qkey = cpu_to_be32(id_priv->qkey);
4141 rdma_addr_get_sgid(dev_addr, &rec.port_gid);
4142 rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
4143 rec.join_state = mc->join_state;
4145 if (rec.join_state == BIT(SENDONLY_FULLMEMBER_JOIN)) {
4146 ret = cma_query_sa_classport_info(id_priv->id.device,
4147 id_priv->id.port_num,
4153 if (!(ib_get_cpi_capmask2(&class_port_info) &
4154 IB_SA_CAP_MASK2_SENDONLY_FULL_MEM_SUPPORT)) {
4155 pr_warn("RDMA CM: %s port %u Unable to multicast join\n"
4156 "RDMA CM: SM doesn't support Send Only Full Member option\n",
4157 id_priv->id.device->name, id_priv->id.port_num);
4162 comp_mask = IB_SA_MCMEMBER_REC_MGID | IB_SA_MCMEMBER_REC_PORT_GID |
4163 IB_SA_MCMEMBER_REC_PKEY | IB_SA_MCMEMBER_REC_JOIN_STATE |
4164 IB_SA_MCMEMBER_REC_QKEY | IB_SA_MCMEMBER_REC_SL |
4165 IB_SA_MCMEMBER_REC_FLOW_LABEL |
4166 IB_SA_MCMEMBER_REC_TRAFFIC_CLASS;
4168 if (id_priv->id.ps == RDMA_PS_IPOIB)
4169 comp_mask |= IB_SA_MCMEMBER_REC_RATE |
4170 IB_SA_MCMEMBER_REC_RATE_SELECTOR |
4171 IB_SA_MCMEMBER_REC_MTU_SELECTOR |
4172 IB_SA_MCMEMBER_REC_MTU |
4173 IB_SA_MCMEMBER_REC_HOP_LIMIT;
4175 mc->multicast.ib = ib_sa_join_multicast(&sa_client, id_priv->id.device,
4176 id_priv->id.port_num, &rec,
4177 comp_mask, GFP_KERNEL,
4178 cma_ib_mc_handler, mc);
4179 return PTR_ERR_OR_ZERO(mc->multicast.ib);
4182 static void iboe_mcast_work_handler(struct work_struct *work)
4184 struct iboe_mcast_work *mw = container_of(work, struct iboe_mcast_work, work);
4185 struct cma_multicast *mc = mw->mc;
4186 struct ib_sa_multicast *m = mc->multicast.ib;
4188 mc->multicast.ib->context = mc;
4189 cma_ib_mc_handler(0, m);
4190 kref_put(&mc->mcref, release_mc);
4194 static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid,
4195 enum ib_gid_type gid_type)
4197 struct sockaddr_in *sin = (struct sockaddr_in *)addr;
4198 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;
4200 if (cma_any_addr(addr)) {
4201 memset(mgid, 0, sizeof *mgid);
4202 } else if (addr->sa_family == AF_INET6) {
4203 memcpy(mgid, &sin6->sin6_addr, sizeof *mgid);
4206 (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0xff;
4208 (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) ? 0 : 0x0e;
4217 mgid->raw[10] = 0xff;
4218 mgid->raw[11] = 0xff;
4219 *(__be32 *)(&mgid->raw[12]) = sin->sin_addr.s_addr;
4223 static int cma_iboe_join_multicast(struct rdma_id_private *id_priv,
4224 struct cma_multicast *mc)
4226 struct iboe_mcast_work *work;
4227 struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr;
4229 struct sockaddr *addr = (struct sockaddr *)&mc->addr;
4230 struct net_device *ndev = NULL;
4231 enum ib_gid_type gid_type;
4234 send_only = mc->join_state == BIT(SENDONLY_FULLMEMBER_JOIN);
4236 if (cma_zero_addr((struct sockaddr *)&mc->addr))
4239 work = kzalloc(sizeof *work, GFP_KERNEL);
4243 mc->multicast.ib = kzalloc(sizeof(struct ib_sa_multicast), GFP_KERNEL);
4244 if (!mc->multicast.ib) {
4249 gid_type = id_priv->cma_dev->default_gid_type[id_priv->id.port_num -
4250 rdma_start_port(id_priv->cma_dev->device)];
4251 cma_iboe_set_mgid(addr, &mc->multicast.ib->rec.mgid, gid_type);
4253 mc->multicast.ib->rec.pkey = cpu_to_be16(0xffff);
4254 if (id_priv->id.ps == RDMA_PS_UDP)
4255 mc->multicast.ib->rec.qkey = cpu_to_be32(RDMA_UDP_QKEY);
4257 if (dev_addr->bound_dev_if)
4258 ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if);
4263 mc->multicast.ib->rec.rate = iboe_get_rate(ndev);
4264 mc->multicast.ib->rec.hop_limit = 1;
4265 mc->multicast.ib->rec.mtu = iboe_get_mtu(ndev->if_mtu);
4267 if (addr->sa_family == AF_INET || addr->sa_family == AF_INET6) {
4268 if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
4269 mc->multicast.ib->rec.hop_limit = IPV6_DEFAULT_HOPLIMIT;
4271 err = cma_igmp_send(ndev, &mc->multicast.ib->rec.mgid,
4274 mc->igmp_joined = true;
4278 if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
4282 if (err || !mc->multicast.ib->rec.mtu) {
4287 rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr,
4288 &mc->multicast.ib->rec.port_gid);
4291 INIT_WORK(&work->work, iboe_mcast_work_handler);
4292 kref_get(&mc->mcref);
4293 queue_work(cma_wq, &work->work);
4298 kfree(mc->multicast.ib);
4304 int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
4305 u8 join_state, void *context)
4307 struct rdma_id_private *id_priv;
4308 struct cma_multicast *mc;
4314 id_priv = container_of(id, struct rdma_id_private, id);
4315 if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
4316 !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
4319 mc = kmalloc(sizeof *mc, GFP_KERNEL);
4323 memcpy(&mc->addr, addr, rdma_addr_size(addr));
4324 mc->context = context;
4325 mc->id_priv = id_priv;
4326 mc->igmp_joined = false;
4327 mc->join_state = join_state;
4328 spin_lock(&id_priv->lock);
4329 list_add(&mc->list, &id_priv->mc_list);
4330 spin_unlock(&id_priv->lock);
4332 if (rdma_protocol_roce(id->device, id->port_num)) {
4333 kref_init(&mc->mcref);
4334 ret = cma_iboe_join_multicast(id_priv, mc);
4335 } else if (rdma_cap_ib_mcast(id->device, id->port_num))
4336 ret = cma_join_ib_multicast(id_priv, mc);
4341 spin_lock_irq(&id_priv->lock);
4342 list_del(&mc->list);
4343 spin_unlock_irq(&id_priv->lock);
4348 EXPORT_SYMBOL(rdma_join_multicast);
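/*
 * Illustrative sketch (not part of this file's logic): a UD consumer joins a
 * multicast group once its source address is bound or resolved and collects
 * the AH attributes, QPN and Q_Key from the RDMA_CM_EVENT_MULTICAST_JOIN
 * event; rdma_leave_multicast() undoes the join.  "mcast_addr" and "ctx" are
 * hypothetical, and join_state is one of the BIT() values from
 * enum ib_sa_mc_join_states, e.g. BIT(FULLMEMBER_JOIN) for a regular join or
 * BIT(SENDONLY_FULLMEMBER_JOIN) for the send-only case checked above.
 *
 *	ret = rdma_join_multicast(id, mcast_addr, BIT(FULLMEMBER_JOIN), ctx);
 *	...
 *	rdma_leave_multicast(id, mcast_addr);
 */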
4350 void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
4352 struct rdma_id_private *id_priv;
4353 struct cma_multicast *mc;
4355 id_priv = container_of(id, struct rdma_id_private, id);
4356 spin_lock_irq(&id_priv->lock);
4357 list_for_each_entry(mc, &id_priv->mc_list, list) {
4358 if (!memcmp(&mc->addr, addr, rdma_addr_size(addr))) {
4359 list_del(&mc->list);
4360 spin_unlock_irq(&id_priv->lock);
4363 ib_detach_mcast(id->qp,
4364 &mc->multicast.ib->rec.mgid,
4365 be16_to_cpu(mc->multicast.ib->rec.mlid));
4367 BUG_ON(id_priv->cma_dev->device != id->device);
4369 if (rdma_cap_ib_mcast(id->device, id->port_num)) {
4370 ib_sa_free_multicast(mc->multicast.ib);
4372 } else if (rdma_protocol_roce(id->device, id->port_num)) {
4373 if (mc->igmp_joined) {
4374 struct rdma_dev_addr *dev_addr =
4375 &id->route.addr.dev_addr;
4376 struct net_device *ndev = NULL;
4378 if (dev_addr->bound_dev_if)
4379 ndev = dev_get_by_index(dev_addr->net,
4380 dev_addr->bound_dev_if);
4383 &mc->multicast.ib->rec.mgid,
4387 mc->igmp_joined = false;
4389 kref_put(&mc->mcref, release_mc);
4394 spin_unlock_irq(&id_priv->lock);
4396 EXPORT_SYMBOL(rdma_leave_multicast);
4399 sysctl_cma_default_roce_mode(SYSCTL_HANDLER_ARGS)
4401 struct cma_device *cma_dev = arg1;
4402 const int port = arg2;
4406 strlcpy(buf, ib_cache_gid_type_str(
4407 cma_get_default_gid_type(cma_dev, port)), sizeof(buf));
4409 error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
4410 if (error != 0 || req->newptr == NULL)
4413 error = ib_cache_gid_parse_type_str(buf);
4419 cma_set_default_gid_type(cma_dev, port, error);
4425 static void cma_add_one(struct ib_device *device)
4427 struct cma_device *cma_dev;
4428 struct rdma_id_private *id_priv;
4431 cma_dev = kmalloc(sizeof *cma_dev, GFP_KERNEL);
4435 sysctl_ctx_init(&cma_dev->sysctl_ctx);
4437 cma_dev->device = device;
4438 cma_dev->default_gid_type = kcalloc(device->phys_port_cnt,
4439 sizeof(*cma_dev->default_gid_type),
4441 if (!cma_dev->default_gid_type) {
4445 for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
4446 unsigned long supported_gids;
4447 unsigned int default_gid_type;
4449 supported_gids = roce_gid_type_mask_support(device, i);
4451 if (WARN_ON(!supported_gids)) {
4452 /* set something valid */
4453 default_gid_type = 0;
4454 } else if (test_bit(IB_GID_TYPE_ROCE_UDP_ENCAP, &supported_gids)) {
4455 /* prefer RoCEv2, if supported */
4456 default_gid_type = IB_GID_TYPE_ROCE_UDP_ENCAP;
4458 default_gid_type = find_first_bit(&supported_gids,
4461 cma_dev->default_gid_type[i - rdma_start_port(device)] =
4465 init_completion(&cma_dev->comp);
4466 atomic_set(&cma_dev->refcount, 1);
4467 INIT_LIST_HEAD(&cma_dev->id_list);
4468 ib_set_client_data(device, &cma_client, cma_dev);
4471 list_add_tail(&cma_dev->list, &dev_list);
4472 list_for_each_entry(id_priv, &listen_any_list, list)
4473 cma_listen_on_dev(id_priv, cma_dev);
4474 mutex_unlock(&lock);
4476 for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) {
4479 snprintf(buf, sizeof(buf), "default_roce_mode_port%d", i);
4481 (void) SYSCTL_ADD_PROC(&cma_dev->sysctl_ctx,
4482 SYSCTL_CHILDREN(device->ports_parent->parent->oidp),
4483 OID_AUTO, buf, CTLTYPE_STRING | CTLFLAG_RWTUN | CTLFLAG_MPSAFE,
4484 cma_dev, i, &sysctl_cma_default_roce_mode, "A",
4485 "Default RoCE mode. Valid values: IB/RoCE v1 and RoCE v2");
4489 static int cma_remove_id_dev(struct rdma_id_private *id_priv)
4491 struct rdma_cm_event event;
4492 enum rdma_cm_state state;
4495 /* Record that we want to remove the device */
4496 state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
4497 if (state == RDMA_CM_DESTROYING)
4500 cma_cancel_operation(id_priv, state);
4501 mutex_lock(&id_priv->handler_mutex);
4503 /* Check for destruction from another callback. */
4504 if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
4507 memset(&event, 0, sizeof event);
4508 event.event = RDMA_CM_EVENT_DEVICE_REMOVAL;
4509 ret = id_priv->id.event_handler(&id_priv->id, &event);
4511 mutex_unlock(&id_priv->handler_mutex);
4515 static void cma_process_remove(struct cma_device *cma_dev)
4517 struct rdma_id_private *id_priv;
4521 while (!list_empty(&cma_dev->id_list)) {
4522 id_priv = list_entry(cma_dev->id_list.next,
4523 struct rdma_id_private, list);
4525 list_del(&id_priv->listen_list);
4526 list_del_init(&id_priv->list);
4527 atomic_inc(&id_priv->refcount);
4528 mutex_unlock(&lock);
4530 ret = id_priv->internal_id ? 1 : cma_remove_id_dev(id_priv);
4531 cma_deref_id(id_priv);
4533 rdma_destroy_id(&id_priv->id);
4537 mutex_unlock(&lock);
4539 cma_deref_dev(cma_dev);
4540 wait_for_completion(&cma_dev->comp);
4543 static void cma_remove_one(struct ib_device *device, void *client_data)
4545 struct cma_device *cma_dev = client_data;
4551 list_del(&cma_dev->list);
4552 mutex_unlock(&lock);
4554 cma_process_remove(cma_dev);
4555 sysctl_ctx_free(&cma_dev->sysctl_ctx);
4556 kfree(cma_dev->default_gid_type);
4560 static void cma_init_vnet(void *arg)
4562 struct cma_pernet *pernet = &VNET(cma_pernet);
4564 idr_init(&pernet->tcp_ps);
4565 idr_init(&pernet->udp_ps);
4566 idr_init(&pernet->ipoib_ps);
4567 idr_init(&pernet->ib_ps);
4568 idr_init(&pernet->sdp_ps);
4570 VNET_SYSINIT(cma_init_vnet, SI_SUB_OFED_MODINIT - 1, SI_ORDER_FIRST, cma_init_vnet, NULL);
4572 static void cma_destroy_vnet(void *arg)
4574 struct cma_pernet *pernet = &VNET(cma_pernet);
4576 idr_destroy(&pernet->tcp_ps);
4577 idr_destroy(&pernet->udp_ps);
4578 idr_destroy(&pernet->ipoib_ps);
4579 idr_destroy(&pernet->ib_ps);
4580 idr_destroy(&pernet->sdp_ps);
4582 VNET_SYSUNINIT(cma_destroy_vnet, SI_SUB_OFED_MODINIT - 1, SI_ORDER_SECOND, cma_destroy_vnet, NULL);
4584 static int __init cma_init(void)
4588 cma_wq = alloc_ordered_workqueue("rdma_cm", WQ_MEM_RECLAIM);
4592 ib_sa_register_client(&sa_client);
4593 rdma_addr_register_client(&addr_client);
4595 ret = ib_register_client(&cma_client);
4599 cma_configfs_init();
4604 rdma_addr_unregister_client(&addr_client);
4605 ib_sa_unregister_client(&sa_client);
4606 destroy_workqueue(cma_wq);
4610 static void __exit cma_cleanup(void)
4612 cma_configfs_exit();
4613 ib_unregister_client(&cma_client);
4614 rdma_addr_unregister_client(&addr_client);
4615 ib_sa_unregister_client(&sa_client);
4616 destroy_workqueue(cma_wq);
4619 module_init_order(cma_init, SI_ORDER_FOURTH);
4620 module_exit_order(cma_cleanup, SI_ORDER_FOURTH);