/*-
 * SPDX-License-Identifier: BSD-2-Clause OR GPL-2.0
 *
 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/module.h>

#include <sys/filio.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>
#include <rdma/ib_addr.h>
#include <rdma/ib.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

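/*
 * Upper bound on the listen backlog a process may request; ucma_listen()
 * clamps the user-supplied backlog to this value.
 */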
static unsigned int max_backlog = 1024;

struct ucma_file {
        struct mutex mut;
        struct file *filp;
        struct list_head ctx_list;
        struct list_head event_list;
        wait_queue_head_t poll_wait;
        struct workqueue_struct *close_wq;
};

struct ucma_context {
        int id;
        struct completion comp;
        atomic_t ref;
        int events_reported;
        int backlog;

        struct ucma_file *file;
        struct rdma_cm_id *cm_id;
        u64 uid;

        struct list_head list;
        struct list_head mc_list;
        /* mark that device is in process of destroying the internal HW
         * resources, protected by the global mut
         */
        int closing;
        /* sync between removal event and id destroy, protected by file mut */
        int destroying;
        struct work_struct close_work;
};

struct ucma_multicast {
        struct ucma_context *ctx;
        int id;
        int events_reported;

        u64 uid;
        u8 join_state;
        struct list_head list;
        struct sockaddr_storage addr;
};

struct ucma_event {
        struct ucma_context *ctx;
        struct ucma_multicast *mc;
        struct list_head list;
        struct rdma_cm_id *cm_id;
        struct rdma_ucm_event_resp resp;
        struct work_struct close_work;
};

static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);

static inline struct ucma_context *_ucma_find_context(int id,
                                                      struct ucma_file *file)
{
        struct ucma_context *ctx;

        ctx = idr_find(&ctx_idr, id);
        if (!ctx)
                ctx = ERR_PTR(-ENOENT);
        else if (ctx->file != file)
                ctx = ERR_PTR(-EINVAL);
        return ctx;
}

static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
        struct ucma_context *ctx;

        mutex_lock(&mut);
        ctx = _ucma_find_context(id, file);
        if (!IS_ERR(ctx))
                atomic_inc(&ctx->ref);
        mutex_unlock(&mut);
        return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
        if (atomic_dec_and_test(&ctx->ref))
                complete(&ctx->comp);
}

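/*
 * Every successful ucma_get_ctx() must be balanced by a ucma_put_ctx().
 * A context starts with one reference from ucma_alloc_ctx(); the destroy
 * paths drop that reference and then wait on ctx->comp until all
 * outstanding references are gone before calling rdma_destroy_id().
 */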
static void ucma_close_event_id(struct work_struct *work)
{
        struct ucma_event *uevent_close = container_of(work, struct ucma_event, close_work);

        rdma_destroy_id(uevent_close->cm_id);
        kfree(uevent_close);
}

static void ucma_close_id(struct work_struct *work)
{
        struct ucma_context *ctx = container_of(work, struct ucma_context, close_work);

        /* once all inflight tasks are finished, we close all underlying
         * resources. The context is still alive till its explicit destroying
         * by its creator.
         */
        ucma_put_ctx(ctx);
        wait_for_completion(&ctx->comp);
        /* No new events will be generated after destroying the id. */
        rdma_destroy_id(ctx->cm_id);
}

static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
        struct ucma_context *ctx;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return NULL;

        INIT_WORK(&ctx->close_work, ucma_close_id);
        atomic_set(&ctx->ref, 1);
        init_completion(&ctx->comp);
        INIT_LIST_HEAD(&ctx->mc_list);
        ctx->file = file;

        mutex_lock(&mut);
        ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL);
        mutex_unlock(&mut);
        if (ctx->id < 0)
                goto error;

        list_add_tail(&ctx->list, &file->ctx_list);
        return ctx;

error:
        kfree(ctx);
        return NULL;
}

static struct ucma_multicast *ucma_alloc_multicast(struct ucma_context *ctx)
{
        struct ucma_multicast *mc;

        mc = kzalloc(sizeof(*mc), GFP_KERNEL);
        if (!mc)
                return NULL;

        mutex_lock(&mut);
        mc->id = idr_alloc(&multicast_idr, mc, 0, 0, GFP_KERNEL);
        mutex_unlock(&mut);
        if (mc->id < 0)
                goto error;

        mc->ctx = ctx;
        list_add_tail(&mc->list, &ctx->mc_list);
        return mc;

error:
        kfree(mc);
        return NULL;
}

static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
                                 struct rdma_conn_param *src)
{
        if (src->private_data_len)
                memcpy(dst->private_data, src->private_data,
                       src->private_data_len);
        dst->private_data_len = src->private_data_len;
        dst->responder_resources = src->responder_resources;
        dst->initiator_depth = src->initiator_depth;
        dst->flow_control = src->flow_control;
        dst->retry_count = src->retry_count;
        dst->rnr_retry_count = src->rnr_retry_count;
        dst->srq = src->srq;
        dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
                               struct rdma_ud_param *src)
{
        if (src->private_data_len)
                memcpy(dst->private_data, src->private_data,
                       src->private_data_len);
        dst->private_data_len = src->private_data_len;
        ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
        dst->qp_num = src->qp_num;
        dst->qkey = src->qkey;
}

static void ucma_set_event_context(struct ucma_context *ctx,
                                   struct rdma_cm_event *event,
                                   struct ucma_event *uevent)
{
        uevent->ctx = ctx;
        switch (event->event) {
        case RDMA_CM_EVENT_MULTICAST_JOIN:
        case RDMA_CM_EVENT_MULTICAST_ERROR:
                uevent->mc = __DECONST(struct ucma_multicast *,
                    event->param.ud.private_data);
                uevent->resp.uid = uevent->mc->uid;
                uevent->resp.id = uevent->mc->id;
                break;
        default:
                uevent->resp.uid = ctx->uid;
                uevent->resp.id = ctx->id;
                break;
        }
}

/* Called with file->mut locked for the relevant context. */
static void ucma_removal_event_handler(struct rdma_cm_id *cm_id)
{
        struct ucma_context *ctx = cm_id->context;
        struct ucma_event *con_req_eve;
        int event_found = 0;

        if (ctx->destroying)
                return;

        /* only if the context owns this cm_id can it be queued to be closed;
         * otherwise the cm_id is an inflight one that is part of that
         * context's event list, pending to be detached and reattached to its
         * new context as part of ucma_get_event, and is handled separately
         * below.
         */
        if (ctx->cm_id == cm_id) {
                mutex_lock(&mut);
                ctx->closing = 1;
                mutex_unlock(&mut);
                queue_work(ctx->file->close_wq, &ctx->close_work);
                return;
        }

        list_for_each_entry(con_req_eve, &ctx->file->event_list, list) {
                if (con_req_eve->cm_id == cm_id &&
                    con_req_eve->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
                        list_del(&con_req_eve->list);
                        INIT_WORK(&con_req_eve->close_work, ucma_close_event_id);
                        queue_work(ctx->file->close_wq, &con_req_eve->close_work);
                        event_found = 1;
                        break;
                }
        }
        if (!event_found)
                pr_err("ucma_removal_event_handler: warning: connect request event wasn't found\n");
}

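/*
 * rdma_cm event callback: queue a private copy of the event on the owning
 * file's event_list and wake any reader blocked in ucma_get_event() or
 * poll(). The event data is copied because it is only valid for the
 * duration of this callback.
 */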
static int ucma_event_handler(struct rdma_cm_id *cm_id,
                              struct rdma_cm_event *event)
{
        struct ucma_event *uevent;
        struct ucma_context *ctx = cm_id->context;
        int ret = 0;

        uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
        if (!uevent)
                return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

        mutex_lock(&ctx->file->mut);
        uevent->cm_id = cm_id;
        ucma_set_event_context(ctx, event, uevent);
        uevent->resp.event = event->event;
        uevent->resp.status = event->status;
        if (cm_id->qp_type == IB_QPT_UD)
                ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
        else
                ucma_copy_conn_event(&uevent->resp.param.conn,
                                     &event->param.conn);

        if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
                if (!ctx->backlog) {
                        ret = -ENOMEM;
                        kfree(uevent);
                        goto out;
                }
                ctx->backlog--;
        } else if (!ctx->uid || ctx->cm_id != cm_id) {
                /*
                 * We ignore events for new connections until userspace has set
                 * their context. This can only happen if an error occurs on a
                 * new connection before the user accepts it. This is okay,
                 * since the accept will just fail later. However, we do need
                 * to release the underlying HW resources in case of a device
                 * removal event.
                 */
                if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
                        ucma_removal_event_handler(cm_id);

                kfree(uevent);
                goto out;
        }

        list_add_tail(&uevent->list, &ctx->file->event_list);
        wake_up_interruptible(&ctx->file->poll_wait);
        if (event->event == RDMA_CM_EVENT_DEVICE_REMOVAL)
                ucma_removal_event_handler(cm_id);
out:
        mutex_unlock(&ctx->file->mut);
        return ret;
}

static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
                              int in_len, int out_len)
{
        struct ucma_context *ctx;
        struct rdma_ucm_get_event cmd;
        struct ucma_event *uevent;
        int ret = 0;

        if (out_len < sizeof uevent->resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        mutex_lock(&file->mut);
        while (list_empty(&file->event_list)) {
                mutex_unlock(&file->mut);

                if (file->filp->f_flags & O_NONBLOCK)
                        return -EAGAIN;

                if (wait_event_interruptible(file->poll_wait,
                                             !list_empty(&file->event_list)))
                        return -ERESTARTSYS;

                mutex_lock(&file->mut);
        }

        uevent = list_entry(file->event_list.next, struct ucma_event, list);

        if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
                ctx = ucma_alloc_ctx(file);
                if (!ctx) {
                        ret = -ENOMEM;
                        goto done;
                }
                uevent->ctx->backlog++;
                ctx->cm_id = uevent->cm_id;
                ctx->cm_id->context = ctx;
                uevent->resp.id = ctx->id;
        }

        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &uevent->resp, sizeof uevent->resp)) {
                ret = -EFAULT;
                goto done;
        }

        list_del(&uevent->list);
        uevent->ctx->events_reported++;
        if (uevent->mc)
                uevent->mc->events_reported++;
        kfree(uevent);
done:
        mutex_unlock(&file->mut);
        return ret;
}

static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type)
{
        switch (cmd->ps) {
        case RDMA_PS_TCP:
                *qp_type = IB_QPT_RC;
                return 0;
        case RDMA_PS_UDP:
        case RDMA_PS_IPOIB:
                *qp_type = IB_QPT_UD;
                return 0;
        case RDMA_PS_IB:
                *qp_type = cmd->qp_type;
                return 0;
        default:
                return -EINVAL;
        }
}

static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf,
                              int in_len, int out_len)
{
        struct rdma_ucm_create_id cmd;
        struct rdma_ucm_create_id_resp resp;
        struct ucma_context *ctx;
        enum ib_qp_type qp_type;
        int ret;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ret = ucma_get_qp_type(&cmd, &qp_type);
        if (ret)
                return ret;

        mutex_lock(&file->mut);
        ctx = ucma_alloc_ctx(file);
        mutex_unlock(&file->mut);
        if (!ctx)
                return -ENOMEM;

        ctx->uid = cmd.uid;
        ctx->cm_id = rdma_create_id(TD_TO_VNET(curthread),
                                    ucma_event_handler, ctx, cmd.ps, qp_type);
        if (IS_ERR(ctx->cm_id)) {
                ret = PTR_ERR(ctx->cm_id);
                goto err1;
        }

        resp.id = ctx->id;
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp))) {
                ret = -EFAULT;
                goto err2;
        }
        return 0;

err2:
        rdma_destroy_id(ctx->cm_id);
err1:
        mutex_lock(&mut);
        idr_remove(&ctx_idr, ctx->id);
        mutex_unlock(&mut);
        kfree(ctx);
        return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
        struct ucma_multicast *mc, *tmp;

        mutex_lock(&mut);
        list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
                list_del(&mc->list);
                idr_remove(&multicast_idr, mc->id);
                kfree(mc);
        }
        mutex_unlock(&mut);
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
        struct ucma_event *uevent, *tmp;

        list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
                if (uevent->mc != mc)
                        continue;

                list_del(&uevent->list);
                kfree(uevent);
        }
}

/*
 * ucma_free_ctx is called after the underlying rdma CM-ID is destroyed. At
 * this point, no new events will be reported from the hardware. However, we
 * still need to cleanup the UCMA context for this ID. Specifically, there
 * might be events that have not yet been consumed by the user space software.
 * These might include pending connect requests which we have not completed
 * processing. We cannot call rdma_destroy_id while holding the lock of the
 * context (file->mut), as it might cause a deadlock. We therefore extract all
 * relevant events from the context pending events list while holding the
 * mutex. After that we release them as needed.
 */
static int ucma_free_ctx(struct ucma_context *ctx)
{
        int events_reported;
        struct ucma_event *uevent, *tmp;
        LIST_HEAD(list);

        ucma_cleanup_multicast(ctx);

        /* Cleanup events not yet reported to the user. */
        mutex_lock(&ctx->file->mut);
        list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
                if (uevent->ctx == ctx)
                        list_move_tail(&uevent->list, &list);
        }
        list_del(&ctx->list);
        mutex_unlock(&ctx->file->mut);

        list_for_each_entry_safe(uevent, tmp, &list, list) {
                list_del(&uevent->list);
                if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
                        rdma_destroy_id(uevent->cm_id);
                kfree(uevent);
        }

        events_reported = ctx->events_reported;
        kfree(ctx);
        return events_reported;
}

static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
                               int in_len, int out_len)
{
        struct rdma_ucm_destroy_id cmd;
        struct rdma_ucm_destroy_id_resp resp;
        struct ucma_context *ctx;
        int ret = 0;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        mutex_lock(&mut);
        ctx = _ucma_find_context(cmd.id, file);
        if (!IS_ERR(ctx))
                idr_remove(&ctx_idr, ctx->id);
        mutex_unlock(&mut);

        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        mutex_lock(&ctx->file->mut);
        ctx->destroying = 1;
        mutex_unlock(&ctx->file->mut);

        flush_workqueue(ctx->file->close_wq);
        /* At this point it's guaranteed that there is no inflight
         * closing task.
         */
        mutex_lock(&mut);
        if (!ctx->closing) {
                mutex_unlock(&mut);
                ucma_put_ctx(ctx);
                wait_for_completion(&ctx->comp);
                rdma_destroy_id(ctx->cm_id);
        } else {
                mutex_unlock(&mut);
        }

        resp.events_reported = ucma_free_ctx(ctx);
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;

        return ret;
}

static ssize_t ucma_bind_ip(struct ucma_file *file, const char __user *inbuf,
                            int in_len, int out_len)
{
        struct rdma_ucm_bind_ip cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_bind(struct ucma_file *file, const char __user *inbuf,
                         int in_len, int out_len)
{
        struct rdma_ucm_bind cmd;
        struct sockaddr *addr;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        addr = (struct sockaddr *) &cmd.addr;
        if (cmd.reserved || !cmd.addr_size || (cmd.addr_size != rdma_addr_size(addr)))
                return -EINVAL;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_bind_addr(ctx->cm_id, addr);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_resolve_ip(struct ucma_file *file,
                               const char __user *inbuf,
                               int in_len, int out_len)
{
        struct rdma_ucm_resolve_ip cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
                                (struct sockaddr *) &cmd.dst_addr,
                                cmd.timeout_ms);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
                                 const char __user *inbuf,
                                 int in_len, int out_len)
{
        struct rdma_ucm_resolve_addr cmd;
        struct sockaddr *src, *dst;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        src = (struct sockaddr *) &cmd.src_addr;
        dst = (struct sockaddr *) &cmd.dst_addr;
        if (cmd.reserved || (cmd.src_size && (cmd.src_size != rdma_addr_size(src))) ||
            !cmd.dst_size || (cmd.dst_size != rdma_addr_size(dst)))
                return -EINVAL;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
                                  const char __user *inbuf,
                                  int in_len, int out_len)
{
        struct rdma_ucm_resolve_route cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
        ucma_put_ctx(ctx);
        return ret;
}

static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
                               struct rdma_route *route)
{
        struct rdma_dev_addr *dev_addr;

        resp->num_paths = route->num_paths;
        switch (route->num_paths) {
        case 0:
                dev_addr = &route->addr.dev_addr;
                rdma_addr_get_dgid(dev_addr,
                                   (union ib_gid *) &resp->ib_route[0].dgid);
                rdma_addr_get_sgid(dev_addr,
                                   (union ib_gid *) &resp->ib_route[0].sgid);
                resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
                break;
        case 2:
                ib_copy_path_rec_to_user(&resp->ib_route[1],
                                         &route->path_rec[1]);
                /* fall through */
        case 1:
                ib_copy_path_rec_to_user(&resp->ib_route[0],
                                         &route->path_rec[0]);
                break;
        default:
                break;
        }
}

static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
                                 struct rdma_route *route)
{
        resp->num_paths = route->num_paths;
        switch (route->num_paths) {
        case 0:
                rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr,
                            (union ib_gid *)&resp->ib_route[0].dgid);
                rdma_ip2gid((struct sockaddr *)&route->addr.src_addr,
                            (union ib_gid *)&resp->ib_route[0].sgid);
                resp->ib_route[0].pkey = cpu_to_be16(0xffff);
                break;
        case 2:
                ib_copy_path_rec_to_user(&resp->ib_route[1],
                                         &route->path_rec[1]);
                /* fall through */
        case 1:
                ib_copy_path_rec_to_user(&resp->ib_route[0],
                                         &route->path_rec[0]);
                break;
        default:
                break;
        }
}

static void ucma_copy_iw_route(struct rdma_ucm_query_route_resp *resp,
                               struct rdma_route *route)
{
        struct rdma_dev_addr *dev_addr;

        dev_addr = &route->addr.dev_addr;
        rdma_addr_get_dgid(dev_addr, (union ib_gid *) &resp->ib_route[0].dgid);
        rdma_addr_get_sgid(dev_addr, (union ib_gid *) &resp->ib_route[0].sgid);
}

static ssize_t ucma_query_route(struct ucma_file *file,
                                const char __user *inbuf,
                                int in_len, int out_len)
{
        struct rdma_ucm_query cmd;
        struct rdma_ucm_query_route_resp resp;
        struct ucma_context *ctx;
        struct sockaddr *addr;
        int ret = 0;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        memset(&resp, 0, sizeof resp);
        addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
        memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
                                     sizeof(struct sockaddr_in) :
                                     sizeof(struct sockaddr_in6));
        addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
        memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
                                     sizeof(struct sockaddr_in) :
                                     sizeof(struct sockaddr_in6));
        if (!ctx->cm_id->device)
                goto out;

        resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
        resp.port_num = ctx->cm_id->port_num;

        if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
                ucma_copy_ib_route(&resp, &ctx->cm_id->route);
        else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
                ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
        else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
                ucma_copy_iw_route(&resp, &ctx->cm_id->route);

out:
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;

        ucma_put_ctx(ctx);
        return ret;
}

static void ucma_query_device_addr(struct rdma_cm_id *cm_id,
                                   struct rdma_ucm_query_addr_resp *resp)
{
        if (!cm_id->device)
                return;

        resp->node_guid = (__force __u64) cm_id->device->node_guid;
        resp->port_num = cm_id->port_num;
        resp->pkey = (__force __u16) cpu_to_be16(
                     ib_addr_get_pkey(&cm_id->route.addr.dev_addr));
}

static ssize_t ucma_query_addr(struct ucma_context *ctx,
                               void __user *response, int out_len)
{
        struct rdma_ucm_query_addr_resp resp;
        struct sockaddr *addr;
        int ret = 0;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        memset(&resp, 0, sizeof resp);

        addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
        resp.src_size = rdma_addr_size(addr);
        memcpy(&resp.src_addr, addr, resp.src_size);

        addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
        resp.dst_size = rdma_addr_size(addr);
        memcpy(&resp.dst_addr, addr, resp.dst_size);

        ucma_query_device_addr(ctx->cm_id, &resp);

        if (copy_to_user(response, &resp, sizeof(resp)))
                ret = -EFAULT;

        return ret;
}

static ssize_t ucma_query_path(struct ucma_context *ctx,
                               void __user *response, int out_len)
{
        struct rdma_ucm_query_path_resp *resp;
        int i, ret = 0;

        if (out_len < sizeof(*resp))
                return -ENOSPC;

        resp = kzalloc(out_len, GFP_KERNEL);
        if (!resp)
                return -ENOMEM;

        resp->num_paths = ctx->cm_id->route.num_paths;
        for (i = 0, out_len -= sizeof(*resp);
             i < resp->num_paths && out_len > sizeof(struct ib_path_rec_data);
             i++, out_len -= sizeof(struct ib_path_rec_data)) {

                resp->path_data[i].flags = IB_PATH_GMP | IB_PATH_PRIMARY |
                                           IB_PATH_BIDIRECTIONAL;
                ib_sa_pack_path(&ctx->cm_id->route.path_rec[i],
                                &resp->path_data[i].path_rec);
        }

        if (copy_to_user(response, resp,
                         sizeof(*resp) + (i * sizeof(struct ib_path_rec_data))))
                ret = -EFAULT;

        kfree(resp);
        return ret;
}

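/*
 * ucma_query_gid() reports addresses in AF_IB form: native AF_IB addresses
 * are copied through unchanged, while IP addresses are synthesized from the
 * resolved device GIDs, P_Key and service ID.
 */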
static ssize_t ucma_query_gid(struct ucma_context *ctx,
                              void __user *response, int out_len)
{
        struct rdma_ucm_query_addr_resp resp;
        struct sockaddr_ib *addr;
        int ret = 0;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        memset(&resp, 0, sizeof resp);

        ucma_query_device_addr(ctx->cm_id, &resp);

        addr = (struct sockaddr_ib *) &resp.src_addr;
        resp.src_size = sizeof(*addr);
        if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) {
                memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size);
        } else {
                addr->sib_family = AF_IB;
                addr->sib_pkey = (__force __be16) resp.pkey;
                rdma_addr_get_sgid(&ctx->cm_id->route.addr.dev_addr,
                                   (union ib_gid *) &addr->sib_addr);
                addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
                                                    &ctx->cm_id->route.addr.src_addr);
        }

        addr = (struct sockaddr_ib *) &resp.dst_addr;
        resp.dst_size = sizeof(*addr);
        if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) {
                memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size);
        } else {
                addr->sib_family = AF_IB;
                addr->sib_pkey = (__force __be16) resp.pkey;
                rdma_addr_get_dgid(&ctx->cm_id->route.addr.dev_addr,
                                   (union ib_gid *) &addr->sib_addr);
                addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *)
                                                    &ctx->cm_id->route.addr.dst_addr);
        }

        if (copy_to_user(response, &resp, sizeof(resp)))
                ret = -EFAULT;

        return ret;
}

static ssize_t ucma_query(struct ucma_file *file,
                          const char __user *inbuf,
                          int in_len, int out_len)
{
        struct rdma_ucm_query cmd;
        struct ucma_context *ctx;
        void __user *response;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        response = (void __user *)(unsigned long) cmd.response;
        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        switch (cmd.option) {
        case RDMA_USER_CM_QUERY_ADDR:
                ret = ucma_query_addr(ctx, response, out_len);
                break;
        case RDMA_USER_CM_QUERY_PATH:
                ret = ucma_query_path(ctx, response, out_len);
                break;
        case RDMA_USER_CM_QUERY_GID:
                ret = ucma_query_gid(ctx, response, out_len);
                break;
        default:
                ret = -ENOSYS;
                break;
        }

        ucma_put_ctx(ctx);
        return ret;
}

static void ucma_copy_conn_param(struct rdma_cm_id *id,
                                 struct rdma_conn_param *dst,
                                 struct rdma_ucm_conn_param *src)
{
        dst->private_data = src->private_data;
        dst->private_data_len = src->private_data_len;
        dst->responder_resources = src->responder_resources;
        dst->initiator_depth = src->initiator_depth;
        dst->flow_control = src->flow_control;
        dst->retry_count = src->retry_count;
        dst->rnr_retry_count = src->rnr_retry_count;
        dst->srq = src->srq;
        dst->qp_num = src->qp_num;
        dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
                            int in_len, int out_len)
{
        struct rdma_ucm_connect cmd;
        struct rdma_conn_param conn_param;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        if (!cmd.conn_param.valid)
                return -EINVAL;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
        ret = rdma_connect(ctx->cm_id, &conn_param);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_listen cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ?
                       cmd.backlog : max_backlog;
        ret = rdma_listen(ctx->cm_id, ctx->backlog);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_accept cmd;
        struct rdma_conn_param conn_param;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        if (cmd.conn_param.valid) {
                ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param);
                mutex_lock(&file->mut);
                ret = rdma_accept(ctx->cm_id, &conn_param);
                if (!ret)
                        ctx->uid = cmd.uid;
                mutex_unlock(&file->mut);
        } else
                ret = rdma_accept(ctx->cm_id, NULL);

        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_reject cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
                               int in_len, int out_len)
{
        struct rdma_ucm_disconnect cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_disconnect(ctx->cm_id);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
                                 const char __user *inbuf,
                                 int in_len, int out_len)
{
        struct rdma_ucm_init_qp_attr cmd;
        struct ib_uverbs_qp_attr resp;
        struct ucma_context *ctx;
        struct ib_qp_attr qp_attr;
        int ret;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        resp.qp_attr_mask = 0;
        memset(&qp_attr, 0, sizeof qp_attr);
        qp_attr.qp_state = cmd.qp_state;
        ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
        if (ret)
                goto out;

        ib_copy_qp_attr_to_user(&resp, &qp_attr);
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;

out:
        ucma_put_ctx(ctx);
        return ret;
}

static int ucma_set_option_id(struct ucma_context *ctx, int optname,
                              void *optval, size_t optlen)
{
        int ret = 0;

        switch (optname) {
        case RDMA_OPTION_ID_TOS:
                if (optlen != sizeof(u8)) {
                        ret = -EINVAL;
                        break;
                }
                rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
                break;
        case RDMA_OPTION_ID_REUSEADDR:
                if (optlen != sizeof(int)) {
                        ret = -EINVAL;
                        break;
                }
                ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0);
                break;
        case RDMA_OPTION_ID_AFONLY:
                if (optlen != sizeof(int)) {
                        ret = -EINVAL;
                        break;
                }
                ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0);
                break;
        default:
                ret = -ENOSYS;
        }

        return ret;
}

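/*
 * RDMA_OPTION_IB_PATH: userspace supplies an array of packed
 * ib_path_rec_data entries. Once a primary bidirectional path is found it
 * is unpacked and installed on the cm_id, and a synthetic ROUTE_RESOLVED
 * event is queued so the application observes the normal resolution flow.
 */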
static int ucma_set_ib_path(struct ucma_context *ctx,
                            struct ib_path_rec_data *path_data, size_t optlen)
{
        struct ib_sa_path_rec sa_path;
        struct rdma_cm_event event;
        int ret;

        if (optlen % sizeof(*path_data))
                return -EINVAL;

        for (; optlen; optlen -= sizeof(*path_data), path_data++) {
                if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
                                         IB_PATH_BIDIRECTIONAL))
                        break;
        }

        if (!optlen)
                return -EINVAL;

        memset(&sa_path, 0, sizeof(sa_path));

        ib_sa_unpack_path(path_data->path_rec, &sa_path);
        ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
        if (ret)
                return ret;

        memset(&event, 0, sizeof event);
        event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
        return ucma_event_handler(ctx->cm_id, &event);
}

static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
                              void *optval, size_t optlen)
{
        int ret;

        switch (optname) {
        case RDMA_OPTION_IB_PATH:
                ret = ucma_set_ib_path(ctx, optval, optlen);
                break;
        default:
                ret = -ENOSYS;
        }

        return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
                                 int optname, void *optval, size_t optlen)
{
        int ret;

        switch (level) {
        case RDMA_OPTION_ID:
                ret = ucma_set_option_id(ctx, optname, optval, optlen);
                break;
        case RDMA_OPTION_IB:
                ret = ucma_set_option_ib(ctx, optname, optval, optlen);
                break;
        default:
                ret = -ENOSYS;
        }

        return ret;
}

static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
                               int in_len, int out_len)
{
        struct rdma_ucm_set_option cmd;
        struct ucma_context *ctx;
        void *optval;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        optval = memdup_user((void __user *) (unsigned long) cmd.optval,
                             cmd.optlen);
        if (IS_ERR(optval)) {
                ret = PTR_ERR(optval);
                goto out;
        }

        ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
                                    cmd.optlen);
        kfree(optval);

out:
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_notify cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_process_join(struct ucma_file *file,
                                 struct rdma_ucm_join_mcast *cmd, int out_len)
{
        struct rdma_ucm_create_id_resp resp;
        struct ucma_context *ctx;
        struct ucma_multicast *mc;
        struct sockaddr *addr;
        int ret;
        u8 join_state;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        addr = (struct sockaddr *) &cmd->addr;
        if (!cmd->addr_size || (cmd->addr_size != rdma_addr_size(addr)))
                return -EINVAL;

        if (cmd->join_flags == RDMA_MC_JOIN_FLAG_FULLMEMBER)
                join_state = BIT(FULLMEMBER_JOIN);
        else if (cmd->join_flags == RDMA_MC_JOIN_FLAG_SENDONLY_FULLMEMBER)
                join_state = BIT(SENDONLY_FULLMEMBER_JOIN);
        else
                return -EINVAL;

        ctx = ucma_get_ctx(file, cmd->id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        mutex_lock(&file->mut);
        mc = ucma_alloc_multicast(ctx);
        if (!mc) {
                ret = -ENOMEM;
                goto err1;
        }
        mc->join_state = join_state;
        mc->uid = cmd->uid;
        memcpy(&mc->addr, addr, cmd->addr_size);
        ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr,
                                  join_state, mc);
        if (ret)
                goto err2;

        resp.id = mc->id;
        if (copy_to_user((void __user *)(unsigned long) cmd->response,
                         &resp, sizeof(resp))) {
                ret = -EFAULT;
                goto err3;
        }

        mutex_unlock(&file->mut);
        ucma_put_ctx(ctx);
        return 0;

err3:
        rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
        ucma_cleanup_mc_events(mc);
err2:
        mutex_lock(&mut);
        idr_remove(&multicast_idr, mc->id);
        mutex_unlock(&mut);
        list_del(&mc->list);
        kfree(mc);
err1:
        mutex_unlock(&file->mut);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_join_ip_multicast(struct ucma_file *file,
                                      const char __user *inbuf,
                                      int in_len, int out_len)
{
        struct rdma_ucm_join_ip_mcast cmd;
        struct rdma_ucm_join_mcast join_cmd;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        join_cmd.response = cmd.response;
        join_cmd.uid = cmd.uid;
        join_cmd.id = cmd.id;
        join_cmd.addr_size = rdma_addr_size((struct sockaddr *) &cmd.addr);
        join_cmd.join_flags = RDMA_MC_JOIN_FLAG_FULLMEMBER;
        memcpy(&join_cmd.addr, &cmd.addr, join_cmd.addr_size);

        return ucma_process_join(file, &join_cmd, out_len);
}

static ssize_t ucma_join_multicast(struct ucma_file *file,
                                   const char __user *inbuf,
                                   int in_len, int out_len)
{
        struct rdma_ucm_join_mcast cmd;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        return ucma_process_join(file, &cmd, out_len);
}

static ssize_t ucma_leave_multicast(struct ucma_file *file,
                                    const char __user *inbuf,
                                    int in_len, int out_len)
{
        struct rdma_ucm_destroy_id cmd;
        struct rdma_ucm_destroy_id_resp resp;
        struct ucma_multicast *mc;
        int ret = 0;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        mutex_lock(&mut);
        mc = idr_find(&multicast_idr, cmd.id);
        if (!mc)
                mc = ERR_PTR(-ENOENT);
        else if (mc->ctx->file != file)
                mc = ERR_PTR(-EINVAL);
        else if (!atomic_inc_not_zero(&mc->ctx->ref))
                mc = ERR_PTR(-ENXIO);
        else
                idr_remove(&multicast_idr, mc->id);
        mutex_unlock(&mut);

        if (IS_ERR(mc)) {
                ret = PTR_ERR(mc);
                goto out;
        }

        rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
        mutex_lock(&mc->ctx->file->mut);
        ucma_cleanup_mc_events(mc);
        list_del(&mc->list);
        mutex_unlock(&mc->ctx->file->mut);

        ucma_put_ctx(mc->ctx);
        resp.events_reported = mc->events_reported;
        kfree(mc);

        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;
out:
        return ret;
}

static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
        /* Acquire mutexes based on pointer comparison to prevent deadlock. */
        if (file1 < file2) {
                mutex_lock(&file1->mut);
                mutex_lock_nested(&file2->mut, SINGLE_DEPTH_NESTING);
        } else {
                mutex_lock(&file2->mut);
                mutex_lock_nested(&file1->mut, SINGLE_DEPTH_NESTING);
        }
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
        if (file1 < file2) {
                mutex_unlock(&file2->mut);
                mutex_unlock(&file1->mut);
        } else {
                mutex_unlock(&file1->mut);
                mutex_unlock(&file2->mut);
        }
}

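/*
 * Lock-ordering note: for any two files A and B, ucma_lock_files(A, B) and
 * ucma_lock_files(B, A) both take the lower-addressed mutex first, so two
 * concurrent MIGRATE_ID calls in opposite directions cannot deadlock.
 */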
static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
        struct ucma_event *uevent, *tmp;

        list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
                if (uevent->ctx == ctx)
                        list_move_tail(&uevent->list, &file->event_list);
}

static ssize_t ucma_migrate_id(struct ucma_file *new_file,
                               const char __user *inbuf,
                               int in_len, int out_len)
{
        struct rdma_ucm_migrate_id cmd;
        struct rdma_ucm_migrate_resp resp;
        struct ucma_context *ctx;
        struct fd f;
        struct ucma_file *cur_file;
        int ret = 0;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        /* Get current fd to protect against it being closed */
        f = fdget(cmd.fd);
        if (!f.file)
                return -ENOENT;

        /* Validate current fd and prevent destruction of id. */
        ctx = ucma_get_ctx(f.file->private_data, cmd.id);
        if (IS_ERR(ctx)) {
                ret = PTR_ERR(ctx);
                goto file_put;
        }

        cur_file = ctx->file;
        if (cur_file == new_file) {
                resp.events_reported = ctx->events_reported;
                goto response;
        }

        /*
         * Migrate events between fd's, maintaining order, and avoiding new
         * events being added before existing events.
         */
        ucma_lock_files(cur_file, new_file);
        mutex_lock(&mut);

        list_move_tail(&ctx->list, &new_file->ctx_list);
        ucma_move_events(ctx, new_file);
        ctx->file = new_file;
        resp.events_reported = ctx->events_reported;

        mutex_unlock(&mut);
        ucma_unlock_files(cur_file, new_file);

response:
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;

        ucma_put_ctx(ctx);
file_put:
        fdput(f);
        return ret;
}

static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
                                   const char __user *inbuf,
                                   int in_len, int out_len) = {
        [RDMA_USER_CM_CMD_CREATE_ID]     = ucma_create_id,
        [RDMA_USER_CM_CMD_DESTROY_ID]    = ucma_destroy_id,
        [RDMA_USER_CM_CMD_BIND_IP]       = ucma_bind_ip,
        [RDMA_USER_CM_CMD_RESOLVE_IP]    = ucma_resolve_ip,
        [RDMA_USER_CM_CMD_RESOLVE_ROUTE] = ucma_resolve_route,
        [RDMA_USER_CM_CMD_QUERY_ROUTE]   = ucma_query_route,
        [RDMA_USER_CM_CMD_CONNECT]       = ucma_connect,
        [RDMA_USER_CM_CMD_LISTEN]        = ucma_listen,
        [RDMA_USER_CM_CMD_ACCEPT]        = ucma_accept,
        [RDMA_USER_CM_CMD_REJECT]        = ucma_reject,
        [RDMA_USER_CM_CMD_DISCONNECT]    = ucma_disconnect,
        [RDMA_USER_CM_CMD_INIT_QP_ATTR]  = ucma_init_qp_attr,
        [RDMA_USER_CM_CMD_GET_EVENT]     = ucma_get_event,
        [RDMA_USER_CM_CMD_GET_OPTION]    = NULL,
        [RDMA_USER_CM_CMD_SET_OPTION]    = ucma_set_option,
        [RDMA_USER_CM_CMD_NOTIFY]        = ucma_notify,
        [RDMA_USER_CM_CMD_JOIN_IP_MCAST] = ucma_join_ip_multicast,
        [RDMA_USER_CM_CMD_LEAVE_MCAST]   = ucma_leave_multicast,
        [RDMA_USER_CM_CMD_MIGRATE_ID]    = ucma_migrate_id,
        [RDMA_USER_CM_CMD_QUERY]         = ucma_query,
        [RDMA_USER_CM_CMD_BIND]          = ucma_bind,
        [RDMA_USER_CM_CMD_RESOLVE_ADDR]  = ucma_resolve_addr,
        [RDMA_USER_CM_CMD_JOIN_MCAST]    = ucma_join_multicast
};

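/*
 * Userspace sketch (illustrative only, not part of this driver): each
 * command is a single write(2) on the /dev/infiniband/rdma_cm fd, a
 * struct rdma_ucm_cmd_hdr immediately followed by the command payload.
 * Creating an id looks roughly like (msg/resp are caller-chosen names):
 *
 *      struct {
 *              struct rdma_ucm_cmd_hdr hdr;
 *              struct rdma_ucm_create_id cmd;
 *      } msg;
 *      struct rdma_ucm_create_id_resp resp;
 *
 *      msg.hdr.cmd = RDMA_USER_CM_CMD_CREATE_ID;
 *      msg.hdr.in  = sizeof(msg.cmd);
 *      msg.hdr.out = sizeof(resp);
 *      msg.cmd.uid = my_cookie;                -- caller-chosen tag
 *      msg.cmd.ps = RDMA_PS_TCP;
 *      msg.cmd.qp_type = IB_QPT_RC;
 *      msg.cmd.response = (uintptr_t)&resp;    -- filled in by the kernel
 *      write(fd, &msg, sizeof(msg));
 */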
static ssize_t ucma_write(struct file *filp, const char __user *buf,
                          size_t len, loff_t *pos)
{
        struct ucma_file *file = filp->private_data;
        struct rdma_ucm_cmd_hdr hdr;
        ssize_t ret;

        if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
                return -EACCES;

        if (len < sizeof(hdr))
                return -EINVAL;

        if (copy_from_user(&hdr, buf, sizeof(hdr)))
                return -EFAULT;

        if (hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
                return -EINVAL;

        if (hdr.in + sizeof(hdr) > len)
                return -EINVAL;

        if (!ucma_cmd_table[hdr.cmd])
                return -ENOSYS;

        ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
        if (!ret)
                ret = len;

        return ret;
}

static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
        struct ucma_file *file = filp->private_data;
        unsigned int mask = 0;

        poll_wait(filp, &file->poll_wait, wait);

        if (!list_empty(&file->event_list))
                mask = POLLIN | POLLRDNORM;

        return mask;
}

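/*
 * Userspace sketch (illustrative only): wait for POLLIN on the rdma_cm fd,
 * then issue RDMA_USER_CM_CMD_GET_EVENT to fetch the queued
 * rdma_ucm_event_resp, mirroring ucma_get_event() above:
 *
 *      struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *      if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *              write(fd, &get_event_msg, sizeof(get_event_msg));
 */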
/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
        struct ucma_file *file;

        file = kmalloc(sizeof *file, GFP_KERNEL);
        if (!file)
                return -ENOMEM;

        file->close_wq = alloc_ordered_workqueue("ucma_close_id",
                                                 WQ_MEM_RECLAIM);
        if (!file->close_wq) {
                kfree(file);
                return -ENOMEM;
        }

        INIT_LIST_HEAD(&file->event_list);
        INIT_LIST_HEAD(&file->ctx_list);
        init_waitqueue_head(&file->poll_wait);
        mutex_init(&file->mut);

        filp->private_data = file;
        file->filp = filp;

        return nonseekable_open(inode, filp);
}

static int ucma_close(struct inode *inode, struct file *filp)
{
        struct ucma_file *file = filp->private_data;
        struct ucma_context *ctx, *tmp;

        mutex_lock(&file->mut);
        list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
                ctx->destroying = 1;
                mutex_unlock(&file->mut);

                mutex_lock(&mut);
                idr_remove(&ctx_idr, ctx->id);
                mutex_unlock(&mut);

                flush_workqueue(file->close_wq);
                /* Once the ctx has been marked as destroying and the
                 * workqueue has been flushed, we are safe from any inflight
                 * handlers that might queue another closing task.
                 */
                mutex_lock(&mut);
                if (!ctx->closing) {
                        mutex_unlock(&mut);
                        /* rdma_destroy_id ensures that no event handlers are
                         * inflight for that id before releasing it.
                         */
                        rdma_destroy_id(ctx->cm_id);
                } else {
                        mutex_unlock(&mut);
                }

                ucma_free_ctx(ctx);
                mutex_lock(&file->mut);
        }
        mutex_unlock(&file->mut);
        destroy_workqueue(file->close_wq);
        kfree(file);
        return 0;
}

static long
ucma_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case FIONBIO:
        case FIOASYNC:
                return (0);
        default:
                return (-ENOTTY);
        }
}

static const struct file_operations ucma_fops = {
        .owner   = THIS_MODULE,
        .open    = ucma_open,
        .release = ucma_close,
        .write   = ucma_write,
        .unlocked_ioctl = ucma_ioctl,
        .poll    = ucma_poll,
        .llseek  = no_llseek,
};

static struct miscdevice ucma_misc = {
        .minor    = MISC_DYNAMIC_MINOR,
        .name     = "rdma_cm",
        .nodename = "infiniband/rdma_cm",
        .mode     = 0666,
        .fops     = &ucma_fops,
};

static ssize_t show_abi_version(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
        int ret;

        ret = misc_register(&ucma_misc);
        if (ret)
                return ret;

        ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
        if (ret) {
                pr_err("rdma_ucm: couldn't create abi_version attr\n");
                goto err1;
        }
        return 0;

err1:
        misc_deregister(&ucma_misc);
        return ret;
}

static void __exit ucma_cleanup(void)
{
        device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
        misc_deregister(&ucma_misc);
        idr_destroy(&ctx_idr);
        idr_destroy(&multicast_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);