/*
 * Copyright (c) 2005-2006 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/completion.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/idr.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>

#include <rdma/rdma_user_cm.h>
#include <rdma/ib_marshall.h>
#include <rdma/rdma_cm.h>
#include <rdma/rdma_cm_ib.h>

MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("RDMA Userspace Connection Manager Access");
MODULE_LICENSE("Dual BSD/GPL");

enum {
        UCMA_MAX_BACKLOG        = 1024
};

struct ucma_file {
        struct mutex            mut;
        struct file             *filp;
        struct list_head        ctx_list;
        struct list_head        event_list;
        wait_queue_head_t       poll_wait;
};

struct ucma_context {
        int                     id;
        struct completion       comp;
        atomic_t                ref;
        int                     events_reported;
        int                     backlog;

        struct ucma_file        *file;
        struct rdma_cm_id       *cm_id;
        u64                     uid;

        struct list_head        list;
        struct list_head        mc_list;
};

struct ucma_multicast {
        struct ucma_context     *ctx;
        int                     id;
        int                     events_reported;

        u64                     uid;
        struct list_head        list;
        struct sockaddr_storage addr;
};

struct ucma_event {
        struct ucma_context     *ctx;
        struct ucma_multicast   *mc;
        struct list_head        list;
        struct rdma_cm_id       *cm_id;
        struct rdma_ucm_event_resp resp;
};

static DEFINE_MUTEX(mut);
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);

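/*
 * Locking overview (editor's summary of the code below): the global 'mut'
 * protects ctx_idr and multicast_idr, while each ucma_file's 'mut' protects
 * that file's ctx_list and event_list.  Contexts are looked up under 'mut'
 * and pinned with an atomic reference count.
 */
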
static inline struct ucma_context *_ucma_find_context(int id,
                                                      struct ucma_file *file)
{
        struct ucma_context *ctx;

        ctx = idr_find(&ctx_idr, id);
        if (!ctx)
                ctx = ERR_PTR(-ENOENT);
        else if (ctx->file != file)
                ctx = ERR_PTR(-EINVAL);
        return ctx;
}

static struct ucma_context *ucma_get_ctx(struct ucma_file *file, int id)
{
        struct ucma_context *ctx;

        mutex_lock(&mut);
        ctx = _ucma_find_context(id, file);
        if (!IS_ERR(ctx))
                atomic_inc(&ctx->ref);
        mutex_unlock(&mut);
        return ctx;
}

static void ucma_put_ctx(struct ucma_context *ctx)
{
        if (atomic_dec_and_test(&ctx->ref))
                complete(&ctx->comp);
}

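/*
 * Lifetime note (editor's summary, not original kernel commentary):
 * ucma_get_ctx() takes a reference under 'mut'; ucma_put_ctx() completes
 * 'comp' when the last reference drops, which is what ucma_destroy_id()
 * waits on before freeing the context.
 */
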
static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file)
{
        struct ucma_context *ctx;
        int ret;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return NULL;

        atomic_set(&ctx->ref, 1);
        init_completion(&ctx->comp);
        INIT_LIST_HEAD(&ctx->mc_list);
        ctx->file = file;

        do {
                ret = idr_pre_get(&ctx_idr, GFP_KERNEL);
                if (!ret)
                        goto error;
                mutex_lock(&mut);
                ret = idr_get_new(&ctx_idr, ctx, &ctx->id);
                mutex_unlock(&mut);
        } while (ret == -EAGAIN);
        if (ret)
                goto error;

        list_add_tail(&ctx->list, &file->ctx_list);
        return ctx;

error:
        kfree(ctx);
        return NULL;
}

static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx)
{
        struct ucma_multicast *mc;
        int ret;

        mc = kzalloc(sizeof(*mc), GFP_KERNEL);
        if (!mc)
                return NULL;

        do {
                ret = idr_pre_get(&multicast_idr, GFP_KERNEL);
                if (!ret)
                        goto error;
                mutex_lock(&mut);
                ret = idr_get_new(&multicast_idr, mc, &mc->id);
                mutex_unlock(&mut);
        } while (ret == -EAGAIN);
        if (ret)
                goto error;

        mc->ctx = ctx;
        list_add_tail(&mc->list, &ctx->mc_list);
        return mc;

error:
        kfree(mc);
        return NULL;
}

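/*
 * Editor's note: both allocators use the old IDR API, in which
 * idr_pre_get() preloads memory and idr_get_new() may still return
 * -EAGAIN under concurrent allocation, hence the retry loops above.
 * Later kernels replaced this pairing with idr_alloc().
 */
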
static void ucma_copy_conn_event(struct rdma_ucm_conn_param *dst,
                                 struct rdma_conn_param *src)
{
        if (src->private_data_len)
                memcpy(dst->private_data, src->private_data,
                       src->private_data_len);
        dst->private_data_len = src->private_data_len;
        dst->responder_resources = src->responder_resources;
        dst->initiator_depth = src->initiator_depth;
        dst->flow_control = src->flow_control;
        dst->retry_count = src->retry_count;
        dst->rnr_retry_count = src->rnr_retry_count;
        dst->srq = src->srq;
        dst->qp_num = src->qp_num;
}

static void ucma_copy_ud_event(struct rdma_ucm_ud_param *dst,
                               struct rdma_ud_param *src)
{
        if (src->private_data_len)
                memcpy(dst->private_data, src->private_data,
                       src->private_data_len);
        dst->private_data_len = src->private_data_len;
        ib_copy_ah_attr_to_user(&dst->ah_attr, &src->ah_attr);
        dst->qp_num = src->qp_num;
        dst->qkey = src->qkey;
}

static void ucma_set_event_context(struct ucma_context *ctx,
                                   struct rdma_cm_event *event,
                                   struct ucma_event *uevent)
{
        uevent->ctx = ctx;
        switch (event->event) {
        case RDMA_CM_EVENT_MULTICAST_JOIN:
        case RDMA_CM_EVENT_MULTICAST_ERROR:
                uevent->mc = (struct ucma_multicast *)
                             event->param.ud.private_data;
                uevent->resp.uid = uevent->mc->uid;
                uevent->resp.id = uevent->mc->id;
                break;
        default:
                uevent->resp.uid = ctx->uid;
                uevent->resp.id = ctx->id;
                break;
        }
}

static int ucma_event_handler(struct rdma_cm_id *cm_id,
                              struct rdma_cm_event *event)
{
        struct ucma_event *uevent;
        struct ucma_context *ctx = cm_id->context;
        int ret = 0;

        uevent = kzalloc(sizeof(*uevent), GFP_KERNEL);
        if (!uevent)
                return event->event == RDMA_CM_EVENT_CONNECT_REQUEST;

        uevent->cm_id = cm_id;
        ucma_set_event_context(ctx, event, uevent);
        uevent->resp.event = event->event;
        uevent->resp.status = event->status;
        if (cm_id->ps == RDMA_PS_UDP || cm_id->ps == RDMA_PS_IPOIB)
                ucma_copy_ud_event(&uevent->resp.param.ud, &event->param.ud);
        else
                ucma_copy_conn_event(&uevent->resp.param.conn,
                                     &event->param.conn);

        mutex_lock(&ctx->file->mut);
        if (event->event == RDMA_CM_EVENT_CONNECT_REQUEST) {
                if (!ctx->backlog) {
                        ret = -ENOMEM;
                        kfree(uevent);
                        goto out;
                }
                ctx->backlog--;
        } else if (!ctx->uid) {
                /*
                 * We ignore events for new connections until userspace has set
                 * their context.  This can only happen if an error occurs on a
                 * new connection before the user accepts it.  This is okay,
                 * since the accept will just fail later.
                 */
                kfree(uevent);
                goto out;
        }

        list_add_tail(&uevent->list, &ctx->file->event_list);
        wake_up_interruptible(&ctx->file->poll_wait);
out:
        mutex_unlock(&ctx->file->mut);
        return ret;
}

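/*
 * Event delivery (editor's summary): events are queued on the owning file's
 * event_list and readers are woken through poll_wait.  A CONNECT_REQUEST
 * consumes one slot of the listener's backlog; the slot is returned by
 * ucma_get_event() once the event is reported to userspace.
 */
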
static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
                              int in_len, int out_len)
{
        struct ucma_context *ctx;
        struct rdma_ucm_get_event cmd;
        struct ucma_event *uevent;
        int ret = 0;

        if (out_len < sizeof uevent->resp)
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        mutex_lock(&file->mut);
        while (list_empty(&file->event_list)) {
                mutex_unlock(&file->mut);

                if (file->filp->f_flags & O_NONBLOCK)
                        return -EAGAIN;

                if (wait_event_interruptible(file->poll_wait,
                                             !list_empty(&file->event_list)))
                        return -ERESTARTSYS;

                mutex_lock(&file->mut);
        }

        uevent = list_entry(file->event_list.next, struct ucma_event, list);

        if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST) {
                ctx = ucma_alloc_ctx(file);
                if (!ctx) {
                        ret = -ENOMEM;
                        goto done;
                }
                uevent->ctx->backlog++;
                ctx->cm_id = uevent->cm_id;
                ctx->cm_id->context = ctx;
                uevent->resp.id = ctx->id;
        }

        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &uevent->resp, sizeof uevent->resp)) {
                ret = -EFAULT;
                goto done;
        }

        list_del(&uevent->list);
        uevent->ctx->events_reported++;
        if (uevent->mc)
                uevent->mc->events_reported++;
        kfree(uevent);
done:
        mutex_unlock(&file->mut);
        return ret;
}

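/*
 * Editor's note: for RDMA_CM_EVENT_CONNECT_REQUEST, ucma_get_event()
 * materializes a brand-new context for the incoming rdma_cm_id and reports
 * its id to userspace.  The new context has no uid until userspace accepts
 * it, which is why ucma_event_handler() drops events for uid-less contexts.
 */
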
static ssize_t ucma_create_id(struct ucma_file *file,
                              const char __user *inbuf,
                              int in_len, int out_len)
{
        struct rdma_ucm_create_id cmd;
        struct rdma_ucm_create_id_resp resp;
        struct ucma_context *ctx;
        int ret;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        mutex_lock(&file->mut);
        ctx = ucma_alloc_ctx(file);
        mutex_unlock(&file->mut);
        if (!ctx)
                return -ENOMEM;

        ctx->uid = cmd.uid;
        ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps);
        if (IS_ERR(ctx->cm_id)) {
                ret = PTR_ERR(ctx->cm_id);
                goto err1;
        }

        resp.id = ctx->id;
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp))) {
                ret = -EFAULT;
                goto err2;
        }
        return 0;

err2:
        rdma_destroy_id(ctx->cm_id);
err1:
        mutex_lock(&mut);
        idr_remove(&ctx_idr, ctx->id);
        mutex_unlock(&mut);
        kfree(ctx);
        return ret;
}

static void ucma_cleanup_multicast(struct ucma_context *ctx)
{
        struct ucma_multicast *mc, *tmp;

        mutex_lock(&mut);
        list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) {
                list_del(&mc->list);
                idr_remove(&multicast_idr, mc->id);
                kfree(mc);
        }
        mutex_unlock(&mut);
}

static void ucma_cleanup_events(struct ucma_context *ctx)
{
        struct ucma_event *uevent, *tmp;

        list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
                if (uevent->ctx != ctx)
                        continue;

                list_del(&uevent->list);

                /* clear incoming connections. */
                if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
                        rdma_destroy_id(uevent->cm_id);

                kfree(uevent);
        }
}

static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
{
        struct ucma_event *uevent, *tmp;

        list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) {
                if (uevent->mc != mc)
                        continue;

                list_del(&uevent->list);
                kfree(uevent);
        }
}

static int ucma_free_ctx(struct ucma_context *ctx)
{
        int events_reported;

        /* No new events will be generated after destroying the id. */
        rdma_destroy_id(ctx->cm_id);

        ucma_cleanup_multicast(ctx);

        /* Cleanup events not yet reported to the user. */
        mutex_lock(&ctx->file->mut);
        ucma_cleanup_events(ctx);
        list_del(&ctx->list);
        mutex_unlock(&ctx->file->mut);

        events_reported = ctx->events_reported;
        kfree(ctx);
        return events_reported;
}

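/*
 * Teardown protocol (editor's summary): callers first remove the context
 * from ctx_idr so no new lookups can pin it, drop their reference with
 * ucma_put_ctx(), wait on 'comp' for any outstanding references, and only
 * then call ucma_free_ctx().
 */
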
static ssize_t ucma_destroy_id(struct ucma_file *file, const char __user *inbuf,
                               int in_len, int out_len)
{
        struct rdma_ucm_destroy_id cmd;
        struct rdma_ucm_destroy_id_resp resp;
        struct ucma_context *ctx;
        int ret = 0;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        mutex_lock(&mut);
        ctx = _ucma_find_context(cmd.id, file);
        if (!IS_ERR(ctx))
                idr_remove(&ctx_idr, ctx->id);
        mutex_unlock(&mut);

        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ucma_put_ctx(ctx);
        wait_for_completion(&ctx->comp);
        resp.events_reported = ucma_free_ctx(ctx);

        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;

        return ret;
}

static ssize_t ucma_bind_addr(struct ucma_file *file, const char __user *inbuf,
                              int in_len, int out_len)
{
        struct rdma_ucm_bind_addr cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_resolve_addr(struct ucma_file *file,
                                 const char __user *inbuf,
                                 int in_len, int out_len)
{
        struct rdma_ucm_resolve_addr cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr,
                                (struct sockaddr *) &cmd.dst_addr,
                                cmd.timeout_ms);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_resolve_route(struct ucma_file *file,
                                  const char __user *inbuf,
                                  int in_len, int out_len)
{
        struct rdma_ucm_resolve_route cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms);
        ucma_put_ctx(ctx);
        return ret;
}

static void ucma_copy_ib_route(struct rdma_ucm_query_route_resp *resp,
                               struct rdma_route *route)
{
        struct rdma_dev_addr *dev_addr;

        resp->num_paths = route->num_paths;
        switch (route->num_paths) {
        case 0:
                dev_addr = &route->addr.dev_addr;
                rdma_addr_get_dgid(dev_addr,
                                   (union ib_gid *) &resp->ib_route[0].dgid);
                rdma_addr_get_sgid(dev_addr,
                                   (union ib_gid *) &resp->ib_route[0].sgid);
                resp->ib_route[0].pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
                break;
        case 2:
                ib_copy_path_rec_to_user(&resp->ib_route[1],
                                         &route->path_rec[1]);
                /* fall through */
        case 1:
                ib_copy_path_rec_to_user(&resp->ib_route[0],
                                         &route->path_rec[0]);
                break;
        default:
                break;
        }
}

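/*
 * num_paths semantics (editor's summary): 0 means no path records have been
 * resolved yet, so the GIDs and pkey are synthesized from the device
 * address; 1 reports the primary path, and 2 adds the alternate path before
 * falling through to copy the primary.
 */
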
static void ucma_copy_iboe_route(struct rdma_ucm_query_route_resp *resp,
                                 struct rdma_route *route)
{
        struct rdma_dev_addr *dev_addr;
        struct net_device *dev;
        u16 vid = 0;

        resp->num_paths = route->num_paths;
        switch (route->num_paths) {
        case 0:
                dev_addr = &route->addr.dev_addr;
                dev = dev_get_by_index(&init_net, dev_addr->bound_dev_if);
                if (dev) {
                        vid = rdma_vlan_dev_vlan_id(dev);
                        dev_put(dev);
                }

                iboe_mac_vlan_to_ll((union ib_gid *) &resp->ib_route[0].dgid,
                                    dev_addr->dst_dev_addr, vid);
                iboe_addr_get_sgid(dev_addr,
                                   (union ib_gid *) &resp->ib_route[0].sgid);
                resp->ib_route[0].pkey = cpu_to_be16(0xffff);
                break;
        case 2:
                ib_copy_path_rec_to_user(&resp->ib_route[1],
                                         &route->path_rec[1]);
                /* fall through */
        case 1:
                ib_copy_path_rec_to_user(&resp->ib_route[0],
                                         &route->path_rec[0]);
                break;
        default:
                break;
        }
}

static ssize_t ucma_query_route(struct ucma_file *file,
                                const char __user *inbuf,
                                int in_len, int out_len)
{
        struct rdma_ucm_query_route cmd;
        struct rdma_ucm_query_route_resp resp;
        struct ucma_context *ctx;
        struct sockaddr *addr;
        int ret = 0;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        memset(&resp, 0, sizeof resp);
        addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr;
        memcpy(&resp.src_addr, addr, addr->sa_family == AF_INET ?
                                     sizeof(struct sockaddr_in) :
                                     sizeof(struct sockaddr_in6));
        addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr;
        memcpy(&resp.dst_addr, addr, addr->sa_family == AF_INET ?
                                     sizeof(struct sockaddr_in) :
                                     sizeof(struct sockaddr_in6));
        if (!ctx->cm_id->device)
                goto out;

        resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
        resp.port_num = ctx->cm_id->port_num;
        if (rdma_node_get_transport(ctx->cm_id->device->node_type) == RDMA_TRANSPORT_IB) {
                switch (rdma_port_get_link_layer(ctx->cm_id->device, ctx->cm_id->port_num)) {
                case IB_LINK_LAYER_INFINIBAND:
                        ucma_copy_ib_route(&resp, &ctx->cm_id->route);
                        break;
                case IB_LINK_LAYER_ETHERNET:
                        ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
                        break;
                default:
                        break;
                }
        }

out:
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;

        ucma_put_ctx(ctx);
        return ret;
}

static void ucma_copy_conn_param(struct rdma_conn_param *dst,
                                 struct rdma_ucm_conn_param *src)
{
        dst->private_data = src->private_data;
        dst->private_data_len = src->private_data_len;
        dst->responder_resources = src->responder_resources;
        dst->initiator_depth = src->initiator_depth;
        dst->flow_control = src->flow_control;
        dst->retry_count = src->retry_count;
        dst->rnr_retry_count = src->rnr_retry_count;
        dst->srq = src->srq;
        dst->qp_num = src->qp_num;
}

static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
                            int in_len, int out_len)
{
        struct rdma_ucm_connect cmd;
        struct rdma_conn_param conn_param;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        if (!cmd.conn_param.valid)
                return -EINVAL;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ucma_copy_conn_param(&conn_param, &cmd.conn_param);
        ret = rdma_connect(ctx->cm_id, &conn_param);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_listen(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_listen cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ctx->backlog = cmd.backlog > 0 && cmd.backlog < UCMA_MAX_BACKLOG ?
                       cmd.backlog : UCMA_MAX_BACKLOG;
        ret = rdma_listen(ctx->cm_id, ctx->backlog);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_accept cmd;
        struct rdma_conn_param conn_param;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        if (cmd.conn_param.valid) {
                ctx->uid = cmd.uid;
                ucma_copy_conn_param(&conn_param, &cmd.conn_param);
                ret = rdma_accept(ctx->cm_id, &conn_param);
        } else
                ret = rdma_accept(ctx->cm_id, NULL);

        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_reject(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_reject cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_disconnect(struct ucma_file *file, const char __user *inbuf,
                               int in_len, int out_len)
{
        struct rdma_ucm_disconnect cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_disconnect(ctx->cm_id);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_init_qp_attr(struct ucma_file *file,
                                 const char __user *inbuf,
                                 int in_len, int out_len)
{
        struct rdma_ucm_init_qp_attr cmd;
        struct ib_uverbs_qp_attr resp;
        struct ucma_context *ctx;
        struct ib_qp_attr qp_attr;
        int ret;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        resp.qp_attr_mask = 0;
        memset(&qp_attr, 0, sizeof qp_attr);
        qp_attr.qp_state = cmd.qp_state;
        ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask);
        if (ret)
                goto out;

        ib_copy_qp_attr_to_user(&resp, &qp_attr);
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;

out:
        ucma_put_ctx(ctx);
        return ret;
}

static int ucma_set_option_id(struct ucma_context *ctx, int optname,
                              void *optval, size_t optlen)
{
        int ret = 0;

        switch (optname) {
        case RDMA_OPTION_ID_TOS:
                if (optlen != sizeof(u8)) {
                        ret = -EINVAL;
                        break;
                }
                rdma_set_service_type(ctx->cm_id, *((u8 *) optval));
                break;
        default:
                ret = -ENOSYS;
        }

        return ret;
}

static int ucma_set_ib_path(struct ucma_context *ctx,
                            struct ib_path_rec_data *path_data, size_t optlen)
{
        struct ib_sa_path_rec sa_path;
        struct rdma_cm_event event;
        int ret;

        if (optlen % sizeof(*path_data))
                return -EINVAL;

        for (; optlen; optlen -= sizeof(*path_data), path_data++) {
                if (path_data->flags == (IB_PATH_GMP | IB_PATH_PRIMARY |
                                         IB_PATH_BIDIRECTIONAL))
                        break;
        }

        if (!optlen)
                return -EINVAL;

        ib_sa_unpack_path(path_data->path_rec, &sa_path);
        ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
        if (ret)
                return ret;

        memset(&event, 0, sizeof event);
        event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
        return ucma_event_handler(ctx->cm_id, &event);
}

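/*
 * Editor's note: after installing a user-supplied path, a synthetic
 * RDMA_CM_EVENT_ROUTE_RESOLVED event is injected through
 * ucma_event_handler() so userspace observes the same notification as it
 * would for normal route resolution.
 */
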
static int ucma_set_option_ib(struct ucma_context *ctx, int optname,
                              void *optval, size_t optlen)
{
        int ret;

        switch (optname) {
        case RDMA_OPTION_IB_PATH:
                ret = ucma_set_ib_path(ctx, optval, optlen);
                break;
        default:
                ret = -ENOSYS;
        }

        return ret;
}

static int ucma_set_option_level(struct ucma_context *ctx, int level,
                                 int optname, void *optval, size_t optlen)
{
        int ret;

        switch (level) {
        case RDMA_OPTION_ID:
                ret = ucma_set_option_id(ctx, optname, optval, optlen);
                break;
        case RDMA_OPTION_IB:
                ret = ucma_set_option_ib(ctx, optname, optval, optlen);
                break;
        default:
                ret = -ENOSYS;
        }

        return ret;
}

static ssize_t ucma_set_option(struct ucma_file *file, const char __user *inbuf,
                               int in_len, int out_len)
{
        struct rdma_ucm_set_option cmd;
        struct ucma_context *ctx;
        void *optval;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        optval = kmalloc(cmd.optlen, GFP_KERNEL);
        if (!optval) {
                ret = -ENOMEM;
                goto out1;
        }

        if (copy_from_user(optval, (void __user *) (unsigned long) cmd.optval,
                           cmd.optlen)) {
                ret = -EFAULT;
                goto out2;
        }

        ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval,
                                    cmd.optlen);
out2:
        kfree(optval);
out1:
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_notify(struct ucma_file *file, const char __user *inbuf,
                           int in_len, int out_len)
{
        struct rdma_ucm_notify cmd;
        struct ucma_context *ctx;
        int ret;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_join_multicast(struct ucma_file *file,
                                   const char __user *inbuf,
                                   int in_len, int out_len)
{
        struct rdma_ucm_join_mcast cmd;
        struct rdma_ucm_create_id_resp resp;
        struct ucma_context *ctx;
        struct ucma_multicast *mc;
        int ret;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        ctx = ucma_get_ctx(file, cmd.id);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        mutex_lock(&file->mut);
        mc = ucma_alloc_multicast(ctx);
        if (!mc) {
                ret = -ENOMEM;
                goto err1;
        }

        mc->uid = cmd.uid;
        memcpy(&mc->addr, &cmd.addr, sizeof cmd.addr);
        ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc);
        if (ret)
                goto err2;

        resp.id = mc->id;
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp))) {
                ret = -EFAULT;
                goto err3;
        }

        mutex_unlock(&file->mut);
        ucma_put_ctx(ctx);
        return 0;

err3:
        rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr);
        ucma_cleanup_mc_events(mc);
err2:
        mutex_lock(&mut);
        idr_remove(&multicast_idr, mc->id);
        mutex_unlock(&mut);
        list_del(&mc->list);
        kfree(mc);
err1:
        mutex_unlock(&file->mut);
        ucma_put_ctx(ctx);
        return ret;
}

static ssize_t ucma_leave_multicast(struct ucma_file *file,
                                    const char __user *inbuf,
                                    int in_len, int out_len)
{
        struct rdma_ucm_destroy_id cmd;
        struct rdma_ucm_destroy_id_resp resp;
        struct ucma_multicast *mc;
        int ret = 0;

        if (out_len < sizeof(resp))
                return -ENOSPC;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        mutex_lock(&mut);
        mc = idr_find(&multicast_idr, cmd.id);
        if (!mc)
                mc = ERR_PTR(-ENOENT);
        else if (mc->ctx->file != file)
                mc = ERR_PTR(-EINVAL);
        else {
                idr_remove(&multicast_idr, mc->id);
                atomic_inc(&mc->ctx->ref);
        }
        mutex_unlock(&mut);

        if (IS_ERR(mc)) {
                ret = PTR_ERR(mc);
                goto out;
        }

        rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr);
        mutex_lock(&mc->ctx->file->mut);
        ucma_cleanup_mc_events(mc);
        list_del(&mc->list);
        mutex_unlock(&mc->ctx->file->mut);

        ucma_put_ctx(mc->ctx);
        resp.events_reported = mc->events_reported;
        kfree(mc);

        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;
out:
        return ret;
}

static void ucma_lock_files(struct ucma_file *file1, struct ucma_file *file2)
{
        /* Acquire mutexes based on pointer comparison to prevent deadlock. */
        if (file1 < file2) {
                mutex_lock(&file1->mut);
                mutex_lock(&file2->mut);
        } else {
                mutex_lock(&file2->mut);
                mutex_lock(&file1->mut);
        }
}

static void ucma_unlock_files(struct ucma_file *file1, struct ucma_file *file2)
{
        if (file1 < file2) {
                mutex_unlock(&file2->mut);
                mutex_unlock(&file1->mut);
        } else {
                mutex_unlock(&file1->mut);
                mutex_unlock(&file2->mut);
        }
}

static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file)
{
        struct ucma_event *uevent, *tmp;

        list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list)
                if (uevent->ctx == ctx)
                        list_move_tail(&uevent->list, &file->event_list);
}

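/*
 * Editor's note: ucma_move_events() runs with both files' mutexes held
 * (see ucma_lock_files()), so migrated events keep their order and no new
 * event can slip in ahead of the ones being moved.
 */
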
static ssize_t ucma_migrate_id(struct ucma_file *new_file,
                               const char __user *inbuf,
                               int in_len, int out_len)
{
        struct rdma_ucm_migrate_id cmd;
        struct rdma_ucm_migrate_resp resp;
        struct ucma_context *ctx;
        struct file *filp;
        struct ucma_file *cur_file;
        int ret = 0;

        if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
                return -EFAULT;

        /* Get current fd to protect against it being closed */
        filp = fget(cmd.fd);
        if (!filp)
                return -ENOENT;

        /* Validate current fd and prevent destruction of id. */
        ctx = ucma_get_ctx(filp->private_data, cmd.id);
        if (IS_ERR(ctx)) {
                ret = PTR_ERR(ctx);
                goto file_put;
        }

        cur_file = ctx->file;
        if (cur_file == new_file) {
                resp.events_reported = ctx->events_reported;
                goto response;
        }

        /*
         * Migrate events between fd's, maintaining order, and avoiding new
         * events being added before existing events.
         */
        ucma_lock_files(cur_file, new_file);
        mutex_lock(&mut);

        list_move_tail(&ctx->list, &new_file->ctx_list);
        ucma_move_events(ctx, new_file);
        ctx->file = new_file;
        resp.events_reported = ctx->events_reported;

        mutex_unlock(&mut);
        ucma_unlock_files(cur_file, new_file);

response:
        if (copy_to_user((void __user *)(unsigned long)cmd.response,
                         &resp, sizeof(resp)))
                ret = -EFAULT;

        ucma_put_ctx(ctx);
file_put:
        fput(filp);
        return ret;
}

static ssize_t (*ucma_cmd_table[])(struct ucma_file *file,
                                   const char __user *inbuf,
                                   int in_len, int out_len) = {
        [RDMA_USER_CM_CMD_CREATE_ID]    = ucma_create_id,
        [RDMA_USER_CM_CMD_DESTROY_ID]   = ucma_destroy_id,
        [RDMA_USER_CM_CMD_BIND_ADDR]    = ucma_bind_addr,
        [RDMA_USER_CM_CMD_RESOLVE_ADDR] = ucma_resolve_addr,
        [RDMA_USER_CM_CMD_RESOLVE_ROUTE]= ucma_resolve_route,
        [RDMA_USER_CM_CMD_QUERY_ROUTE]  = ucma_query_route,
        [RDMA_USER_CM_CMD_CONNECT]      = ucma_connect,
        [RDMA_USER_CM_CMD_LISTEN]       = ucma_listen,
        [RDMA_USER_CM_CMD_ACCEPT]       = ucma_accept,
        [RDMA_USER_CM_CMD_REJECT]       = ucma_reject,
        [RDMA_USER_CM_CMD_DISCONNECT]   = ucma_disconnect,
        [RDMA_USER_CM_CMD_INIT_QP_ATTR] = ucma_init_qp_attr,
        [RDMA_USER_CM_CMD_GET_EVENT]    = ucma_get_event,
        [RDMA_USER_CM_CMD_GET_OPTION]   = NULL,
        [RDMA_USER_CM_CMD_SET_OPTION]   = ucma_set_option,
        [RDMA_USER_CM_CMD_NOTIFY]       = ucma_notify,
        [RDMA_USER_CM_CMD_JOIN_MCAST]   = ucma_join_multicast,
        [RDMA_USER_CM_CMD_LEAVE_MCAST]  = ucma_leave_multicast,
        [RDMA_USER_CM_CMD_MIGRATE_ID]   = ucma_migrate_id
};

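/*
 * Dispatch note (editor's summary): userspace drives this device by
 * write()ing a struct rdma_ucm_cmd_hdr followed by the command payload;
 * hdr.cmd indexes the table above, and NULL entries (GET_OPTION) fail with
 * -ENOSYS in ucma_write().  A minimal sketch of a caller (hypothetical
 * device path and command body):
 *
 *      struct rdma_ucm_cmd_hdr hdr = {
 *              .cmd = RDMA_USER_CM_CMD_CREATE_ID,
 *              .in  = sizeof(cmd_body),
 *              .out = sizeof(resp),
 *      };
 *      // write hdr immediately followed by cmd_body in one buffer,
 *      // typically to /dev/infiniband/rdma_cm
 */
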
static ssize_t ucma_write(struct file *filp, const char __user *buf,
                          size_t len, loff_t *pos)
{
        struct ucma_file *file = filp->private_data;
        struct rdma_ucm_cmd_hdr hdr;
        ssize_t ret;

        if (len < sizeof(hdr))
                return -EINVAL;

        if (copy_from_user(&hdr, buf, sizeof(hdr)))
                return -EFAULT;

        if (hdr.cmd < 0 || hdr.cmd >= ARRAY_SIZE(ucma_cmd_table))
                return -EINVAL;

        if (hdr.in + sizeof(hdr) > len)
                return -EINVAL;

        if (!ucma_cmd_table[hdr.cmd])
                return -ENOSYS;

        ret = ucma_cmd_table[hdr.cmd](file, buf + sizeof(hdr), hdr.in, hdr.out);
        if (!ret)
                ret = len;

        return ret;
}

static unsigned int ucma_poll(struct file *filp, struct poll_table_struct *wait)
{
        struct ucma_file *file = filp->private_data;
        unsigned int mask = 0;

        poll_wait(filp, &file->poll_wait, wait);

        if (!list_empty(&file->event_list))
                mask = POLLIN | POLLRDNORM;

        return mask;
}

/*
 * ucma_open() does not need the BKL:
 *
 *  - no global state is referred to;
 *  - there is no ioctl method to race against;
 *  - no further module initialization is required for open to work
 *    after the device is registered.
 */
static int ucma_open(struct inode *inode, struct file *filp)
{
        struct ucma_file *file;

        file = kmalloc(sizeof *file, GFP_KERNEL);
        if (!file)
                return -ENOMEM;

        INIT_LIST_HEAD(&file->event_list);
        INIT_LIST_HEAD(&file->ctx_list);
        init_waitqueue_head(&file->poll_wait);
        mutex_init(&file->mut);

        filp->private_data = file;
        file->filp = filp;
        return 0;
}

static int ucma_close(struct inode *inode, struct file *filp)
{
        struct ucma_file *file = filp->private_data;
        struct ucma_context *ctx, *tmp;

        mutex_lock(&file->mut);
        list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) {
                mutex_unlock(&file->mut);

                mutex_lock(&mut);
                idr_remove(&ctx_idr, ctx->id);
                mutex_unlock(&mut);

                ucma_free_ctx(ctx);
                mutex_lock(&file->mut);
        }
        mutex_unlock(&file->mut);
        kfree(file);
        return 0;
}

static const struct file_operations ucma_fops = {
        .owner   = THIS_MODULE,
        .open    = ucma_open,
        .release = ucma_close,
        .write   = ucma_write,
        .poll    = ucma_poll,
};

static struct miscdevice ucma_misc = {
        .minor  = MISC_DYNAMIC_MINOR,
        .name   = "rdma_cm",
        .fops   = &ucma_fops,
};

static ssize_t show_abi_version(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        return sprintf(buf, "%d\n", RDMA_USER_CM_ABI_VERSION);
}
static DEVICE_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);

static int __init ucma_init(void)
{
        int ret;

        ret = misc_register(&ucma_misc);
        if (ret)
                return ret;

        ret = device_create_file(ucma_misc.this_device, &dev_attr_abi_version);
        if (ret) {
                printk(KERN_ERR "rdma_ucm: couldn't create abi_version attr\n");
                goto err;
        }
        return 0;
err:
        misc_deregister(&ucma_misc);
        return ret;
}

static void __exit ucma_cleanup(void)
{
        device_remove_file(ucma_misc.this_device, &dev_attr_abi_version);
        misc_deregister(&ucma_misc);
        idr_destroy(&ctx_idr);
}

module_init(ucma_init);
module_exit(ucma_cleanup);