2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006, 2007 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
5 * Copyright (c) 2006 Mellanox Technologies. All rights reserved.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include <linux/file.h>
39 #include <asm/uaccess.h>
40 #include <asm/fcntl.h>
/*
 * One lockdep class key per uobject type, so that each type of
 * uobject's rwsem gets its own lockdep class.  This avoids
 * false-positive lock-nesting warnings when an operation legitimately
 * holds two different uobject types' locks at once (e.g. a PD's lock
 * while creating a QP).
 */
44 static struct lock_class_key pd_lock_key;
45 static struct lock_class_key mr_lock_key;
46 static struct lock_class_key cq_lock_key;
47 static struct lock_class_key qp_lock_key;
48 static struct lock_class_key ah_lock_key;
49 static struct lock_class_key srq_lock_key;
/*
 * Point an ib_udata at the userspace input buffer (the command payload
 * past the fixed-size struct) and output buffer (the response area),
 * with their respective lengths.  Used by every command handler that
 * passes opaque driver-private data through to the hardware driver.
 *
 * NOTE(review): the do { ... } while (0) wrapper lines of this macro
 * are not visible in this excerpt -- confirm against the full file.
 */
51 #define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \
53 (udata)->inbuf = (void __user *) (ibuf); \
54 (udata)->outbuf = (void __user *) (obuf); \
55 (udata)->inlen = (ilen); \
56 (udata)->outlen = (olen); \
60 * The ib_uobject locking scheme is as follows:
62 * - ib_uverbs_idr_lock protects the uverbs idrs themselves, so it
63 * needs to be held during all idr operations. When an object is
64 * looked up, a reference must be taken on the object's kref before
67 * - Each object also has an rwsem. This rwsem must be held for
68 * reading while an operation that uses the object is performed.
69 * For example, while registering an MR, the associated PD's
70 * uobject.mutex must be held for reading. The rwsem must be held
71 * for writing while initializing or destroying an object.
73 * - In addition, each object has a "live" flag. If this flag is not
74 * set, then lookups of the object will fail even if it is found in
75 * the idr. This handles a reader that blocks and does not acquire
76 * the rwsem until after the object is destroyed. The destroy
77 * operation will set the live flag to 0 and then drop the rwsem;
78 * this will allow the reader to acquire the rwsem, see that the
79 * live flag is 0, and then drop the rwsem and its reference to
80 * object. The underlying storage will not be freed until the last
81 * reference to the object is dropped.
/*
 * Initialise a freshly-allocated ib_uobject: record the userspace
 * handle and owning ucontext, set the refcount to 1, and initialise
 * its rwsem with the per-type lockdep class key.
 */
84 static void init_uobj(struct ib_uobject *uobj, u64 user_handle,
85 struct ib_ucontext *context, struct lock_class_key *key)
87 uobj->user_handle = user_handle;
88 uobj->context = context;
89 kref_init(&uobj->ref);
90 init_rwsem(&uobj->mutex);
91 lockdep_set_class(&uobj->mutex, key);
/* kref release callback: frees the uobject's storage on last put. */
95 static void release_uobj(struct kref *kref)
97 kfree(container_of(kref, struct ib_uobject, ref));
/* Drop a reference taken at lookup time; may free the uobject. */
100 static void put_uobj(struct ib_uobject *uobj)
102 kref_put(&uobj->ref, release_uobj);
/* Release a read-locked uobject: unlock, then drop the lookup ref. */
105 static void put_uobj_read(struct ib_uobject *uobj)
107 up_read(&uobj->mutex);
/* Release a write-locked uobject: unlock, then drop the lookup ref. */
111 static void put_uobj_write(struct ib_uobject *uobj)
113 up_write(&uobj->mutex);
/*
 * Insert a uobject into the given idr, storing the allocated id in
 * uobj->id.  idr_pre_get() preallocates outside the spinlock (it may
 * sleep with GFP_KERNEL); ib_uverbs_idr_lock protects the idr itself,
 * per the locking-scheme comment above.
 * NOTE(review): the retry loop / return paths are elided in this excerpt.
 */
117 static int idr_add_uobj(struct idr *idr, struct ib_uobject *uobj)
122 if (!idr_pre_get(idr, GFP_KERNEL))
125 spin_lock(&ib_uverbs_idr_lock);
126 ret = idr_get_new(idr, uobj, &uobj->id);
127 spin_unlock(&ib_uverbs_idr_lock);
/* Remove a uobject's id from the idr under the global idr spinlock. */
135 void idr_remove_uobj(struct idr *idr, struct ib_uobject *uobj)
137 spin_lock(&ib_uverbs_idr_lock);
138 idr_remove(idr, uobj->id);
139 spin_unlock(&ib_uverbs_idr_lock);
/*
 * Look up id in the idr and, if the uobject belongs to the given
 * ucontext, take a kref on it (preventing another process's handles
 * from being used).  Returns the uobject or NULL.
 * NOTE(review): the NULL check on the idr_find() result appears to be
 * on a line elided from this excerpt -- confirm in the full file.
 */
142 static struct ib_uobject *__idr_get_uobj(struct idr *idr, int id,
143 struct ib_ucontext *context)
145 struct ib_uobject *uobj;
147 spin_lock(&ib_uverbs_idr_lock);
148 uobj = idr_find(idr, id);
150 if (uobj->context == context)
151 kref_get(&uobj->ref);
155 spin_unlock(&ib_uverbs_idr_lock);
/*
 * Look up a uobject and take its rwsem for reading.  "nested" selects
 * down_read_nested() with SINGLE_DEPTH_NESTING so lockdep accepts the
 * one legitimate case of holding two CQ locks (send CQ + recv CQ in
 * create_qp).  Lookups of not-yet-live / destroyed objects fail per
 * the "live" flag scheme described above.
 */
160 static struct ib_uobject *idr_read_uobj(struct idr *idr, int id,
161 struct ib_ucontext *context, int nested)
163 struct ib_uobject *uobj;
165 uobj = __idr_get_uobj(idr, id, context);
170 down_read_nested(&uobj->mutex, SINGLE_DEPTH_NESTING);
172 down_read(&uobj->mutex);
/*
 * Look up a uobject and take its rwsem for writing (used on destroy
 * paths).  If the object is found but no longer live, the write lock
 * and reference are dropped again via put_uobj_write().
 */
181 static struct ib_uobject *idr_write_uobj(struct idr *idr, int id,
182 struct ib_ucontext *context)
184 struct ib_uobject *uobj;
186 uobj = __idr_get_uobj(idr, id, context);
190 down_write(&uobj->mutex);
192 put_uobj_write(uobj);
/* Convenience wrapper: read-lock the uobject and return its payload. */
199 static void *idr_read_obj(struct idr *idr, int id, struct ib_ucontext *context,
202 struct ib_uobject *uobj;
204 uobj = idr_read_uobj(idr, id, context, nested);
205 return uobj ? uobj->object : NULL;
/*
 * Typed wrappers over idr_read_obj()/put_uobj_read() for each verbs
 * object kind.  Each idr_read_XX() returns the object read-locked (or
 * NULL), and the matching put_XX_read() releases it.
 */
208 static struct ib_pd *idr_read_pd(int pd_handle, struct ib_ucontext *context)
210 return idr_read_obj(&ib_uverbs_pd_idr, pd_handle, context, 0);
213 static void put_pd_read(struct ib_pd *pd)
215 put_uobj_read(pd->uobject);
/* "nested" is nonzero only for the second CQ taken in create_qp. */
218 static struct ib_cq *idr_read_cq(int cq_handle, struct ib_ucontext *context, int nested)
220 return idr_read_obj(&ib_uverbs_cq_idr, cq_handle, context, nested);
223 static void put_cq_read(struct ib_cq *cq)
225 put_uobj_read(cq->uobject);
228 static struct ib_ah *idr_read_ah(int ah_handle, struct ib_ucontext *context)
230 return idr_read_obj(&ib_uverbs_ah_idr, ah_handle, context, 0);
233 static void put_ah_read(struct ib_ah *ah)
235 put_uobj_read(ah->uobject);
238 static struct ib_qp *idr_read_qp(int qp_handle, struct ib_ucontext *context)
240 return idr_read_obj(&ib_uverbs_qp_idr, qp_handle, context, 0);
243 static void put_qp_read(struct ib_qp *qp)
245 put_uobj_read(qp->uobject);
248 static struct ib_srq *idr_read_srq(int srq_handle, struct ib_ucontext *context)
250 return idr_read_obj(&ib_uverbs_srq_idr, srq_handle, context, 0);
253 static void put_srq_read(struct ib_srq *srq)
255 put_uobj_read(srq->uobject);
/*
 * XRC domains also hand back the uobject itself, since callers need it
 * for put_xrcd_read() and for reference bookkeeping.
 */
258 static struct ib_xrcd *idr_read_xrcd(int xrcd_handle,
259 struct ib_ucontext *context,
260 struct ib_uobject **uobj)
262 *uobj = idr_read_uobj(&ib_uverbs_xrc_domain_idr, xrcd_handle,
264 return *uobj ? (*uobj)->object : NULL;
/* Body elided in this excerpt; presumably mirrors put_uobj_read(). */
267 static void put_xrcd_read(struct ib_uobject *uobj)
/*
 * Create the userspace verbs context for this open device file:
 * allocate the driver ucontext, initialise its per-type object lists,
 * create the async event file, and return its fd plus the number of
 * completion vectors to userspace.  Only one ucontext may exist per
 * open file; file->mutex serialises creation.
 */
272 ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
273 const char __user *buf,
274 int in_len, int out_len)
276 struct ib_uverbs_get_context cmd;
277 struct ib_uverbs_get_context_resp resp;
278 struct ib_udata udata;
279 struct ib_device *ibdev = file->device->ib_dev;
280 struct ib_ucontext *ucontext;
/* Refuse if the caller's response buffer cannot hold a full reply. */
284 if (out_len < sizeof resp)
287 if (copy_from_user(&cmd, buf, sizeof cmd))
290 mutex_lock(&file->mutex);
/* A context already exists for this file -- creation is one-shot. */
292 if (file->ucontext) {
297 INIT_UDATA(&udata, buf + sizeof cmd,
298 (unsigned long) cmd.response + sizeof resp,
299 in_len - sizeof cmd, out_len - sizeof resp);
301 ucontext = ibdev->alloc_ucontext(ibdev, &udata);
302 if (IS_ERR(ucontext)) {
/*
 * BUGFIX: take the error code from the ERR_PTR just returned by
 * alloc_ucontext().  The original read PTR_ERR(file->ucontext), but
 * file->ucontext is known to be NULL here (checked above), so the
 * real error was discarded and a bogus value returned instead.
 */
303 ret = PTR_ERR(ucontext);
307 ucontext->device = ibdev;
308 INIT_LIST_HEAD(&ucontext->pd_list);
309 INIT_LIST_HEAD(&ucontext->mr_list);
310 INIT_LIST_HEAD(&ucontext->mw_list);
311 INIT_LIST_HEAD(&ucontext->cq_list);
312 INIT_LIST_HEAD(&ucontext->qp_list);
313 INIT_LIST_HEAD(&ucontext->srq_list);
314 INIT_LIST_HEAD(&ucontext->ah_list);
315 INIT_LIST_HEAD(&ucontext->xrcd_list);
316 ucontext->closing = 0;
318 resp.num_comp_vectors = file->device->num_comp_vectors;
/* Create the async event file; its fd is returned in the response. */
320 filp = ib_uverbs_alloc_event_file(file, 1, &resp.async_fd);
326 if (copy_to_user((void __user *) (unsigned long) cmd.response,
327 &resp, sizeof resp)) {
332 file->async_file = filp->private_data;
334 INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
335 ib_uverbs_event_handler);
336 ret = ib_register_event_handler(&file->event_handler);
/* Hold refs on the async file and the uverbs file for the context. */
340 kref_get(&file->async_file->ref);
341 kref_get(&file->ref);
342 file->ucontext = ucontext;
/* Publish the fd only after everything else has succeeded. */
344 fd_install(resp.async_fd, filp);
346 mutex_unlock(&file->mutex);
/* Error unwind: release the reserved fd and the driver ucontext. */
351 put_unused_fd(resp.async_fd);
355 ibdev->dealloc_ucontext(ucontext);
358 mutex_unlock(&file->mutex);
/*
 * Query device attributes via ib_query_device() and copy a flattened
 * ib_uverbs_query_device_resp back to userspace.  Pure field-by-field
 * translation; no objects are created or locked.
 */
362 ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
363 const char __user *buf,
364 int in_len, int out_len)
366 struct ib_uverbs_query_device cmd;
367 struct ib_uverbs_query_device_resp resp;
368 struct ib_device_attr attr;
371 if (out_len < sizeof resp)
374 if (copy_from_user(&cmd, buf, sizeof cmd))
377 ret = ib_query_device(file->device->ib_dev, &attr);
/* Zero first so padding / unset fields never leak kernel memory. */
381 memset(&resp, 0, sizeof resp);
383 resp.fw_ver = attr.fw_ver;
384 resp.node_guid = file->device->ib_dev->node_guid;
385 resp.sys_image_guid = attr.sys_image_guid;
386 resp.max_mr_size = attr.max_mr_size;
387 resp.page_size_cap = attr.page_size_cap;
388 resp.vendor_id = attr.vendor_id;
389 resp.vendor_part_id = attr.vendor_part_id;
390 resp.hw_ver = attr.hw_ver;
391 resp.max_qp = attr.max_qp;
392 resp.max_qp_wr = attr.max_qp_wr;
393 resp.device_cap_flags = attr.device_cap_flags;
394 resp.max_sge = attr.max_sge;
395 resp.max_sge_rd = attr.max_sge_rd;
396 resp.max_cq = attr.max_cq;
397 resp.max_cqe = attr.max_cqe;
398 resp.max_mr = attr.max_mr;
399 resp.max_pd = attr.max_pd;
400 resp.max_qp_rd_atom = attr.max_qp_rd_atom;
401 resp.max_ee_rd_atom = attr.max_ee_rd_atom;
402 resp.max_res_rd_atom = attr.max_res_rd_atom;
403 resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom;
404 resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom;
405 resp.atomic_cap = attr.atomic_cap;
406 resp.max_ee = attr.max_ee;
407 resp.max_rdd = attr.max_rdd;
408 resp.max_mw = attr.max_mw;
409 resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp;
410 resp.max_raw_ethy_qp = attr.max_raw_ethy_qp;
411 resp.max_mcast_grp = attr.max_mcast_grp;
412 resp.max_mcast_qp_attach = attr.max_mcast_qp_attach;
413 resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
414 resp.max_ah = attr.max_ah;
415 resp.max_fmr = attr.max_fmr;
416 resp.max_map_per_fmr = attr.max_map_per_fmr;
417 resp.max_srq = attr.max_srq;
418 resp.max_srq_wr = attr.max_srq_wr;
419 resp.max_srq_sge = attr.max_srq_sge;
420 resp.max_pkeys = attr.max_pkeys;
421 resp.local_ca_ack_delay = attr.local_ca_ack_delay;
422 resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt;
424 if (copy_to_user((void __user *) (unsigned long) cmd.response,
/*
 * Query attributes of the port named in the command and copy the
 * flattened ib_uverbs_query_port_resp back to userspace.
 */
431 ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
432 const char __user *buf,
433 int in_len, int out_len)
435 struct ib_uverbs_query_port cmd;
436 struct ib_uverbs_query_port_resp resp;
437 struct ib_port_attr attr;
440 if (out_len < sizeof resp)
443 if (copy_from_user(&cmd, buf, sizeof cmd))
/* ib_query_port validates cmd.port_num against the device. */
446 ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
/* Zero first so padding / unset fields never leak kernel memory. */
450 memset(&resp, 0, sizeof resp);
452 resp.state = attr.state;
453 resp.max_mtu = attr.max_mtu;
454 resp.active_mtu = attr.active_mtu;
455 resp.gid_tbl_len = attr.gid_tbl_len;
456 resp.port_cap_flags = attr.port_cap_flags;
457 resp.max_msg_sz = attr.max_msg_sz;
458 resp.bad_pkey_cntr = attr.bad_pkey_cntr;
459 resp.qkey_viol_cntr = attr.qkey_viol_cntr;
460 resp.pkey_tbl_len = attr.pkey_tbl_len;
462 resp.sm_lid = attr.sm_lid;
464 resp.max_vl_num = attr.max_vl_num;
465 resp.sm_sl = attr.sm_sl;
466 resp.subnet_timeout = attr.subnet_timeout;
467 resp.init_type_reply = attr.init_type_reply;
468 resp.active_width = attr.active_width;
469 resp.active_speed = attr.active_speed;
470 resp.phys_state = attr.phys_state;
471 resp.link_layer = attr.link_layer;
473 if (copy_to_user((void __user *) (unsigned long) cmd.response,
/*
 * Allocate a protection domain: create the uobject (write-locked while
 * under construction), call the driver's alloc_pd, register the handle
 * in the PD idr, copy the handle to userspace, then link the uobject
 * into the ucontext's pd_list and mark it live.
 */
480 ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
481 const char __user *buf,
482 int in_len, int out_len)
484 struct ib_uverbs_alloc_pd cmd;
485 struct ib_uverbs_alloc_pd_resp resp;
486 struct ib_udata udata;
487 struct ib_uobject *uobj;
491 if (out_len < sizeof resp)
494 if (copy_from_user(&cmd, buf, sizeof cmd))
497 INIT_UDATA(&udata, buf + sizeof cmd,
498 (unsigned long) cmd.response + sizeof resp,
499 in_len - sizeof cmd, out_len - sizeof resp);
501 uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
/* Hold the uobject write-locked until it is fully initialised. */
505 init_uobj(uobj, 0, file->ucontext, &pd_lock_key);
506 down_write(&uobj->mutex);
508 pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
509 file->ucontext, &udata);
515 pd->device = file->device->ib_dev;
517 atomic_set(&pd->usecnt, 0);
520 ret = idr_add_uobj(&ib_uverbs_pd_idr, uobj);
524 memset(&resp, 0, sizeof resp);
525 resp.pd_handle = uobj->id;
527 if (copy_to_user((void __user *) (unsigned long) cmd.response,
528 &resp, sizeof resp)) {
/* file->mutex protects the ucontext's per-type object lists. */
533 mutex_lock(&file->mutex);
534 list_add_tail(&uobj->list, &file->ucontext->pd_list);
535 mutex_unlock(&file->mutex);
539 up_write(&uobj->mutex);
/* Error unwind: undo the idr insertion, then drop lock + ref. */
544 idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
550 put_uobj_write(uobj);
/*
 * Destroy a protection domain: write-lock its uobject (which also
 * clears "live" on the elided lines), call ib_dealloc_pd, then remove
 * it from the idr and the ucontext's pd_list.
 */
554 ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
555 const char __user *buf,
556 int in_len, int out_len)
558 struct ib_uverbs_dealloc_pd cmd;
559 struct ib_uobject *uobj;
562 if (copy_from_user(&cmd, buf, sizeof cmd))
565 uobj = idr_write_uobj(&ib_uverbs_pd_idr, cmd.pd_handle, file->ucontext);
569 ret = ib_dealloc_pd(uobj->object);
/* Drop the write lock whether or not dealloc succeeded. */
573 put_uobj_write(uobj);
578 idr_remove_uobj(&ib_uverbs_pd_idr, uobj);
580 mutex_lock(&file->mutex);
581 list_del(&uobj->list);
582 mutex_unlock(&file->mutex);
/*
 * Register a userspace memory region against a PD.  Validates the
 * access flags and address alignment, read-locks the PD, calls the
 * driver's reg_user_mr, and publishes the new MR handle/lkey/rkey.
 */
589 ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
590 const char __user *buf, int in_len,
593 struct ib_uverbs_reg_mr cmd;
594 struct ib_uverbs_reg_mr_resp resp;
595 struct ib_udata udata;
596 struct ib_uobject *uobj;
601 if (out_len < sizeof resp)
604 if (copy_from_user(&cmd, buf, sizeof cmd))
607 INIT_UDATA(&udata, buf + sizeof cmd,
608 (unsigned long) cmd.response + sizeof resp,
609 in_len - sizeof cmd, out_len - sizeof resp);
/* start and hca_va must share the same offset within a page. */
611 if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
615 * Local write permission is required if remote write or
616 * remote atomic permission is also requested.
618 if (cmd.access_flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
619 !(cmd.access_flags & IB_ACCESS_LOCAL_WRITE))
622 uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
626 init_uobj(uobj, 0, file->ucontext, &mr_lock_key);
627 down_write(&uobj->mutex);
629 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
/*
 * NOTE(review): this variant passes a trailing 0 to reg_user_mr that
 * mainline does not -- presumably an out-of-tree extra parameter;
 * confirm against the local ib_device definition.
 */
635 mr = pd->device->reg_user_mr(pd, cmd.start, cmd.length, cmd.hca_va,
636 cmd.access_flags, &udata, 0);
642 mr->device = pd->device;
/* The MR holds a reference on its PD for its lifetime. */
645 atomic_inc(&pd->usecnt);
646 atomic_set(&mr->usecnt, 0);
649 ret = idr_add_uobj(&ib_uverbs_mr_idr, uobj);
653 memset(&resp, 0, sizeof resp);
654 resp.lkey = mr->lkey;
655 resp.rkey = mr->rkey;
656 resp.mr_handle = uobj->id;
658 if (copy_to_user((void __user *) (unsigned long) cmd.response,
659 &resp, sizeof resp)) {
666 mutex_lock(&file->mutex);
667 list_add_tail(&uobj->list, &file->ucontext->mr_list);
668 mutex_unlock(&file->mutex);
672 up_write(&uobj->mutex);
/* Error unwind (labels elided): undo idr insert, drop lock + ref. */
677 idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
686 put_uobj_write(uobj);
/*
 * Deregister a memory region: write-lock its uobject, call
 * ib_dereg_mr, then remove the handle from the idr and the ucontext's
 * mr_list.
 */
690 ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
691 const char __user *buf, int in_len,
694 struct ib_uverbs_dereg_mr cmd;
696 struct ib_uobject *uobj;
699 if (copy_from_user(&cmd, buf, sizeof cmd))
702 uobj = idr_write_uobj(&ib_uverbs_mr_idr, cmd.mr_handle, file->ucontext);
708 ret = ib_dereg_mr(mr);
712 put_uobj_write(uobj);
717 idr_remove_uobj(&ib_uverbs_mr_idr, uobj);
719 mutex_lock(&file->mutex);
720 list_del(&uobj->list);
721 mutex_unlock(&file->mutex);
/*
 * Create a completion event channel: allocate an event file, copy its
 * fd to userspace, and only then install the fd (so a failed
 * copy_to_user can still release the reserved fd safely).
 */
728 ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
729 const char __user *buf, int in_len,
732 struct ib_uverbs_create_comp_channel cmd;
733 struct ib_uverbs_create_comp_channel_resp resp;
736 if (out_len < sizeof resp)
739 if (copy_from_user(&cmd, buf, sizeof cmd))
742 filp = ib_uverbs_alloc_event_file(file, 0, &resp.fd);
744 return PTR_ERR(filp);
746 if (copy_to_user((void __user *) (unsigned long) cmd.response,
747 &resp, sizeof resp)) {
748 put_unused_fd(resp.fd);
/* Publish the fd last -- after this the file is live for userspace. */
753 fd_install(resp.fd, filp);
/*
 * Create a completion queue.  Optionally binds the CQ to a completion
 * event channel (cmd.comp_channel >= 0), allocates the driver CQ, and
 * publishes the handle/cqe count to userspace.  The uobject is held
 * write-locked until fully constructed, then marked live.
 */
757 ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
758 const char __user *buf, int in_len,
761 struct ib_uverbs_create_cq cmd;
762 struct ib_uverbs_create_cq_resp resp;
763 struct ib_udata udata;
764 struct ib_ucq_object *obj;
765 struct ib_uverbs_event_file *ev_file = NULL;
769 if (out_len < sizeof resp)
772 if (copy_from_user(&cmd, buf, sizeof cmd))
775 INIT_UDATA(&udata, buf + sizeof cmd,
776 (unsigned long) cmd.response + sizeof resp,
777 in_len - sizeof cmd, out_len - sizeof resp);
/* Reject out-of-range completion vector indices up front. */
779 if (cmd.comp_vector >= file->device->num_comp_vectors)
782 obj = kmalloc(sizeof *obj, GFP_KERNEL)
786 init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &cq_lock_key);
787 down_write(&obj->uobject.mutex);
/* A negative comp_channel means "no completion channel". */
789 if (cmd.comp_channel >= 0) {
790 ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
797 obj->uverbs_file = file;
798 obj->comp_events_reported = 0;
799 obj->async_events_reported = 0;
800 INIT_LIST_HEAD(&obj->comp_list);
801 INIT_LIST_HEAD(&obj->async_list);
803 cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
805 file->ucontext, &udata);
811 cq->device = file->device->ib_dev;
812 cq->uobject = &obj->uobject;
813 cq->comp_handler = ib_uverbs_comp_handler;
814 cq->event_handler = ib_uverbs_cq_event_handler;
815 cq->cq_context = ev_file;
816 atomic_set(&cq->usecnt, 0);
818 obj->uobject.object = cq;
819 ret = idr_add_uobj(&ib_uverbs_cq_idr, &obj->uobject);
823 memset(&resp, 0, sizeof resp);
824 resp.cq_handle = obj->uobject.id;
827 if (copy_to_user((void __user *) (unsigned long) cmd.response,
828 &resp, sizeof resp)) {
833 mutex_lock(&file->mutex);
834 list_add_tail(&obj->uobject.list, &file->ucontext->cq_list);
835 mutex_unlock(&file->mutex);
/* Only now can lookups of this CQ handle succeed. */
837 obj->uobject.live = 1;
839 up_write(&obj->uobject.mutex);
/* Error unwind: undo idr insert, release event refs, drop uobject. */
844 idr_remove_uobj(&ib_uverbs_cq_idr, &obj->uobject);
851 ib_uverbs_release_ucq(file, ev_file, obj);
854 put_uobj_write(&obj->uobject);
/*
 * Resize a CQ via the driver's resize_cq.  Note the response copy is
 * deliberately "sizeof resp.cqe": only the updated cqe count is sent
 * back, not the whole (driver-extended) response struct.
 */
858 ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
859 const char __user *buf, int in_len,
862 struct ib_uverbs_resize_cq cmd;
863 struct ib_uverbs_resize_cq_resp resp;
864 struct ib_udata udata;
868 if (copy_from_user(&cmd, buf, sizeof cmd))
871 INIT_UDATA(&udata, buf + sizeof cmd,
872 (unsigned long) cmd.response + sizeof resp,
873 in_len - sizeof cmd, out_len - sizeof resp);
875 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
879 ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
885 if (copy_to_user((void __user *) (unsigned long) cmd.response,
886 &resp, sizeof resp.cqe))
/* Convention: on success a handler returns in_len (bytes consumed). */
892 return ret ? ret : in_len;
/*
 * Poll up to cmd.ne completions from a CQ and copy the flattened work
 * completions back to userspace.
 *
 * NOTE(review): cmd.ne is user-controlled and multiplied into both
 * kmalloc sizes without an overflow check -- a very large ne could
 * overflow the size computation.  Mainline later restructured this to
 * copy one wc at a time; confirm whether this variant needs the same.
 */
895 ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
896 const char __user *buf, int in_len,
899 struct ib_uverbs_poll_cq cmd;
900 struct ib_uverbs_poll_cq_resp *resp;
907 if (copy_from_user(&cmd, buf, sizeof cmd))
910 wc = kmalloc(cmd.ne * sizeof *wc, GFP_KERNEL);
914 rsize = sizeof *resp + cmd.ne * sizeof(struct ib_uverbs_wc);
915 resp = kmalloc(rsize, GFP_KERNEL);
921 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
927 resp->count = ib_poll_cq(cq, cmd.ne, wc);
/* Flatten each kernel ib_wc into the ABI ib_uverbs_wc layout. */
931 for (i = 0; i < resp->count; i++) {
932 resp->wc[i].wr_id = wc[i].wr_id;
933 resp->wc[i].status = wc[i].status;
934 resp->wc[i].opcode = wc[i].opcode;
935 resp->wc[i].vendor_err = wc[i].vendor_err;
936 resp->wc[i].byte_len = wc[i].byte_len;
937 resp->wc[i].ex.imm_data = (__u32 __force) wc[i].ex.imm_data;
938 resp->wc[i].qp_num = wc[i].qp->qp_num;
939 resp->wc[i].src_qp = wc[i].src_qp;
940 resp->wc[i].wc_flags = wc[i].wc_flags;
941 resp->wc[i].pkey_index = wc[i].pkey_index;
942 resp->wc[i].slid = wc[i].slid;
943 resp->wc[i].sl = wc[i].sl;
944 resp->wc[i].dlid_path_bits = wc[i].dlid_path_bits;
945 resp->wc[i].port_num = wc[i].port_num;
948 if (copy_to_user((void __user *) (unsigned long) cmd.response, resp, rsize))
956 return ret ? ret : in_len;
/*
 * Arm a CQ for completion notification: solicited-only events when
 * cmd.solicited_only is set, otherwise notify on the next completion.
 */
959 ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
960 const char __user *buf, int in_len,
963 struct ib_uverbs_req_notify_cq cmd;
966 if (copy_from_user(&cmd, buf, sizeof cmd))
969 cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
973 ib_req_notify_cq(cq, cmd.solicited_only ?
974 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
/*
 * Destroy a CQ: write-lock its uobject, destroy the driver CQ, remove
 * the handle/list entry, release any queued events, and report the
 * counts of comp/async events delivered so userspace can reconcile.
 */
981 ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
982 const char __user *buf, int in_len,
985 struct ib_uverbs_destroy_cq cmd;
986 struct ib_uverbs_destroy_cq_resp resp;
987 struct ib_uobject *uobj;
989 struct ib_ucq_object *obj;
990 struct ib_uverbs_event_file *ev_file;
993 if (copy_from_user(&cmd, buf, sizeof cmd))
996 uobj = idr_write_uobj(&ib_uverbs_cq_idr, cmd.cq_handle, file->ucontext);
/* Capture the event file and ucq container before destroying cq. */
1000 ev_file = cq->cq_context;
1001 obj = container_of(cq->uobject, struct ib_ucq_object, uobject);
1003 ret = ib_destroy_cq(cq);
1007 put_uobj_write(uobj);
1012 idr_remove_uobj(&ib_uverbs_cq_idr, uobj);
1014 mutex_lock(&file->mutex);
1015 list_del(&uobj->list);
1016 mutex_unlock(&file->mutex);
/* Flush any events still queued for this CQ on its channels. */
1018 ib_uverbs_release_ucq(file, ev_file, obj);
1020 memset(&resp, 0, sizeof resp);
1021 resp.comp_events_reported = obj->comp_events_reported;
1022 resp.async_events_reported = obj->async_events_reported;
1026 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1027 &resp, sizeof resp))
/*
 * Create a queue pair.  Read-locks all referenced objects (PD, send
 * CQ, recv CQ, optional SRQ, optional XRC domain), builds the init
 * attributes from the command, calls the driver's create_qp, and
 * publishes the QP handle and actual capabilities to userspace.
 */
1033 ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
1034 const char __user *buf, int in_len,
1037 struct ib_uverbs_create_qp cmd;
1038 struct ib_uverbs_create_qp_resp resp;
1039 struct ib_udata udata;
1040 struct ib_uqp_object *obj;
1042 struct ib_cq *scq, *rcq;
1045 struct ib_qp_init_attr attr;
1046 struct ib_xrcd *xrcd;
1047 struct ib_uobject *xrcd_uobj;
1050 if (out_len < sizeof resp)
1053 if (copy_from_user(&cmd, buf, sizeof cmd))
1056 INIT_UDATA(&udata, buf + sizeof cmd,
1057 (unsigned long) cmd.response + sizeof resp,
1058 in_len - sizeof cmd, out_len - sizeof resp);
1060 obj = kmalloc(sizeof *obj, GFP_KERNEL);
1064 init_uobj(&obj->uevent.uobject, cmd.user_handle, file->ucontext, &qp_lock_key);
1065 down_write(&obj->uevent.uobject.mutex);
/*
 * XRC QPs reuse the srq_handle field of the ABI to carry the XRC
 * domain handle; ordinary QPs use it as an actual SRQ handle.
 */
1067 srq = (cmd.is_srq && cmd.qp_type != IB_QPT_XRC) ?
1068 idr_read_srq(cmd.srq_handle, file->ucontext) : NULL;
1069 xrcd = cmd.qp_type == IB_QPT_XRC ?
1070 idr_read_xrcd(cmd.srq_handle, file->ucontext, &xrcd_uobj) : NULL;
1071 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
1072 scq = idr_read_cq(cmd.send_cq_handle, file->ucontext, 0);
/* nested=1 on the second CQ lock to satisfy lockdep (see idr_read_cq). */
1073 rcq = cmd.recv_cq_handle == cmd.send_cq_handle ?
1074 scq : idr_read_cq(cmd.recv_cq_handle, file->ucontext, 1);
1076 if (!pd || !scq || !rcq || (cmd.is_srq && !srq) ||
1077 (cmd.qp_type == IB_QPT_XRC && !xrcd)) {
1082 attr.create_flags = 0;
1083 attr.event_handler = ib_uverbs_qp_event_handler;
1084 attr.qp_context = file;
1088 attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
1089 attr.qp_type = cmd.qp_type;
/* NOTE(review): create_flags is also zeroed at line 1082 above. */
1091 attr.create_flags = 0;
1093 attr.cap.max_send_wr = cmd.max_send_wr;
1094 attr.cap.max_recv_wr = cmd.max_recv_wr;
1095 attr.cap.max_send_sge = cmd.max_send_sge;
1096 attr.cap.max_recv_sge = cmd.max_recv_sge;
1097 attr.cap.max_inline_data = cmd.max_inline_data;
1099 obj->uevent.events_reported = 0;
1100 INIT_LIST_HEAD(&obj->uevent.event_list);
1101 INIT_LIST_HEAD(&obj->mcast_list);
1103 qp = pd->device->create_qp(pd, &attr, &udata);
1109 qp->device = pd->device;
1111 qp->send_cq = attr.send_cq;
1112 qp->recv_cq = attr.recv_cq;
1114 qp->uobject = &obj->uevent.uobject;
1115 qp->event_handler = attr.event_handler;
1116 qp->qp_context = attr.qp_context;
1117 qp->qp_type = attr.qp_type;
1118 qp->xrcd = attr.xrcd;
/* The QP pins its PD, CQs, and (if present) SRQ / XRC domain. */
1119 atomic_inc(&pd->usecnt);
1120 atomic_inc(&attr.send_cq->usecnt);
1121 atomic_inc(&attr.recv_cq->usecnt);
1123 atomic_inc(&attr.srq->usecnt);
1125 atomic_inc(&attr.xrcd->usecnt);
1127 obj->uevent.uobject.object = qp;
1128 ret = idr_add_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1132 memset(&resp, 0, sizeof resp);
1133 resp.qpn = qp->qp_num;
1134 resp.qp_handle = obj->uevent.uobject.id;
/* Report the capabilities the driver actually granted. */
1135 resp.max_recv_sge = attr.cap.max_recv_sge;
1136 resp.max_send_sge = attr.cap.max_send_sge;
1137 resp.max_recv_wr = attr.cap.max_recv_wr;
1138 resp.max_send_wr = attr.cap.max_send_wr;
1139 resp.max_inline_data = attr.cap.max_inline_data;
1141 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1142 &resp, sizeof resp)) {
1154 put_xrcd_read(xrcd_uobj);
1156 mutex_lock(&file->mutex);
1157 list_add_tail(&obj->uevent.uobject.list, &file->ucontext->qp_list);
1158 mutex_unlock(&file->mutex);
1160 obj->uevent.uobject.live = 1;
1162 up_write(&obj->uevent.uobject.mutex);
/* Error unwind: release all read-locked objects in reverse order. */
1167 idr_remove_uobj(&ib_uverbs_qp_idr, &obj->uevent.uobject);
1177 if (rcq && rcq != scq)
1182 put_xrcd_read(xrcd_uobj);
1184 put_uobj_write(&obj->uevent.uobject);
/*
 * Query a QP's current and init attributes via ib_query_qp and copy a
 * flattened response (including primary and alternate path/AH fields)
 * back to userspace.  attr/init_attr are heap-allocated because they
 * are large for a kernel stack frame.
 */
1188 ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
1189 const char __user *buf, int in_len,
1192 struct ib_uverbs_query_qp cmd;
1193 struct ib_uverbs_query_qp_resp resp;
1195 struct ib_qp_attr *attr;
1196 struct ib_qp_init_attr *init_attr;
1199 if (copy_from_user(&cmd, buf, sizeof cmd))
1202 attr = kmalloc(sizeof *attr, GFP_KERNEL);
1203 init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
1204 if (!attr || !init_attr) {
1209 qp = idr_read_qp(cmd.qp_handle, file->ucontext);
1215 ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);
1222 memset(&resp, 0, sizeof resp);
1224 resp.qp_state = attr->qp_state;
1225 resp.cur_qp_state = attr->cur_qp_state;
1226 resp.path_mtu = attr->path_mtu;
1227 resp.path_mig_state = attr->path_mig_state;
1228 resp.qkey = attr->qkey;
1229 resp.rq_psn = attr->rq_psn;
1230 resp.sq_psn = attr->sq_psn;
1231 resp.dest_qp_num = attr->dest_qp_num;
1232 resp.qp_access_flags = attr->qp_access_flags;
1233 resp.pkey_index = attr->pkey_index;
1234 resp.alt_pkey_index = attr->alt_pkey_index;
1235 resp.sq_draining = attr->sq_draining;
1236 resp.max_rd_atomic = attr->max_rd_atomic;
1237 resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
1238 resp.min_rnr_timer = attr->min_rnr_timer;
1239 resp.port_num = attr->port_num;
1240 resp.timeout = attr->timeout;
1241 resp.retry_cnt = attr->retry_cnt;
1242 resp.rnr_retry = attr->rnr_retry;
1243 resp.alt_port_num = attr->alt_port_num;
1244 resp.alt_timeout = attr->alt_timeout;
/* Primary path address vector. */
1246 memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
1247 resp.dest.flow_label = attr->ah_attr.grh.flow_label;
1248 resp.dest.sgid_index = attr->ah_attr.grh.sgid_index;
1249 resp.dest.hop_limit = attr->ah_attr.grh.hop_limit;
1250 resp.dest.traffic_class = attr->ah_attr.grh.traffic_class;
1251 resp.dest.dlid = attr->ah_attr.dlid;
1252 resp.dest.sl = attr->ah_attr.sl;
1253 resp.dest.src_path_bits = attr->ah_attr.src_path_bits;
1254 resp.dest.static_rate = attr->ah_attr.static_rate;
1255 resp.dest.is_global = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
1256 resp.dest.port_num = attr->ah_attr.port_num;
/* Alternate path address vector. */
1258 memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
1259 resp.alt_dest.flow_label = attr->alt_ah_attr.grh.flow_label;
1260 resp.alt_dest.sgid_index = attr->alt_ah_attr.grh.sgid_index;
1261 resp.alt_dest.hop_limit = attr->alt_ah_attr.grh.hop_limit;
1262 resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
1263 resp.alt_dest.dlid = attr->alt_ah_attr.dlid;
1264 resp.alt_dest.sl = attr->alt_ah_attr.sl;
1265 resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
1266 resp.alt_dest.static_rate = attr->alt_ah_attr.static_rate;
1267 resp.alt_dest.is_global = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
1268 resp.alt_dest.port_num = attr->alt_ah_attr.port_num;
1270 resp.max_send_wr = init_attr->cap.max_send_wr;
1271 resp.max_recv_wr = init_attr->cap.max_recv_wr;
1272 resp.max_send_sge = init_attr->cap.max_send_sge;
1273 resp.max_recv_sge = init_attr->cap.max_recv_sge;
1274 resp.max_inline_data = init_attr->cap.max_inline_data;
1275 resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
1277 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1278 &resp, sizeof resp))
1285 return ret ? ret : in_len;
/*
 * Modify a QP: unpack the command's attribute fields (including both
 * path address vectors) into a heap-allocated ib_qp_attr and call the
 * driver's modify_qp with the user-supplied attr_mask.
 */
1288 ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
1289 const char __user *buf, int in_len,
1292 struct ib_uverbs_modify_qp cmd;
1293 struct ib_udata udata;
1295 struct ib_qp_attr *attr;
1298 if (copy_from_user(&cmd, buf, sizeof cmd))
/* modify_qp produces no response payload, so outbuf is NULL. */
1301 INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
1304 attr = kmalloc(sizeof *attr, GFP_KERNEL);
1308 qp = idr_read_qp(cmd.qp_handle, file->ucontext);
1314 attr->qp_state = cmd.qp_state;
1315 attr->cur_qp_state = cmd.cur_qp_state;
1316 attr->path_mtu = cmd.path_mtu;
1317 attr->path_mig_state = cmd.path_mig_state;
1318 attr->qkey = cmd.qkey;
1319 attr->rq_psn = cmd.rq_psn;
1320 attr->sq_psn = cmd.sq_psn;
1321 attr->dest_qp_num = cmd.dest_qp_num;
1322 attr->qp_access_flags = cmd.qp_access_flags;
1323 attr->pkey_index = cmd.pkey_index;
1324 attr->alt_pkey_index = cmd.alt_pkey_index;
1325 attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
1326 attr->max_rd_atomic = cmd.max_rd_atomic;
1327 attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic;
1328 attr->min_rnr_timer = cmd.min_rnr_timer;
1329 attr->port_num = cmd.port_num;
1330 attr->timeout = cmd.timeout;
1331 attr->retry_cnt = cmd.retry_cnt;
1332 attr->rnr_retry = cmd.rnr_retry;
1333 attr->alt_port_num = cmd.alt_port_num;
1334 attr->alt_timeout = cmd.alt_timeout;
/* Primary path address vector. */
1336 memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
1337 attr->ah_attr.grh.flow_label = cmd.dest.flow_label;
1338 attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index;
1339 attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit;
1340 attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
1341 attr->ah_attr.dlid = cmd.dest.dlid;
1342 attr->ah_attr.sl = cmd.dest.sl;
1343 attr->ah_attr.src_path_bits = cmd.dest.src_path_bits;
1344 attr->ah_attr.static_rate = cmd.dest.static_rate;
1345 attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0;
1346 attr->ah_attr.port_num = cmd.dest.port_num;
/* Alternate path address vector. */
1348 memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
1349 attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label;
1350 attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index;
1351 attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit;
1352 attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
1353 attr->alt_ah_attr.dlid = cmd.alt_dest.dlid;
1354 attr->alt_ah_attr.sl = cmd.alt_dest.sl;
1355 attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits;
1356 attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate;
1357 attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
1358 attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;
1360 ret = qp->device->modify_qp(qp, attr, cmd.attr_mask, &udata);
/*
 * Destroy a QP.  Refuses while the QP is still attached to multicast
 * groups; otherwise destroys the driver QP, removes the handle/list
 * entry, flushes pending async events, and reports events_reported.
 */
1375 ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
1376 const char __user *buf, int in_len,
1379 struct ib_uverbs_destroy_qp cmd;
1380 struct ib_uverbs_destroy_qp_resp resp;
1381 struct ib_uobject *uobj;
1383 struct ib_uqp_object *obj;
1386 if (copy_from_user(&cmd, buf, sizeof cmd))
1389 memset(&resp, 0, sizeof resp);
1391 uobj = idr_write_uobj(&ib_uverbs_qp_idr, cmd.qp_handle, file->ucontext);
1395 obj = container_of(uobj, struct ib_uqp_object, uevent.uobject);
/* Cannot destroy while multicast attachments still reference it. */
1397 if (!list_empty(&obj->mcast_list)) {
1398 put_uobj_write(uobj);
1402 ret = ib_destroy_qp(qp);
1406 put_uobj_write(uobj);
1411 idr_remove_uobj(&ib_uverbs_qp_idr, uobj);
1413 mutex_lock(&file->mutex);
1414 list_del(&uobj->list);
1415 mutex_unlock(&file->mutex);
/* Flush any async events still queued for this QP. */
1417 ib_uverbs_release_uevent(file, &obj->uevent);
1419 resp.events_reported = obj->uevent.events_reported;
1423 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1424 &resp, sizeof resp))
1430 ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
1431 const char __user *buf, int in_len,
1434 struct ib_uverbs_post_send cmd;
1435 struct ib_uverbs_post_send_resp resp;
1436 struct ib_uverbs_send_wr *user_wr;
1437 struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
1441 ssize_t ret = -EINVAL;
1443 if (copy_from_user(&cmd, buf, sizeof cmd))
1446 if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
1447 cmd.sge_count * sizeof (struct ib_uverbs_sge))
1450 if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
1453 user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
1457 qp = idr_read_qp(cmd.qp_handle, file->ucontext);
1461 is_ud = qp->qp_type == IB_QPT_UD;
1464 for (i = 0; i < cmd.wr_count; ++i) {
1465 if (copy_from_user(user_wr,
1466 buf + sizeof cmd + i * cmd.wqe_size,
1472 if (user_wr->num_sge + sg_ind > cmd.sge_count) {
1477 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
1478 user_wr->num_sge * sizeof (struct ib_sge),
1492 next->wr_id = user_wr->wr_id;
1493 next->num_sge = user_wr->num_sge;
1494 next->opcode = user_wr->opcode;
1495 next->send_flags = user_wr->send_flags;
1498 next->wr.ud.ah = idr_read_ah(user_wr->wr.ud.ah,
1500 if (!next->wr.ud.ah) {
1504 next->wr.ud.remote_qpn = user_wr->wr.ud.remote_qpn;
1505 next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
1507 switch (next->opcode) {
1508 case IB_WR_RDMA_WRITE_WITH_IMM:
1510 (__be32 __force) user_wr->ex.imm_data;
1511 case IB_WR_RDMA_WRITE:
1512 case IB_WR_RDMA_READ:
1513 next->wr.rdma.remote_addr =
1514 user_wr->wr.rdma.remote_addr;
1515 next->wr.rdma.rkey =
1516 user_wr->wr.rdma.rkey;
1518 case IB_WR_SEND_WITH_IMM:
1520 (__be32 __force) user_wr->ex.imm_data;
1522 case IB_WR_SEND_WITH_INV:
1523 next->ex.invalidate_rkey =
1524 user_wr->ex.invalidate_rkey;
1526 case IB_WR_ATOMIC_CMP_AND_SWP:
1527 case IB_WR_ATOMIC_FETCH_AND_ADD:
1528 next->wr.atomic.remote_addr =
1529 user_wr->wr.atomic.remote_addr;
1530 next->wr.atomic.compare_add =
1531 user_wr->wr.atomic.compare_add;
1532 next->wr.atomic.swap = user_wr->wr.atomic.swap;
1533 next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
1540 if (next->num_sge) {
1541 next->sg_list = (void *) next +
1542 ALIGN(sizeof *next, sizeof (struct ib_sge));
1543 if (copy_from_user(next->sg_list,
1545 cmd.wr_count * cmd.wqe_size +
1546 sg_ind * sizeof (struct ib_sge),
1547 next->num_sge * sizeof (struct ib_sge))) {
1551 sg_ind += next->num_sge;
1553 next->sg_list = NULL;
1557 ret = qp->device->post_send(qp, wr, &bad_wr);
1559 for (next = wr; next; next = next->next) {
1565 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1566 &resp, sizeof resp))
1573 if (is_ud && wr->wr.ud.ah)
1574 put_ah_read(wr->wr.ud.ah);
1583 return ret ? ret : in_len;
1586 static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
1592 struct ib_uverbs_recv_wr *user_wr;
1593 struct ib_recv_wr *wr = NULL, *last, *next;
1598 if (in_len < wqe_size * wr_count +
1599 sge_count * sizeof (struct ib_uverbs_sge))
1600 return ERR_PTR(-EINVAL);
1602 if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
1603 return ERR_PTR(-EINVAL);
1605 user_wr = kmalloc(wqe_size, GFP_KERNEL);
1607 return ERR_PTR(-ENOMEM);
1611 for (i = 0; i < wr_count; ++i) {
1612 if (copy_from_user(user_wr, buf + i * wqe_size,
1618 if (user_wr->num_sge + sg_ind > sge_count) {
1623 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
1624 user_wr->num_sge * sizeof (struct ib_sge),
1638 next->wr_id = user_wr->wr_id;
1639 next->num_sge = user_wr->num_sge;
1641 if (next->num_sge) {
1642 next->sg_list = (void *) next +
1643 ALIGN(sizeof *next, sizeof (struct ib_sge));
1644 if (copy_from_user(next->sg_list,
1645 buf + wr_count * wqe_size +
1646 sg_ind * sizeof (struct ib_sge),
1647 next->num_sge * sizeof (struct ib_sge))) {
1651 sg_ind += next->num_sge;
1653 next->sg_list = NULL;
1668 return ERR_PTR(ret);
1671 ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
1672 const char __user *buf, int in_len,
1675 struct ib_uverbs_post_recv cmd;
1676 struct ib_uverbs_post_recv_resp resp;
1677 struct ib_recv_wr *wr, *next, *bad_wr;
1679 ssize_t ret = -EINVAL;
1681 if (copy_from_user(&cmd, buf, sizeof cmd))
1684 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
1685 in_len - sizeof cmd, cmd.wr_count,
1686 cmd.sge_count, cmd.wqe_size);
1690 qp = idr_read_qp(cmd.qp_handle, file->ucontext);
1695 ret = qp->device->post_recv(qp, wr, &bad_wr);
1700 for (next = wr; next; next = next->next) {
1706 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1707 &resp, sizeof resp))
1717 return ret ? ret : in_len;
1720 ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
1721 const char __user *buf, int in_len,
1724 struct ib_uverbs_post_srq_recv cmd;
1725 struct ib_uverbs_post_srq_recv_resp resp;
1726 struct ib_recv_wr *wr, *next, *bad_wr;
1728 ssize_t ret = -EINVAL;
1730 if (copy_from_user(&cmd, buf, sizeof cmd))
1733 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
1734 in_len - sizeof cmd, cmd.wr_count,
1735 cmd.sge_count, cmd.wqe_size);
1739 srq = idr_read_srq(cmd.srq_handle, file->ucontext);
1744 ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
1749 for (next = wr; next; next = next->next) {
1755 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1756 &resp, sizeof resp))
1766 return ret ? ret : in_len;
1769 ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
1770 const char __user *buf, int in_len,
1773 struct ib_uverbs_create_ah cmd;
1774 struct ib_uverbs_create_ah_resp resp;
1775 struct ib_uobject *uobj;
1778 struct ib_ah_attr attr;
1781 if (out_len < sizeof resp)
1784 if (copy_from_user(&cmd, buf, sizeof cmd))
1787 uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
1791 init_uobj(uobj, cmd.user_handle, file->ucontext, &ah_lock_key);
1792 down_write(&uobj->mutex);
1794 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
1800 attr.dlid = cmd.attr.dlid;
1801 attr.sl = cmd.attr.sl;
1802 attr.src_path_bits = cmd.attr.src_path_bits;
1803 attr.static_rate = cmd.attr.static_rate;
1804 attr.ah_flags = cmd.attr.is_global ? IB_AH_GRH : 0;
1805 attr.port_num = cmd.attr.port_num;
1806 attr.grh.flow_label = cmd.attr.grh.flow_label;
1807 attr.grh.sgid_index = cmd.attr.grh.sgid_index;
1808 attr.grh.hop_limit = cmd.attr.grh.hop_limit;
1809 attr.grh.traffic_class = cmd.attr.grh.traffic_class;
1810 memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);
1812 ah = ib_create_ah(pd, &attr);
1821 ret = idr_add_uobj(&ib_uverbs_ah_idr, uobj);
1825 resp.ah_handle = uobj->id;
1827 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1828 &resp, sizeof resp)) {
1835 mutex_lock(&file->mutex);
1836 list_add_tail(&uobj->list, &file->ucontext->ah_list);
1837 mutex_unlock(&file->mutex);
1841 up_write(&uobj->mutex);
1846 idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
1855 put_uobj_write(uobj);
1859 ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
1860 const char __user *buf, int in_len, int out_len)
1862 struct ib_uverbs_destroy_ah cmd;
1864 struct ib_uobject *uobj;
1867 if (copy_from_user(&cmd, buf, sizeof cmd))
1870 uobj = idr_write_uobj(&ib_uverbs_ah_idr, cmd.ah_handle, file->ucontext);
1875 ret = ib_destroy_ah(ah);
1879 put_uobj_write(uobj);
1884 idr_remove_uobj(&ib_uverbs_ah_idr, uobj);
1886 mutex_lock(&file->mutex);
1887 list_del(&uobj->list);
1888 mutex_unlock(&file->mutex);
1895 ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
1896 const char __user *buf, int in_len,
1899 struct ib_uverbs_attach_mcast cmd;
1901 struct ib_uqp_object *obj;
1902 struct ib_uverbs_mcast_entry *mcast;
1905 if (copy_from_user(&cmd, buf, sizeof cmd))
1908 qp = idr_read_qp(cmd.qp_handle, file->ucontext);
1912 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
1914 list_for_each_entry(mcast, &obj->mcast_list, list)
1915 if (cmd.mlid == mcast->lid &&
1916 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
1921 mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
1927 mcast->lid = cmd.mlid;
1928 memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);
1930 ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
1932 list_add_tail(&mcast->list, &obj->mcast_list);
1939 return ret ? ret : in_len;
1942 ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
1943 const char __user *buf, int in_len,
1946 struct ib_uverbs_detach_mcast cmd;
1947 struct ib_uqp_object *obj;
1949 struct ib_uverbs_mcast_entry *mcast;
1952 if (copy_from_user(&cmd, buf, sizeof cmd))
1955 qp = idr_read_qp(cmd.qp_handle, file->ucontext);
1959 ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
1963 obj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
1965 list_for_each_entry(mcast, &obj->mcast_list, list)
1966 if (cmd.mlid == mcast->lid &&
1967 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
1968 list_del(&mcast->list);
1976 return ret ? ret : in_len;
1979 ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
1980 const char __user *buf, int in_len,
1983 struct ib_uverbs_create_srq cmd;
1984 struct ib_uverbs_create_srq_resp resp;
1985 struct ib_udata udata;
1986 struct ib_uevent_object *obj;
1989 struct ib_srq_init_attr attr;
1992 if (out_len < sizeof resp)
1995 if (copy_from_user(&cmd, buf, sizeof cmd))
1998 INIT_UDATA(&udata, buf + sizeof cmd,
1999 (unsigned long) cmd.response + sizeof resp,
2000 in_len - sizeof cmd, out_len - sizeof resp);
2002 obj = kmalloc(sizeof *obj, GFP_KERNEL);
2006 init_uobj(&obj->uobject, cmd.user_handle, file->ucontext, &srq_lock_key);
2007 down_write(&obj->uobject.mutex);
2009 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
2015 attr.event_handler = ib_uverbs_srq_event_handler;
2016 attr.srq_context = file;
2017 attr.attr.max_wr = cmd.max_wr;
2018 attr.attr.max_sge = cmd.max_sge;
2019 attr.attr.srq_limit = cmd.srq_limit;
2021 obj->events_reported = 0;
2022 INIT_LIST_HEAD(&obj->event_list);
2024 srq = pd->device->create_srq(pd, &attr, &udata);
2030 srq->device = pd->device;
2032 srq->uobject = &obj->uobject;
2033 srq->event_handler = attr.event_handler;
2034 srq->srq_context = attr.srq_context;
2035 srq->ext.xrc.cq = NULL;
2036 srq->ext.xrc.xrcd = NULL;
2037 atomic_inc(&pd->usecnt);
2038 atomic_set(&srq->usecnt, 0);
2040 obj->uobject.object = srq;
2041 ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uobject);
2045 memset(&resp, 0, sizeof resp);
2046 resp.srq_handle = obj->uobject.id;
2047 resp.max_wr = attr.attr.max_wr;
2048 resp.max_sge = attr.attr.max_sge;
2050 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2051 &resp, sizeof resp)) {
2058 mutex_lock(&file->mutex);
2059 list_add_tail(&obj->uobject.list, &file->ucontext->srq_list);
2060 mutex_unlock(&file->mutex);
2062 obj->uobject.live = 1;
2064 up_write(&obj->uobject.mutex);
2069 idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uobject);
2072 ib_destroy_srq(srq);
2078 put_uobj_write(&obj->uobject);
2082 ssize_t ib_uverbs_create_xrc_srq(struct ib_uverbs_file *file,
2083 const char __user *buf, int in_len,
2086 struct ib_uverbs_create_xsrq cmd;
2087 struct ib_uverbs_create_srq_resp resp;
2088 struct ib_udata udata;
2089 struct ib_uevent_object *obj;
2092 struct ib_cq *xrc_cq;
2093 struct ib_xrcd *xrcd;
2094 struct ib_srq_init_attr attr;
2095 struct ib_uobject *xrcd_uobj;
2098 if (out_len < sizeof resp)
2101 if (copy_from_user(&cmd, buf, sizeof cmd))
2104 INIT_UDATA(&udata, buf + sizeof cmd,
2105 (unsigned long) cmd.response + sizeof resp,
2106 in_len - sizeof cmd, out_len - sizeof resp);
2108 obj = kmalloc(sizeof *obj, GFP_KERNEL);
2112 init_uobj(&obj->uobject, cmd.user_handle, file->ucontext,
2114 down_write(&obj->uobject.mutex);
2116 pd = idr_read_pd(cmd.pd_handle, file->ucontext);
2122 xrc_cq = idr_read_cq(cmd.cq_handle, file->ucontext, 0);
2128 xrcd = idr_read_xrcd(cmd.xrcd_handle, file->ucontext, &xrcd_uobj);
2135 attr.event_handler = ib_uverbs_srq_event_handler;
2136 attr.srq_context = file;
2137 attr.attr.max_wr = cmd.max_wr;
2138 attr.attr.max_sge = cmd.max_sge;
2139 attr.attr.srq_limit = cmd.srq_limit;
2141 obj->events_reported = 0;
2142 INIT_LIST_HEAD(&obj->event_list);
2144 srq = pd->device->create_xrc_srq(pd, xrc_cq, xrcd, &attr, &udata);
2150 srq->device = pd->device;
2152 srq->uobject = &obj->uobject;
2153 srq->event_handler = attr.event_handler;
2154 srq->srq_context = attr.srq_context;
2155 srq->ext.xrc.cq = xrc_cq;
2156 srq->ext.xrc.xrcd = xrcd;
2157 atomic_inc(&pd->usecnt);
2158 atomic_inc(&xrc_cq->usecnt);
2159 atomic_inc(&xrcd->usecnt);
2161 atomic_set(&srq->usecnt, 0);
2163 obj->uobject.object = srq;
2164 ret = idr_add_uobj(&ib_uverbs_srq_idr, &obj->uobject);
2168 memset(&resp, 0, sizeof resp);
2169 resp.srq_handle = obj->uobject.id;
2170 resp.max_wr = attr.attr.max_wr;
2171 resp.max_sge = attr.attr.max_sge;
2173 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2174 &resp, sizeof resp)) {
2179 put_xrcd_read(xrcd_uobj);
2180 put_cq_read(xrc_cq);
2183 mutex_lock(&file->mutex);
2184 list_add_tail(&obj->uobject.list, &file->ucontext->srq_list);
2185 mutex_unlock(&file->mutex);
2187 obj->uobject.live = 1;
2189 up_write(&obj->uobject.mutex);
2194 idr_remove_uobj(&ib_uverbs_srq_idr, &obj->uobject);
2197 ib_destroy_srq(srq);
2200 put_xrcd_read(xrcd_uobj);
2203 put_cq_read(xrc_cq);
2209 put_uobj_write(&obj->uobject);
2213 ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
2214 const char __user *buf, int in_len,
2217 struct ib_uverbs_modify_srq cmd;
2218 struct ib_udata udata;
2220 struct ib_srq_attr attr;
2223 if (copy_from_user(&cmd, buf, sizeof cmd))
2226 INIT_UDATA(&udata, buf + sizeof cmd, NULL, in_len - sizeof cmd,
2229 srq = idr_read_srq(cmd.srq_handle, file->ucontext);
2233 attr.max_wr = cmd.max_wr;
2234 attr.srq_limit = cmd.srq_limit;
2236 ret = srq->device->modify_srq(srq, &attr, cmd.attr_mask, &udata);
2240 return ret ? ret : in_len;
2243 ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
2244 const char __user *buf,
2245 int in_len, int out_len)
2247 struct ib_uverbs_query_srq cmd;
2248 struct ib_uverbs_query_srq_resp resp;
2249 struct ib_srq_attr attr;
2253 if (out_len < sizeof resp)
2256 if (copy_from_user(&cmd, buf, sizeof cmd))
2259 srq = idr_read_srq(cmd.srq_handle, file->ucontext);
2263 ret = ib_query_srq(srq, &attr);
2270 memset(&resp, 0, sizeof resp);
2272 resp.max_wr = attr.max_wr;
2273 resp.max_sge = attr.max_sge;
2274 resp.srq_limit = attr.srq_limit;
2276 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2277 &resp, sizeof resp))
2283 ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
2284 const char __user *buf, int in_len,
2287 struct ib_uverbs_destroy_srq cmd;
2288 struct ib_uverbs_destroy_srq_resp resp;
2289 struct ib_uobject *uobj;
2291 struct ib_uevent_object *obj;
2294 if (copy_from_user(&cmd, buf, sizeof cmd))
2297 uobj = idr_write_uobj(&ib_uverbs_srq_idr, cmd.srq_handle, file->ucontext);
2301 obj = container_of(uobj, struct ib_uevent_object, uobject);
2303 ret = ib_destroy_srq(srq);
2307 put_uobj_write(uobj);
2312 idr_remove_uobj(&ib_uverbs_srq_idr, uobj);
2314 mutex_lock(&file->mutex);
2315 list_del(&uobj->list);
2316 mutex_unlock(&file->mutex);
2318 ib_uverbs_release_uevent(file, obj);
2320 memset(&resp, 0, sizeof resp);
2321 resp.events_reported = obj->events_reported;
2325 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2326 &resp, sizeof resp))
2329 return ret ? ret : in_len;
2332 static struct inode *xrc_file2inode(struct file *f)
2334 return f->f_dentry->d_inode;
2337 struct xrcd_table_entry {
2338 struct rb_node node;
2339 struct inode *inode;
2340 struct ib_xrcd *xrcd;
2343 static int xrcd_table_insert(struct ib_device *dev,
2345 struct ib_xrcd *xrcd)
2347 struct xrcd_table_entry *entry, *scan;
2348 struct rb_node **p = &dev->ib_uverbs_xrcd_table.rb_node;
2349 struct rb_node *parent = NULL;
2351 entry = kmalloc(sizeof(struct xrcd_table_entry), GFP_KERNEL);
2360 scan = rb_entry(parent, struct xrcd_table_entry, node);
2362 if (i_n < scan->inode)
2364 else if (i_n > scan->inode)
2365 p = &(*p)->rb_right;
2372 rb_link_node(&entry->node, parent, p);
2373 rb_insert_color(&entry->node, &dev->ib_uverbs_xrcd_table);
2378 static struct xrcd_table_entry *xrcd_table_search(struct ib_device *dev,
2381 struct xrcd_table_entry *scan;
2382 struct rb_node **p = &dev->ib_uverbs_xrcd_table.rb_node;
2383 struct rb_node *parent = NULL;
2387 scan = rb_entry(parent, struct xrcd_table_entry, node);
2389 if (i_n < scan->inode)
2391 else if (i_n > scan->inode)
2392 p = &(*p)->rb_right;
2399 static int find_xrcd(struct ib_device *dev, struct inode *i_n,
2400 struct ib_xrcd **xrcd)
2402 struct xrcd_table_entry *entry;
2404 entry = xrcd_table_search(dev, i_n);
2408 *xrcd = entry->xrcd;
2413 static void xrcd_table_delete(struct ib_device *dev,
2416 struct xrcd_table_entry *entry = xrcd_table_search(dev, i_n);
2420 rb_erase(&entry->node, &dev->ib_uverbs_xrcd_table);
2425 ssize_t ib_uverbs_open_xrc_domain(struct ib_uverbs_file *file,
2426 const char __user *buf, int in_len,
2429 struct ib_uverbs_open_xrc_domain cmd;
2430 struct ib_uverbs_open_xrc_domain_resp resp;
2431 struct ib_udata udata;
2432 struct ib_uobject *uobj;
2433 struct ib_uxrcd_object *xrcd_uobj;
2434 struct ib_xrcd *xrcd = NULL;
2435 struct file *f = NULL;
2436 struct inode *inode = NULL;
2440 if (out_len < sizeof resp)
2443 if (copy_from_user(&cmd, buf, sizeof cmd))
2446 INIT_UDATA(&udata, buf + sizeof cmd,
2447 (unsigned long) cmd.response + sizeof resp,
2448 in_len - sizeof cmd, out_len - sizeof resp);
2450 mutex_lock(&file->device->ib_dev->xrcd_table_mutex);
2451 if (cmd.fd != (u32) (-1)) {
2452 /* search for file descriptor */
2456 goto err_table_mutex_unlock;
2459 inode = xrc_file2inode(f);
2462 goto err_table_mutex_unlock;
2465 ret = find_xrcd(file->device->ib_dev, inode, &xrcd);
2466 if (ret && !(cmd.oflags & O_CREAT)) {
2467 /* no file descriptor. Need CREATE flag */
2469 goto err_table_mutex_unlock;
2472 if (xrcd && cmd.oflags & O_EXCL) {
2474 goto err_table_mutex_unlock;
2478 xrcd_uobj = kmalloc(sizeof *xrcd_uobj, GFP_KERNEL);
2481 goto err_table_mutex_unlock;
2484 uobj = &xrcd_uobj->uobject;
2485 init_uobj(uobj, 0, file->ucontext, &pd_lock_key);
2486 down_write(&uobj->mutex);
2489 xrcd = file->device->ib_dev->alloc_xrcd(file->device->ib_dev,
2490 file->ucontext, &udata);
2492 ret = PTR_ERR(xrcd);
2495 xrcd->uobject = (cmd.fd == -1) ? uobj : NULL;
2496 xrcd->inode = inode;
2497 xrcd->device = file->device->ib_dev;
2498 atomic_set(&xrcd->usecnt, 0);
2502 uobj->object = xrcd;
2503 ret = idr_add_uobj(&ib_uverbs_xrc_domain_idr, uobj);
2507 memset(&resp, 0, sizeof resp);
2508 resp.xrcd_handle = uobj->id;
2512 /* create new inode/xrcd table entry */
2513 ret = xrcd_table_insert(file->device->ib_dev, inode, xrcd);
2515 goto err_insert_xrcd;
2517 atomic_inc(&xrcd->usecnt);
2522 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2523 &resp, sizeof resp)) {
2528 INIT_LIST_HEAD(&xrcd_uobj->xrc_reg_qp_list);
2530 mutex_lock(&file->mutex);
2531 list_add_tail(&uobj->list, &file->ucontext->xrcd_list);
2532 mutex_unlock(&file->mutex);
2536 up_write(&uobj->mutex);
2538 mutex_unlock(&file->device->ib_dev->xrcd_table_mutex);
2545 xrcd_table_delete(file->device->ib_dev, inode);
2546 atomic_dec(&xrcd->usecnt);
2550 idr_remove_uobj(&ib_uverbs_xrc_domain_idr, uobj);
2553 ib_dealloc_xrcd(xrcd);
2556 put_uobj_write(uobj);
2558 err_table_mutex_unlock:
2562 mutex_unlock(&file->device->ib_dev->xrcd_table_mutex);
2566 ssize_t ib_uverbs_close_xrc_domain(struct ib_uverbs_file *file,
2567 const char __user *buf, int in_len,
2570 struct ib_uverbs_close_xrc_domain cmd;
2571 struct ib_uobject *uobj, *t_uobj;
2572 struct ib_uxrcd_object *xrcd_uobj;
2573 struct ib_xrcd *xrcd = NULL;
2574 struct inode *inode = NULL;
2577 if (copy_from_user(&cmd, buf, sizeof cmd))
2580 mutex_lock(&file->device->ib_dev->xrcd_table_mutex);
2581 uobj = idr_write_uobj(&ib_uverbs_xrc_domain_idr, cmd.xrcd_handle,
2585 goto err_unlock_mutex;
2588 mutex_lock(&file->mutex);
2590 list_for_each_entry(t_uobj, &file->ucontext->qp_list, list) {
2591 struct ib_qp *qp = t_uobj->object;
2592 if (qp->xrcd && qp->xrcd == uobj->object) {
2599 list_for_each_entry(t_uobj, &file->ucontext->srq_list, list) {
2600 struct ib_srq *srq = t_uobj->object;
2601 if (srq->ext.xrc.xrcd && srq->ext.xrc.xrcd == uobj->object) {
2607 mutex_unlock(&file->mutex);
2609 put_uobj_write(uobj);
2610 goto err_unlock_mutex;
2613 xrcd_uobj = container_of(uobj, struct ib_uxrcd_object, uobject);
2614 if (!list_empty(&xrcd_uobj->xrc_reg_qp_list)) {
2616 put_uobj_write(uobj);
2617 goto err_unlock_mutex;
2620 xrcd = (struct ib_xrcd *) (uobj->object);
2621 inode = xrcd->inode;
2624 atomic_dec(&xrcd->usecnt);
2626 ret = ib_dealloc_xrcd(uobj->object);
2630 put_uobj_write(uobj);
2633 goto err_unlock_mutex;
2636 xrcd_table_delete(file->device->ib_dev, inode);
2638 idr_remove_uobj(&ib_uverbs_xrc_domain_idr, uobj);
2640 mutex_lock(&file->mutex);
2641 list_del(&uobj->list);
2642 mutex_unlock(&file->mutex);
2646 mutex_unlock(&file->device->ib_dev->xrcd_table_mutex);
2650 mutex_unlock(&file->device->ib_dev->xrcd_table_mutex);
2654 void ib_uverbs_dealloc_xrcd(struct ib_device *ib_dev,
2655 struct ib_xrcd *xrcd)
2657 struct inode *inode = NULL;
2660 inode = xrcd->inode;
2662 atomic_dec(&xrcd->usecnt);
2664 ret = ib_dealloc_xrcd(xrcd);
2666 xrcd_table_delete(ib_dev, inode);
2669 ssize_t ib_uverbs_create_xrc_rcv_qp(struct ib_uverbs_file *file,
2670 const char __user *buf, int in_len,
2673 struct ib_uverbs_create_xrc_rcv_qp cmd;
2674 struct ib_uverbs_create_xrc_rcv_qp_resp resp;
2675 struct ib_uxrc_rcv_object *obj;
2676 struct ib_qp_init_attr init_attr;
2677 struct ib_xrcd *xrcd;
2678 struct ib_uobject *uobj;
2679 struct ib_uxrcd_object *xrcd_uobj;
2683 if (out_len < sizeof resp)
2686 if (copy_from_user(&cmd, buf, sizeof cmd))
2689 obj = kzalloc(sizeof *obj, GFP_KERNEL);
2693 xrcd = idr_read_xrcd(cmd.xrc_domain_handle, file->ucontext, &uobj);
2699 init_attr.event_handler = ib_uverbs_xrc_rcv_qp_event_handler;
2700 init_attr.qp_context = file;
2701 init_attr.srq = NULL;
2702 init_attr.sq_sig_type =
2703 cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
2704 init_attr.qp_type = IB_QPT_XRC;
2705 init_attr.xrcd = xrcd;
2707 init_attr.cap.max_send_wr = 1;
2708 init_attr.cap.max_recv_wr = 0;
2709 init_attr.cap.max_send_sge = 1;
2710 init_attr.cap.max_recv_sge = 0;
2711 init_attr.cap.max_inline_data = 0;
2713 err = xrcd->device->create_xrc_rcv_qp(&init_attr, &qp_num);
2717 memset(&resp, 0, sizeof resp);
2720 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2721 &resp, sizeof resp)) {
2726 atomic_inc(&xrcd->usecnt);
2727 put_xrcd_read(uobj);
2728 obj->qp_num = qp_num;
2729 obj->domain_handle = cmd.xrc_domain_handle;
2730 xrcd_uobj = container_of(uobj, struct ib_uxrcd_object, uobject);
2731 mutex_lock(&file->device->ib_dev->xrcd_table_mutex);
2732 list_add_tail(&obj->list, &xrcd_uobj->xrc_reg_qp_list);
2733 mutex_unlock(&file->device->ib_dev->xrcd_table_mutex);
2738 xrcd->device->unreg_xrc_rcv_qp(xrcd, file, qp_num);
2740 put_xrcd_read(uobj);
2746 ssize_t ib_uverbs_modify_xrc_rcv_qp(struct ib_uverbs_file *file,
2747 const char __user *buf, int in_len,
2750 struct ib_uverbs_modify_xrc_rcv_qp cmd;
2751 struct ib_qp_attr *attr;
2752 struct ib_xrcd *xrcd;
2753 struct ib_uobject *uobj;
2756 if (copy_from_user(&cmd, buf, sizeof cmd))
2759 attr = kzalloc(sizeof *attr, GFP_KERNEL);
2763 xrcd = idr_read_xrcd(cmd.xrc_domain_handle, file->ucontext, &uobj);
2769 attr->qp_state = cmd.qp_state;
2770 attr->cur_qp_state = cmd.cur_qp_state;
2771 attr->qp_access_flags = cmd.qp_access_flags;
2772 attr->pkey_index = cmd.pkey_index;
2773 attr->port_num = cmd.port_num;
2774 attr->path_mtu = cmd.path_mtu;
2775 attr->path_mig_state = cmd.path_mig_state;
2776 attr->qkey = cmd.qkey;
2777 attr->rq_psn = cmd.rq_psn;
2778 attr->sq_psn = cmd.sq_psn;
2779 attr->dest_qp_num = cmd.dest_qp_num;
2780 attr->alt_pkey_index = cmd.alt_pkey_index;
2781 attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
2782 attr->max_rd_atomic = cmd.max_rd_atomic;
2783 attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic;
2784 attr->min_rnr_timer = cmd.min_rnr_timer;
2785 attr->port_num = cmd.port_num;
2786 attr->timeout = cmd.timeout;
2787 attr->retry_cnt = cmd.retry_cnt;
2788 attr->rnr_retry = cmd.rnr_retry;
2789 attr->alt_port_num = cmd.alt_port_num;
2790 attr->alt_timeout = cmd.alt_timeout;
2792 memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
2793 attr->ah_attr.grh.flow_label = cmd.dest.flow_label;
2794 attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index;
2795 attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit;
2796 attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
2797 attr->ah_attr.dlid = cmd.dest.dlid;
2798 attr->ah_attr.sl = cmd.dest.sl;
2799 attr->ah_attr.src_path_bits = cmd.dest.src_path_bits;
2800 attr->ah_attr.static_rate = cmd.dest.static_rate;
2801 attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0;
2802 attr->ah_attr.port_num = cmd.dest.port_num;
2804 memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
2805 attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label;
2806 attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index;
2807 attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit;
2808 attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
2809 attr->alt_ah_attr.dlid = cmd.alt_dest.dlid;
2810 attr->alt_ah_attr.sl = cmd.alt_dest.sl;
2811 attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits;
2812 attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate;
2813 attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
2814 attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;
2816 err = xrcd->device->modify_xrc_rcv_qp(xrcd, cmd.qp_num, attr, cmd.attr_mask);
2817 put_xrcd_read(uobj);
2819 return err ? err : in_len;
2822 ssize_t ib_uverbs_query_xrc_rcv_qp(struct ib_uverbs_file *file,
2823 const char __user *buf, int in_len,
2826 struct ib_uverbs_query_xrc_rcv_qp cmd;
2827 struct ib_uverbs_query_qp_resp resp;
2828 struct ib_qp_attr *attr;
2829 struct ib_qp_init_attr *init_attr;
2830 struct ib_xrcd *xrcd;
2831 struct ib_uobject *uobj;
2834 if (copy_from_user(&cmd, buf, sizeof cmd))
2837 attr = kmalloc(sizeof *attr, GFP_KERNEL);
2838 init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
2839 if (!attr || !init_attr) {
2844 xrcd = idr_read_xrcd(cmd.xrc_domain_handle, file->ucontext, &uobj);
2850 ret = xrcd->device->query_xrc_rcv_qp(xrcd, cmd.qp_num, attr,
2851 cmd.attr_mask, init_attr);
2853 put_xrcd_read(uobj);
2858 memset(&resp, 0, sizeof resp);
2859 resp.qp_state = attr->qp_state;
2860 resp.cur_qp_state = attr->cur_qp_state;
2861 resp.path_mtu = attr->path_mtu;
2862 resp.path_mig_state = attr->path_mig_state;
2863 resp.qkey = attr->qkey;
2864 resp.rq_psn = attr->rq_psn;
2865 resp.sq_psn = attr->sq_psn;
2866 resp.dest_qp_num = attr->dest_qp_num;
2867 resp.qp_access_flags = attr->qp_access_flags;
2868 resp.pkey_index = attr->pkey_index;
2869 resp.alt_pkey_index = attr->alt_pkey_index;
2870 resp.sq_draining = attr->sq_draining;
2871 resp.max_rd_atomic = attr->max_rd_atomic;
2872 resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
2873 resp.min_rnr_timer = attr->min_rnr_timer;
2874 resp.port_num = attr->port_num;
2875 resp.timeout = attr->timeout;
2876 resp.retry_cnt = attr->retry_cnt;
2877 resp.rnr_retry = attr->rnr_retry;
2878 resp.alt_port_num = attr->alt_port_num;
2879 resp.alt_timeout = attr->alt_timeout;
2881 memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
2882 resp.dest.flow_label = attr->ah_attr.grh.flow_label;
2883 resp.dest.sgid_index = attr->ah_attr.grh.sgid_index;
2884 resp.dest.hop_limit = attr->ah_attr.grh.hop_limit;
2885 resp.dest.traffic_class = attr->ah_attr.grh.traffic_class;
2886 resp.dest.dlid = attr->ah_attr.dlid;
2887 resp.dest.sl = attr->ah_attr.sl;
2888 resp.dest.src_path_bits = attr->ah_attr.src_path_bits;
2889 resp.dest.static_rate = attr->ah_attr.static_rate;
2890 resp.dest.is_global = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
2891 resp.dest.port_num = attr->ah_attr.port_num;
2893 memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
2894 resp.alt_dest.flow_label = attr->alt_ah_attr.grh.flow_label;
2895 resp.alt_dest.sgid_index = attr->alt_ah_attr.grh.sgid_index;
2896 resp.alt_dest.hop_limit = attr->alt_ah_attr.grh.hop_limit;
2897 resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
2898 resp.alt_dest.dlid = attr->alt_ah_attr.dlid;
2899 resp.alt_dest.sl = attr->alt_ah_attr.sl;
2900 resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
2901 resp.alt_dest.static_rate = attr->alt_ah_attr.static_rate;
2902 resp.alt_dest.is_global = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
2903 resp.alt_dest.port_num = attr->alt_ah_attr.port_num;
2905 resp.max_send_wr = init_attr->cap.max_send_wr;
2906 resp.max_recv_wr = init_attr->cap.max_recv_wr;
2907 resp.max_send_sge = init_attr->cap.max_send_sge;
2908 resp.max_recv_sge = init_attr->cap.max_recv_sge;
2909 resp.max_inline_data = init_attr->cap.max_inline_data;
2910 resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
2912 if (copy_to_user((void __user *) (unsigned long) cmd.response,
2913 &resp, sizeof resp))
2920 return ret ? ret : in_len;
2923 ssize_t ib_uverbs_reg_xrc_rcv_qp(struct ib_uverbs_file *file,
2924 const char __user *buf, int in_len,
2927 struct ib_uverbs_reg_xrc_rcv_qp cmd;
2928 struct ib_uxrc_rcv_object *qp_obj, *tmp;
2929 struct ib_xrcd *xrcd;
2930 struct ib_uobject *uobj;
2931 struct ib_uxrcd_object *xrcd_uobj;
2934 if (copy_from_user(&cmd, buf, sizeof cmd))
2937 qp_obj = kmalloc(sizeof *qp_obj, GFP_KERNEL);
2941 xrcd = idr_read_xrcd(cmd.xrc_domain_handle, file->ucontext, &uobj);
2947 ret = xrcd->device->reg_xrc_rcv_qp(xrcd, file, cmd.qp_num);
2951 xrcd_uobj = container_of(uobj, struct ib_uxrcd_object, uobject);
2952 mutex_lock(&file->device->ib_dev->xrcd_table_mutex);
2953 list_for_each_entry(tmp, &xrcd_uobj->xrc_reg_qp_list, list)
2954 if (cmd.qp_num == tmp->qp_num) {
2956 mutex_unlock(&file->device->ib_dev->xrcd_table_mutex);
2957 put_xrcd_read(uobj);
2960 qp_obj->qp_num = cmd.qp_num;
2961 qp_obj->domain_handle = cmd.xrc_domain_handle;
2962 list_add_tail(&qp_obj->list, &xrcd_uobj->xrc_reg_qp_list);
2963 mutex_unlock(&file->device->ib_dev->xrcd_table_mutex);
2964 atomic_inc(&xrcd->usecnt);
2965 put_xrcd_read(uobj);
2969 put_xrcd_read(uobj);
2976 int ib_uverbs_cleanup_xrc_rcv_qp(struct ib_uverbs_file *file,
2977 struct ib_xrcd *xrcd, u32 qp_num)
2980 err = xrcd->device->unreg_xrc_rcv_qp(xrcd, file, qp_num);
2982 atomic_dec(&xrcd->usecnt);
2986 ssize_t ib_uverbs_unreg_xrc_rcv_qp(struct ib_uverbs_file *file,
2987 const char __user *buf, int in_len,
2990 struct ib_uverbs_unreg_xrc_rcv_qp cmd;
2991 struct ib_uxrc_rcv_object *qp_obj, *tmp;
2992 struct ib_xrcd *xrcd;
2993 struct ib_uobject *uobj;
2994 struct ib_uxrcd_object *xrcd_uobj;
2997 if (copy_from_user(&cmd, buf, sizeof cmd))
3000 xrcd = idr_read_xrcd(cmd.xrc_domain_handle, file->ucontext, &uobj);
3004 ret = xrcd->device->unreg_xrc_rcv_qp(xrcd, file, cmd.qp_num);
3006 put_xrcd_read(uobj);
3009 atomic_dec(&xrcd->usecnt);
3011 xrcd_uobj = container_of(uobj, struct ib_uxrcd_object, uobject);
3012 mutex_lock(&file->device->ib_dev->xrcd_table_mutex);
3013 list_for_each_entry_safe(qp_obj, tmp, &xrcd_uobj->xrc_reg_qp_list, list)
3014 if (cmd.qp_num == qp_obj->qp_num) {
3015 list_del(&qp_obj->list);
3019 mutex_unlock(&file->device->ib_dev->xrcd_table_mutex);
3020 put_xrcd_read(uobj);