2 * Copyright (c) 2007 Cisco, Inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #endif /* HAVE_CONFIG_H */
42 #include <netinet/in.h>
/* Query HCA attributes through the uverbs command channel, then render the
 * raw 64-bit firmware version into attr->fw_ver as "major.minor.subminor".
 * NOTE(review): intervening source lines (declarations of ret/raw_fw_ver,
 * error-return path, closing brace) are not visible in this chunk. */
48 int mlx4_query_device(struct ibv_context *context, struct ibv_device_attr *attr)
50 	struct ibv_query_device cmd;
52 	unsigned major, minor, sub_minor;
55 	ret = ibv_cmd_query_device(context, attr, &raw_fw_ver, &cmd, sizeof cmd);
	/* Firmware version is packed into raw_fw_ver as 16-bit fields:
	 * major in bits 47:32, minor in 31:16, sub-minor in 15:0. */
59 	major     = (raw_fw_ver >> 32) & 0xffff;
60 	minor     = (raw_fw_ver >> 16) & 0xffff;
61 	sub_minor = raw_fw_ver & 0xffff;
	/* snprintf bounds the write to attr->fw_ver; sub-minor is
	 * zero-padded to three digits (e.g. "2.5.000"). */
63 	snprintf(attr->fw_ver, sizeof attr->fw_ver,
64 		 "%d.%d.%03d", major, minor, sub_minor);
/* Thin wrapper: query attributes of one HCA port straight through the
 * uverbs command channel; no driver-side state is touched. */
69 int mlx4_query_port(struct ibv_context *context, uint8_t port,
70 		    struct ibv_port_attr *attr)
72 	struct ibv_query_port cmd;
74 	return ibv_cmd_query_port(context, port, attr, &cmd, sizeof cmd);
/* Allocate a protection domain: malloc the provider-side struct, then ask
 * the kernel to create the PD.  NOTE(review): the malloc NULL-check, the
 * error cleanup inside the if-body, and the success return are in lines
 * not visible in this chunk. */
77 struct ibv_pd *mlx4_alloc_pd(struct ibv_context *context)
79 	struct ibv_alloc_pd cmd;
80 	struct mlx4_alloc_pd_resp resp;
83 	pd = malloc(sizeof *pd);
	/* Kernel fills the provider-specific resp on success; non-zero
	 * return means the PD could not be allocated. */
87 	if (ibv_cmd_alloc_pd(context, &pd->ibv_pd, &cmd, sizeof cmd,
88 			     &resp.ibv_resp, sizeof resp)) {
/* Destroy a protection domain in the kernel.  NOTE(review): the
 * error-propagation and free() of the provider struct are in lines not
 * visible in this chunk. */
98 int mlx4_free_pd(struct ibv_pd *pd)
102 	ret = ibv_cmd_dealloc_pd(pd);
/* Register a memory region with the HCA.  The region's own virtual
 * address doubles as the requested iova ((uintptr_t) addr).  Two call
 * shapes are compiled depending on whether this libibverbs version's
 * ibv_cmd_reg_mr takes response parameters
 * (IBV_CMD_REG_MR_HAS_RESP_PARAMS).  NOTE(review): malloc NULL-check,
 * error cleanup and return statements are in lines not visible here. */
110 struct ibv_mr *mlx4_reg_mr(struct ibv_pd *pd, void *addr, size_t length,
111 			   enum ibv_access_flags access)
114 	struct ibv_reg_mr cmd;
117 	mr = malloc(sizeof *mr);
121 #ifdef IBV_CMD_REG_MR_HAS_RESP_PARAMS
123 		struct ibv_reg_mr_resp resp;
125 		ret = ibv_cmd_reg_mr(pd, addr, length, (uintptr_t) addr,
126 				     access, mr, &cmd, sizeof cmd,
	/* Older libibverbs: same command, no response buffer. */
130 	ret = ibv_cmd_reg_mr(pd, addr, length, (uintptr_t) addr, access, mr,
/* Deregister a memory region in the kernel.  NOTE(review): error
 * propagation and free() of the mr struct are in lines not visible in
 * this chunk. */
141 int mlx4_dereg_mr(struct ibv_mr *mr)
145 	ret = ibv_cmd_dereg_mr(mr);
/* Round a requested queue size up to the next power of two (minimum 1),
 * as required by the hardware queue layout. */
153 static int align_queue_size(int req)
157 	for (nent = 1; nent < req; nent <<= 1)
/* Create a completion queue: allocate the provider struct, its spinlock,
 * the CQE buffer and the pair of doorbell records, then ask the kernel to
 * create the CQ.  NOTE(review): the sanity-check body, NULL checks after
 * malloc/mlx4_alloc_db, the success return and the goto-style error-path
 * labels are in lines not visible in this chunk. */
163 struct ibv_cq *mlx4_create_cq(struct ibv_context *context, int cqe,
164 			      struct ibv_comp_channel *channel,
167 	struct mlx4_create_cq cmd;
168 	struct mlx4_create_cq_resp resp;
171 	struct mlx4_context *mctx = to_mctx(context);
173 	/* Sanity check CQ size before proceeding */
177 	cq = malloc(sizeof *cq);
183 	if (pthread_spin_init(&cq->lock, PTHREAD_PROCESS_PRIVATE))
	/* Reserve one extra CQE and round up to a power of two; the kernel
	 * is told cqe - 1 below so the user-visible capacity matches. */
186 	cqe = align_queue_size(cqe + 1);
188 	if (mlx4_alloc_cq_buf(to_mdev(context->device), &cq->buf, cqe, mctx->cqe_size))
191 	cq->cqe_size = mctx->cqe_size;
	/* Doorbell records: consumer index first, arm doorbell is the
	 * adjacent record. */
193 	cq->set_ci_db  = mlx4_alloc_db(to_mctx(context), MLX4_DB_TYPE_CQ);
197 	cq->arm_db     = cq->set_ci_db + 1;
202 	cmd.buf_addr = (uintptr_t) cq->buf.buf;
203 	cmd.db_addr  = (uintptr_t) cq->set_ci_db;
205 	ret = ibv_cmd_create_cq(context, cqe - 1, channel, comp_vector,
206 				&cq->ibv_cq, &cmd.ibv_cmd, sizeof cmd,
207 				&resp.ibv_resp, sizeof resp);
	/* Error unwind: release doorbell then buffer (labels not visible). */
216 	mlx4_free_db(to_mctx(context), MLX4_DB_TYPE_CQ, cq->set_ci_db);
219 	mlx4_free_buf(&cq->buf);
/* Resize an existing CQ under its spinlock: allocate a new buffer, issue
 * the kernel resize, copy outstanding CQEs across, and free the old
 * buffer.  NOTE(review): the sanity-check body, buf declaration, several
 * early-out/goto lines and the final return are not visible here. */
227 int mlx4_resize_cq(struct ibv_cq *ibcq, int cqe)
229 	struct mlx4_cq *cq = to_mcq(ibcq);
230 	struct mlx4_resize_cq cmd;
232 	int old_cqe, outst_cqe, ret;
234 	/* Sanity check CQ size before proceeding */
238 	pthread_spin_lock(&cq->lock);
	/* Same +1 spare-entry rounding as mlx4_create_cq; a no-op resize
	 * (same effective size) is detected and skipped. */
240 	cqe = align_queue_size(cqe + 1);
241 	if (cqe == ibcq->cqe + 1) {
246 	/* Can't be smaller then the number of outstanding CQEs */
247 	outst_cqe = mlx4_get_outstanding_cqes(cq);
248 	if (cqe < outst_cqe + 1) {
253 	ret = mlx4_alloc_cq_buf(to_mdev(ibcq->context->device), &buf, cqe,
259 	cmd.buf_addr = (uintptr_t) buf.buf;
261 #ifdef IBV_CMD_RESIZE_CQ_HAS_RESP_PARAMS
263 		struct ibv_resize_cq_resp resp;
264 		ret = ibv_cmd_resize_cq(ibcq, cqe - 1, &cmd.ibv_cmd, sizeof cmd,
	/* Older libibverbs: resize command without a response buffer. */
268 	ret = ibv_cmd_resize_cq(ibcq, cqe - 1, &cmd.ibv_cmd, sizeof cmd);
	/* Migrate unpolled CQEs from the old ring into the new one, then
	 * drop the old buffer. */
275 	mlx4_cq_resize_copy_cqes(cq, buf.buf, old_cqe);
277 	mlx4_free_buf(&cq->buf);
281 	pthread_spin_unlock(&cq->lock);
/* Destroy a CQ in the kernel, then release its doorbell record and CQE
 * buffer.  NOTE(review): the early-return on kernel failure and the free
 * of the cq struct are in lines not visible in this chunk. */
285 int mlx4_destroy_cq(struct ibv_cq *cq)
289 	ret = ibv_cmd_destroy_cq(cq);
293 	mlx4_free_db(to_mctx(cq->context), MLX4_DB_TYPE_CQ, to_mcq(cq)->set_ci_db);
294 	mlx4_free_buf(&to_mcq(cq)->buf);
/* Create a shared receive queue: cap max_wr at 2^16 and max_sge at 64,
 * allocate the WQE buffer and doorbell record, then create the SRQ in the
 * kernel.  NOTE(review): NULL checks, attribute write-back, and the
 * goto-style error labels are in lines not visible in this chunk. */
300 struct ibv_srq *mlx4_create_srq(struct ibv_pd *pd,
301 				struct ibv_srq_init_attr *attr)
303 	struct mlx4_create_srq cmd;
304 	struct mlx4_create_srq_resp resp;
305 	struct mlx4_srq *srq;
308 	/* Sanity check SRQ size before proceeding */
309 	if (attr->attr.max_wr > 1 << 16 || attr->attr.max_sge > 64)
312 	srq = malloc(sizeof *srq);
316 	if (pthread_spin_init(&srq->lock, PTHREAD_PROCESS_PRIVATE))
	/* One extra WQE reserved, then rounded to a power of two. */
319 	srq->max     = align_queue_size(attr->attr.max_wr + 1);
320 	srq->max_gs  = attr->attr.max_sge;
323 	if (mlx4_alloc_srq_buf(pd, &attr->attr, srq))
326 	srq->db = mlx4_alloc_db(to_mctx(pd->context), MLX4_DB_TYPE_RQ);
332 	cmd.buf_addr = (uintptr_t) srq->buf.buf;
333 	cmd.db_addr  = (uintptr_t) srq->db;
335 	ret = ibv_cmd_create_srq(pd, &srq->ibv_srq, attr,
336 				 &cmd.ibv_cmd, sizeof cmd,
337 				 &resp.ibv_resp, sizeof resp);
	/* Cache the kernel-assigned SRQ number for later CQE matching. */
341 	srq->srqn = resp.srqn;
343 	return &srq->ibv_srq;
	/* Error unwind: doorbell first, then buffer (labels not visible). */
346 	mlx4_free_db(to_mctx(pd->context), MLX4_DB_TYPE_RQ, srq->db);
350 	mlx4_free_buf(&srq->buf);
/* Thin wrapper: modify SRQ attributes via the uverbs command channel. */
358 int mlx4_modify_srq(struct ibv_srq *srq,
359 		    struct ibv_srq_attr *attr,
360 		    enum ibv_srq_attr_mask attr_mask)
362 	struct ibv_modify_srq cmd;
364 	return ibv_cmd_modify_srq(srq, attr, attr_mask, &cmd, sizeof cmd);
/* Thin wrapper: query SRQ attributes via the uverbs command channel. */
367 int mlx4_query_srq(struct ibv_srq *srq,
368 		   struct ibv_srq_attr *attr)
370 	struct ibv_query_srq cmd;
372 	return ibv_cmd_query_srq(srq, attr, &cmd, sizeof cmd);
/* Destroy an SRQ.  For XRC SRQs the attached CQ is first cleaned of any
 * CQEs referencing this SRQ and the srqn is removed from the context's
 * XRC SRQ table; if the kernel destroy then fails, the table entry is
 * restored so the SRQ stays usable.  NOTE(review): the XRC condition
 * guarding the mcq block and the final free()/return are in lines not
 * visible in this chunk. */
375 int mlx4_destroy_srq(struct ibv_srq *ibsrq)
377 	struct mlx4_srq *srq = to_msrq(ibsrq);
378 	struct mlx4_cq *mcq = NULL;
383 		mcq = to_mcq(ibsrq->xrc_cq);
384 		mlx4_cq_clean(mcq, 0, srq);
385 		pthread_spin_lock(&mcq->lock);
386 		mlx4_clear_xrc_srq(to_mctx(ibsrq->context), srq->srqn);
387 		pthread_spin_unlock(&mcq->lock);
390 	ret = ibv_cmd_destroy_srq(ibsrq);
	/* Kernel destroy failed: re-register the srqn under the CQ lock. */
393 			pthread_spin_lock(&mcq->lock);
394 			mlx4_store_xrc_srq(to_mctx(ibsrq->context),
396 			pthread_spin_unlock(&mcq->lock);
401 	mlx4_free_db(to_mctx(ibsrq->context), MLX4_DB_TYPE_RQ, srq->db);
402 	mlx4_free_buf(&srq->buf);
/* Validate requested QP capabilities against the device limits cached in
 * the mlx4 context, and check that the requested inline-data size fits in
 * a maximum-size WQE after subtracting the per-QP-type segment overhead.
 * NOTE(review): the nsegs/size declarations, the remaining switch cases,
 * and the return statements are in lines not visible in this chunk. */
409 static int verify_sizes(struct ibv_qp_init_attr *attr, struct mlx4_context *context)
414 	if (attr->cap.max_send_wr  > context->max_qp_wr ||
415 	    attr->cap.max_recv_wr  > context->max_qp_wr ||
416 	    attr->cap.max_send_sge > context->max_sge   ||
417 	    attr->cap.max_recv_sge > context->max_sge)
420 	if (attr->cap.max_inline_data) {
421 		nsegs = num_inline_segs(attr->cap.max_inline_data, attr->qp_type);
422 		size = MLX4_MAX_WQE_SIZE - nsegs * sizeof (struct mlx4_wqe_inline_seg);
423 		switch (attr->qp_type) {
	/* Datagram QPs need room for control + datagram segments ... */
425 			size -= (sizeof (struct mlx4_wqe_ctrl_seg) +
426 				 sizeof (struct mlx4_wqe_datagram_seg));
	/* ... connected QPs need control + remote-address segments. */
432 			size -= (sizeof (struct mlx4_wqe_ctrl_seg) +
433 				 sizeof (struct mlx4_wqe_raddr_seg));
440 		if (attr->cap.max_inline_data > size)
/* Create a QP: size and allocate the send/receive work-queue buffer and
 * (for non-SRQ, non-XRC QPs) an RQ doorbell record, create the QP in the
 * kernel under the context's qp_table_mutex, store it in the provider's
 * qp table, and write the effective capabilities back into attr->cap.
 * NOTE(review): several NULL checks, if-bodies, goto labels and the
 * final return are in lines not visible in this chunk. */
447 struct ibv_qp *mlx4_create_qp(struct ibv_pd *pd, struct ibv_qp_init_attr *attr)
449 	struct mlx4_create_qp cmd;
450 	struct ibv_create_qp_resp resp;
453 	struct mlx4_context *context = to_mctx(pd->context);
456 	/* Sanity check QP size before proceeding */
457 	if (verify_sizes(attr, context))
460 	qp = malloc(sizeof *qp);
464 	mlx4_calc_sq_wqe_size(&attr->cap, attr->qp_type, qp);
467 	 * We need to leave 2 KB + 1 WQE of headroom in the SQ to
468 	 * allow HW to prefetch.
470 	qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + 1;
471 	qp->sq.wqe_cnt = align_queue_size(attr->cap.max_send_wr + qp->sq_spare_wqes);
472 	qp->rq.wqe_cnt = align_queue_size(attr->cap.max_recv_wr);
	/* SRQ-attached and XRC QPs have no receive queue of their own. */
474 	if (attr->srq || attr->qp_type == IBV_QPT_XRC)
475 		attr->cap.max_recv_wr = qp->rq.wqe_cnt = 0;
	/* The kernel requires at least one RQ entry/SGE in the request. */
477 		if (attr->cap.max_recv_sge < 1)
478 			attr->cap.max_recv_sge = 1;
479 		if (attr->cap.max_recv_wr < 1)
480 			attr->cap.max_recv_wr = 1;
483 	if (mlx4_alloc_qp_buf(pd, &attr->cap, attr->qp_type, qp))
486 	mlx4_init_qp_indices(qp);
488 	if (pthread_spin_init(&qp->sq.lock, PTHREAD_PROCESS_PRIVATE) ||
489 	    pthread_spin_init(&qp->rq.lock, PTHREAD_PROCESS_PRIVATE))
	/* RQ doorbell only exists when this QP owns a receive queue. */
492 	if (!attr->srq && attr->qp_type != IBV_QPT_XRC) {
493 		qp->db = mlx4_alloc_db(to_mctx(pd->context), MLX4_DB_TYPE_RQ);
500 	cmd.buf_addr	    = (uintptr_t) qp->buf.buf;
501 	if (attr->srq || attr->qp_type == IBV_QPT_XRC)
504 		cmd.db_addr = (uintptr_t) qp->db;
505 	cmd.log_sq_stride   = qp->sq.wqe_shift;
	/* log2 of the SQ basic-block count, derived by counting up. */
506 	for (cmd.log_sq_bb_count = 0;
507 	     qp->sq.wqe_cnt > 1 << cmd.log_sq_bb_count;
508 	     ++cmd.log_sq_bb_count)
510 	cmd.sq_no_prefetch = 0;	/* OK for ABI 2: just a reserved field */
511 	memset(cmd.reserved, 0, sizeof cmd.reserved);
	/* Serialize kernel create + table store against other QP ops. */
513 	pthread_mutex_lock(&to_mctx(pd->context)->qp_table_mutex);
515 	ret = ibv_cmd_create_qp(pd, &qp->ibv_qp, attr, &cmd.ibv_cmd, sizeof cmd,
520 	ret = mlx4_store_qp(to_mctx(pd->context), qp->ibv_qp.qp_num, qp);
523 	pthread_mutex_unlock(&to_mctx(pd->context)->qp_table_mutex);
525 	qp->rq.wqe_cnt = attr->cap.max_recv_wr;
526 	qp->rq.max_gs  = attr->cap.max_recv_sge;
528 	/* adjust rq maxima to not exceed reported device maxima */
529 	attr->cap.max_recv_wr = min(context->max_qp_wr, attr->cap.max_recv_wr);
530 	attr->cap.max_recv_sge = min(context->max_sge, attr->cap.max_recv_sge);
532 	qp->rq.max_post = attr->cap.max_recv_wr;
533 	mlx4_set_sq_sizes(qp, &attr->cap, attr->qp_type);
	/* Pre-compute the big-endian doorbell value (qpn << 8). */
535 	qp->doorbell_qpn    = htonl(qp->ibv_qp.qp_num << 8);
536 	if (attr->sq_sig_all)
537 		qp->sq_signal_bits = htonl(MLX4_WQE_CTRL_CQ_UPDATE);
539 		qp->sq_signal_bits = 0;
	/* Error unwind (labels not visible): undo kernel create, table
	 * lock, doorbell and buffer in reverse order of acquisition. */
544 	ibv_cmd_destroy_qp(&qp->ibv_qp);
547 	pthread_mutex_unlock(&to_mctx(pd->context)->qp_table_mutex);
548 	if (!attr->srq && attr->qp_type != IBV_QPT_XRC)
549 		mlx4_free_db(to_mctx(pd->context), MLX4_DB_TYPE_RQ, qp->db);
555 	mlx4_free_buf(&qp->buf);
/* Query QP attributes from the kernel, then overwrite the send-side
 * capabilities with the provider's cached values (the kernel does not
 * know the userspace SQ layout).  NOTE(review): ret declaration, the
 * early-return on error and the final return are not visible here. */
563 int mlx4_query_qp(struct ibv_qp *ibqp, struct ibv_qp_attr *attr,
564 		  enum ibv_qp_attr_mask attr_mask,
565 		  struct ibv_qp_init_attr *init_attr)
567 	struct ibv_query_qp cmd;
568 	struct mlx4_qp *qp = to_mqp(ibqp);
571 	ret = ibv_cmd_query_qp(ibqp, attr, attr_mask, init_attr, &cmd, sizeof cmd);
575 	init_attr->cap.max_send_wr     = qp->sq.max_post;
576 	init_attr->cap.max_send_sge    = qp->sq.max_gs;
577 	init_attr->cap.max_inline_data = qp->max_inline_data;
579 	attr->cap = init_attr->cap;
/* Modify QP state via the kernel, with provider-side bookkeeping around
 * the transition: entering INIT from RESET re-initializes SQ ownership
 * bits; a successful transition to RESET flushes this QP's CQEs from
 * both CQs and resets the queue indices.  NOTE(review): the ret
 * declaration, success-check condition on line ~598, the doorbell-clear
 * for non-SRQ QPs and the return are in lines not visible here. */
584 int mlx4_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
585 		   enum ibv_qp_attr_mask attr_mask)
587 	struct ibv_modify_qp cmd;
590 	if (qp->state == IBV_QPS_RESET &&
591 	    attr_mask & IBV_QP_STATE   &&
592 	    attr->qp_state == IBV_QPS_INIT) {
593 		mlx4_qp_init_sq_ownership(to_mqp(qp));
596 	ret = ibv_cmd_modify_qp(qp, attr, attr_mask, &cmd, sizeof cmd);
	/* On a successful transition to RESET, scrub stale completions. */
599 	    (attr_mask & IBV_QP_STATE) &&
600 	    attr->qp_state == IBV_QPS_RESET) {
601 		mlx4_cq_clean(to_mcq(qp->recv_cq), qp->qp_num,
602 			      qp->srq ? to_msrq(qp->srq) : NULL);
603 		if (qp->send_cq != qp->recv_cq)
604 			mlx4_cq_clean(to_mcq(qp->send_cq), qp->qp_num, NULL);
606 		mlx4_init_qp_indices(to_mqp(qp));
607 		if (!qp->srq && qp->qp_type != IBV_QPT_XRC)
/* Acquire the spinlocks of a QP's send and receive CQs.  When they are
 * distinct, locks are always taken in ascending-cqn order so that
 * concurrent callers cannot deadlock against each other. */
614 static void mlx4_lock_cqs(struct ibv_qp *qp)
616 	struct mlx4_cq *send_cq = to_mcq(qp->send_cq);
617 	struct mlx4_cq *recv_cq = to_mcq(qp->recv_cq);
619 	if (send_cq == recv_cq)
620 		pthread_spin_lock(&send_cq->lock);
621 	else if (send_cq->cqn < recv_cq->cqn) {
622 		pthread_spin_lock(&send_cq->lock);
623 		pthread_spin_lock(&recv_cq->lock);
	/* else branch: recv CQ has the lower cqn, lock it first. */
625 		pthread_spin_lock(&recv_cq->lock);
626 		pthread_spin_lock(&send_cq->lock);
/* Release the CQ spinlocks taken by mlx4_lock_cqs, dropping them in the
 * reverse of the ascending-cqn acquisition order. */
630 static void mlx4_unlock_cqs(struct ibv_qp *qp)
632 	struct mlx4_cq *send_cq = to_mcq(qp->send_cq);
633 	struct mlx4_cq *recv_cq = to_mcq(qp->recv_cq);
635 	if (send_cq == recv_cq)
636 		pthread_spin_unlock(&send_cq->lock);
637 	else if (send_cq->cqn < recv_cq->cqn) {
638 		pthread_spin_unlock(&recv_cq->lock);
639 		pthread_spin_unlock(&send_cq->lock);
	/* else branch: send CQ was taken second, release it first. */
641 		pthread_spin_unlock(&send_cq->lock);
642 		pthread_spin_unlock(&recv_cq->lock);
/* Destroy a QP: kernel destroy under the qp_table_mutex, then — with
 * both CQ locks held — flush this QP's CQEs and remove it from the
 * provider qp table, and finally release the doorbell, buffer and
 * struct.  NOTE(review): the early-return on kernel failure, the
 * mlx4_lock_cqs() call preceding the cleanup, and the free()/return are
 * in lines not visible in this chunk. */
646 int mlx4_destroy_qp(struct ibv_qp *ibqp)
648 	struct mlx4_qp *qp = to_mqp(ibqp);
651 	pthread_mutex_lock(&to_mctx(ibqp->context)->qp_table_mutex);
652 	ret = ibv_cmd_destroy_qp(ibqp);
654 		pthread_mutex_unlock(&to_mctx(ibqp->context)->qp_table_mutex);
	/* __mlx4_cq_clean variants assume the CQ lock is already held. */
660 	__mlx4_cq_clean(to_mcq(ibqp->recv_cq), ibqp->qp_num,
661 			ibqp->srq ? to_msrq(ibqp->srq) : NULL);
662 	if (ibqp->send_cq != ibqp->recv_cq)
663 		__mlx4_cq_clean(to_mcq(ibqp->send_cq), ibqp->qp_num, NULL);
665 	mlx4_clear_qp(to_mctx(ibqp->context), ibqp->qp_num);
667 	mlx4_unlock_cqs(ibqp);
668 	pthread_mutex_unlock(&to_mctx(ibqp->context)->qp_table_mutex);
670 	if (!ibqp->srq && ibqp->qp_type != IBV_QPT_XRC)
671 		mlx4_free_db(to_mctx(ibqp->context), MLX4_DB_TYPE_RQ, qp->db);
675 	mlx4_free_buf(&qp->buf);
/* Create an address handle: fill the hardware address vector (UD AV)
 * from attr, then apply Ethernet (RoCE) specifics — GID resolution,
 * multicast DLID, and VLAN/SL bits — when the port's link layer is
 * Ethernet.  NOTE(review): NULL checks, the is_mcast declaration, error
 * paths and the final return are in lines not visible in this chunk. */
681 struct ibv_ah *mlx4_create_ah(struct ibv_pd *pd, struct ibv_ah_attr *attr)
684 	struct ibv_port_attr port_attr;
687 	ah = malloc(sizeof *ah);
691 	memset(ah, 0, sizeof *ah);
	/* port number lives in the top byte of the port_pd word. */
693 	ah->av.port_pd = htonl(to_mpd(pd)->pdn | (attr->port_num << 24));
694 	ah->av.g_slid  = attr->src_path_bits;
695 	ah->av.dlid    = htons(attr->dlid);
696 	if (attr->static_rate) {
697 		ah->av.stat_rate = attr->static_rate + MLX4_STAT_RATE_OFFSET;
698 		/* XXX check rate cap? */
700 	ah->av.sl_tclass_flowlabel = htonl(attr->sl << 28);
701 	if (attr->is_global) {
	/* 0x80 in g_slid marks the AV as carrying a GRH. */
702 		ah->av.g_slid   |= 0x80;
703 		ah->av.gid_index = attr->grh.sgid_index;
704 		ah->av.hop_limit = attr->grh.hop_limit;
705 		ah->av.sl_tclass_flowlabel |=
706 			htonl((attr->grh.traffic_class << 20) |
707 			            attr->grh.flow_label);
708 		memcpy(ah->av.dgid, attr->grh.dgid.raw, 16);
711 	if (ibv_query_port(pd->context, attr->port_num, &port_attr))
714 	if (port_attr.link_layer == IBV_LINK_LAYER_ETHERNET) {
715 		if (ibv_resolve_eth_gid(pd, attr->port_num,
716 					(union ibv_gid *)ah->av.dgid,
717 					attr->grh.sgid_index,
719 					&ah->tagged, &is_mcast))
	/* Multicast over RoCE uses the fixed permissive-style DLID
	 * 0xc000 and sets bit 31 of port_pd. */
723 			ah->av.dlid = htons(0xc000);
724 			ah->av.port_pd |= htonl(1 << 31);
	/* VLAN-tagged path: flag bit 29 and fold the SL into the VLAN
	 * tag's priority field (bits 15:13). */
727 			ah->av.port_pd |= htonl(1 << 29);
728 			ah->vlan |= (attr->sl & 7) << 13;
/* Destroy an address handle.  NOTE(review): the body (free of the
 * provider struct and return) is in lines not visible in this chunk. */
739 int mlx4_destroy_ah(struct ibv_ah *ah)
746 #ifdef HAVE_IBV_XRC_OPS
/* Create an XRC SRQ: same sizing/allocation path as mlx4_create_srq, but
 * using the XRC create command, publishing the SRQ number through
 * ibv_srq.xrc_srq_num, and registering the SRQ in the context's XRC SRQ
 * table so receive completions can be demultiplexed.  NOTE(review): NULL
 * checks, some command arguments, and the goto-style error labels are in
 * lines not visible in this chunk. */
747 struct ibv_srq *mlx4_create_xrc_srq(struct ibv_pd *pd,
748 				    struct ibv_xrc_domain *xrc_domain,
749 				    struct ibv_cq *xrc_cq,
750 				    struct ibv_srq_init_attr *attr)
752 	struct mlx4_create_xrc_srq cmd;
753 	struct mlx4_create_srq_resp resp;
754 	struct mlx4_srq *srq;
757 	/* Sanity check SRQ size before proceeding */
758 	if (attr->attr.max_wr > 1 << 16 || attr->attr.max_sge > 64)
761 	srq = malloc(sizeof *srq);
765 	if (pthread_spin_init(&srq->lock, PTHREAD_PROCESS_PRIVATE))
	/* One extra WQE reserved, then rounded to a power of two. */
768 	srq->max     = align_queue_size(attr->attr.max_wr + 1);
769 	srq->max_gs  = attr->attr.max_sge;
772 	if (mlx4_alloc_srq_buf(pd, &attr->attr, srq))
775 	srq->db = mlx4_alloc_db(to_mctx(pd->context), MLX4_DB_TYPE_RQ);
781 	cmd.buf_addr = (uintptr_t) srq->buf.buf;
782 	cmd.db_addr  = (uintptr_t) srq->db;
784 	ret = ibv_cmd_create_xrc_srq(pd, &srq->ibv_srq, attr,
787 				     &cmd.ibv_cmd, sizeof cmd,
788 				     &resp.ibv_resp, sizeof resp);
792 	srq->ibv_srq.xrc_srq_num = srq->srqn = resp.srqn;
	/* Make the srqn resolvable from CQE handling paths. */
794 	ret = mlx4_store_xrc_srq(to_mctx(pd->context), srq->ibv_srq.xrc_srq_num, srq);
798 	return &srq->ibv_srq;
	/* Error unwind (labels not visible): kernel destroy, doorbell,
	 * then buffer, in reverse order of acquisition. */
801 	ibv_cmd_destroy_srq(&srq->ibv_srq);
804 	mlx4_free_db(to_mctx(pd->context), MLX4_DB_TYPE_RQ, srq->db);
808 	mlx4_free_buf(&srq->buf);
/* Open (or create) an XRC domain backed by a file descriptor and cache
 * the kernel-assigned xrcdn.  NOTE(review): the fd/oflag parameters, the
 * malloc NULL-check and the error path are in lines not visible here. */
816 struct ibv_xrc_domain *mlx4_open_xrc_domain(struct ibv_context *context,
820 	struct mlx4_open_xrc_domain_resp resp;
821 	struct mlx4_xrc_domain *xrcd;
823 	xrcd = malloc(sizeof *xrcd);
827 	ret = ibv_cmd_open_xrc_domain(context, fd, oflag, &xrcd->ibv_xrcd,
828 				      &resp.ibv_resp, sizeof resp);
834 	xrcd->xrcdn = resp.xrcdn;
835 	return &xrcd->ibv_xrcd;
/* Close an XRC domain in the kernel.  NOTE(review): error handling and
 * the free of the provider struct are in lines not visible here. */
838 int mlx4_close_xrc_domain(struct ibv_xrc_domain *d)
841 	ret = ibv_cmd_close_xrc_domain(d);
/* Thin wrapper: create an XRC receive-side QP via the uverbs command
 * channel; the kernel owns the QP, only its number is returned. */
847 int mlx4_create_xrc_rcv_qp(struct ibv_qp_init_attr *init_attr,
848 			   uint32_t *xrc_qp_num)
851 	return ibv_cmd_create_xrc_rcv_qp(init_attr, xrc_qp_num);
/* Thin wrapper: modify a kernel-owned XRC receive QP.  NOTE(review): the
 * xrc_qp_num and attr_mask parameter lines are not visible here. */
854 int mlx4_modify_xrc_rcv_qp(struct ibv_xrc_domain *xrc_domain,
856 			   struct ibv_qp_attr *attr,
859 	return ibv_cmd_modify_xrc_rcv_qp(xrc_domain, xrc_qp_num,
/* Query a kernel-owned XRC receive QP, then normalize init_attr: such a
 * QP has no userspace send/receive resources, so the capability fields
 * are forced to fixed values and the CQ/SRQ/context pointers cleared.
 * NOTE(review): the ret declaration, early-return on error and the final
 * return are in lines not visible in this chunk. */
863 int mlx4_query_xrc_rcv_qp(struct ibv_xrc_domain *xrc_domain,
865 			  struct ibv_qp_attr *attr,
867 			  struct ibv_qp_init_attr *init_attr)
871 	ret = ibv_cmd_query_xrc_rcv_qp(xrc_domain, xrc_qp_num,
872 				       attr, attr_mask, init_attr);
876 	init_attr->cap.max_send_wr = init_attr->cap.max_send_sge = 1;
877 	init_attr->cap.max_recv_sge = init_attr->cap.max_recv_wr = 0;
878 	init_attr->cap.max_inline_data = 0;
879 	init_attr->recv_cq = init_attr->send_cq = NULL;
880 	init_attr->srq = NULL;
881 	init_attr->xrc_domain = xrc_domain;
882 	init_attr->qp_type = IBV_QPT_XRC;
883 	init_attr->qp_context = NULL;
884 	attr->cap = init_attr->cap;
/* Thin wrapper: register this process as a user of an XRC receive QP.
 * NOTE(review): the xrc_qp_num parameter line is not visible here. */
889 int mlx4_reg_xrc_rcv_qp(struct ibv_xrc_domain *xrc_domain,
892 	return ibv_cmd_reg_xrc_rcv_qp(xrc_domain, xrc_qp_num);
/* Thin wrapper: drop this process's registration on an XRC receive QP.
 * NOTE(review): the xrc_qp_num parameter line is not visible here. */
895 int mlx4_unreg_xrc_rcv_qp(struct ibv_xrc_domain *xrc_domain,
898 	return ibv_cmd_unreg_xrc_rcv_qp(xrc_domain, xrc_qp_num);