/*
 * Copyright (c) 2007 Cisco, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#if HAVE_CONFIG_H
#  include <config.h>
#endif /* HAVE_CONFIG_H */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <pthread.h>
#include <netinet/in.h>

#include "mlx4.h"
#include "mlx4-abi.h"
#include "wqe.h"
int mlx4_query_device(struct ibv_context *context, struct ibv_device_attr *attr)
{
	struct ibv_query_device cmd;
	uint64_t raw_fw_ver;
	unsigned major, minor, sub_minor;
	int ret;

	ret = ibv_cmd_query_device(context, attr, &raw_fw_ver, &cmd, sizeof cmd);
	if (ret)
		return ret;

	/* raw_fw_ver packs the firmware version as three 16-bit fields */
	major     = (raw_fw_ver >> 32) & 0xffff;
	minor     = (raw_fw_ver >> 16) & 0xffff;
	sub_minor = raw_fw_ver & 0xffff;

	snprintf(attr->fw_ver, sizeof attr->fw_ver,
		 "%d.%d.%03d", major, minor, sub_minor);

	return 0;
}
int mlx4_query_port(struct ibv_context *context, uint8_t port,
		    struct ibv_port_attr *attr)
{
	struct ibv_query_port cmd;

	return ibv_cmd_query_port(context, port, attr, &cmd, sizeof cmd);
}
struct ibv_pd *mlx4_alloc_pd(struct ibv_context *context)
{
	struct ibv_alloc_pd cmd;
	struct mlx4_alloc_pd_resp resp;
	struct mlx4_pd *pd;

	pd = malloc(sizeof *pd);
	if (!pd)
		return NULL;

	if (ibv_cmd_alloc_pd(context, &pd->ibv_pd, &cmd, sizeof cmd,
			     &resp.ibv_resp, sizeof resp)) {
		free(pd);
		return NULL;
	}

	pd->pdn = resp.pdn;

	return &pd->ibv_pd;
}
int mlx4_free_pd(struct ibv_pd *pd)
{
	int ret;

	ret = ibv_cmd_dealloc_pd(pd);
	if (ret)
		return ret;

	free(to_mpd(pd));
	return 0;
}
struct ibv_mr *mlx4_reg_mr(struct ibv_pd *pd, void *addr, size_t length,
			   enum ibv_access_flags access)
{
	struct ibv_mr *mr;
	struct ibv_reg_mr cmd;
	int ret;

	mr = malloc(sizeof *mr);
	if (!mr)
		return NULL;

#ifdef IBV_CMD_REG_MR_HAS_RESP_PARAMS
	{
		struct ibv_reg_mr_resp resp;

		ret = ibv_cmd_reg_mr(pd, addr, length, (uintptr_t) addr,
				     access, mr, &cmd, sizeof cmd,
				     &resp, sizeof resp);
	}
#else
	ret = ibv_cmd_reg_mr(pd, addr, length, (uintptr_t) addr, access, mr,
			     &cmd, sizeof cmd);
#endif
	if (ret) {
		free(mr);
		return NULL;
	}

	return mr;
}
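/*
 * The IBV_CMD_REG_MR_HAS_RESP_PARAMS dance above lets this file build
 * against libibverbs versions from both before and after the command
 * ABI grew a response buffer for ibv_cmd_reg_mr().
 */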
int mlx4_dereg_mr(struct ibv_mr *mr)
{
	int ret;

	ret = ibv_cmd_dereg_mr(mr);
	if (ret)
		return ret;

	free(mr);
	return 0;
}
static int align_queue_size(int req)
{
	int nent;

	for (nent = 1; nent < req; nent <<= 1)
		; /* nothing */

	return nent;
}
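/*
 * Queues are sized to powers of two so producer/consumer indices can
 * wrap with a cheap bitwise mask instead of a division; e.g. a request
 * for 5 entries is rounded up to 8.
 */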
struct ibv_cq *mlx4_create_cq(struct ibv_context *context, int cqe,
			      struct ibv_comp_channel *channel,
			      int comp_vector)
{
	struct mlx4_create_cq      cmd;
	struct mlx4_create_cq_resp resp;
	struct mlx4_cq		  *cq;
	int			   ret;

	/* Sanity check CQ size before proceeding */
	if (cqe > 0x3fffff)
		return NULL;

	cq = malloc(sizeof *cq);
	if (!cq)
		return NULL;

	cq->cons_index = 0;

	if (pthread_spin_init(&cq->lock, PTHREAD_PROCESS_PRIVATE))
		goto err;

	cqe = align_queue_size(cqe + 1);

	if (mlx4_alloc_cq_buf(to_mdev(context->device), &cq->buf, cqe))
		goto err;

	cq->set_ci_db = mlx4_alloc_db(to_mctx(context), MLX4_DB_TYPE_CQ);
	if (!cq->set_ci_db)
		goto err_buf;

	cq->arm_db     = cq->set_ci_db + 1;
	*cq->arm_db    = 0;
	cq->arm_sn     = 1;
	*cq->set_ci_db = 0;

	cmd.buf_addr = (uintptr_t) cq->buf.buf;
	cmd.db_addr  = (uintptr_t) cq->set_ci_db;

	ret = ibv_cmd_create_cq(context, cqe - 1, channel, comp_vector,
				&cq->ibv_cq, &cmd.ibv_cmd, sizeof cmd,
				&resp.ibv_resp, sizeof resp);
	if (ret)
		goto err_db;

	cq->cqn = resp.cqn;

	return &cq->ibv_cq;

err_db:
	mlx4_free_db(to_mctx(context), MLX4_DB_TYPE_CQ, cq->set_ci_db);

err_buf:
	mlx4_free_buf(&cq->buf);

err:
	free(cq);

	return NULL;
}
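/*
 * Sizing note: align_queue_size(cqe + 1) entries are allocated but
 * cqe - 1 is passed to the kernel, so one CQE always remains
 * driver-owned.  set_ci_db and arm_db are the two halves of the CQ
 * doorbell record: the consumer index and the arm state used for
 * completion events.
 */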
int mlx4_resize_cq(struct ibv_cq *ibcq, int cqe)
{
	struct mlx4_cq *cq = to_mcq(ibcq);
	struct mlx4_resize_cq cmd;
	struct mlx4_buf buf;
	int old_cqe, outst_cqe, ret;

	/* Sanity check CQ size before proceeding */
	if (cqe > 0x3fffff)
		return EINVAL;

	pthread_spin_lock(&cq->lock);

	cqe = align_queue_size(cqe + 1);
	if (cqe == ibcq->cqe + 1) {
		ret = 0;
		goto out;
	}

	/* Can't be smaller than the number of outstanding CQEs */
	outst_cqe = mlx4_get_outstanding_cqes(cq);
	if (cqe < outst_cqe + 1) {
		ret = EINVAL;
		goto out;
	}

	ret = mlx4_alloc_cq_buf(to_mdev(ibcq->context->device), &buf, cqe);
	if (ret)
		goto out;

	cmd.buf_addr = (uintptr_t) buf.buf;

#ifdef IBV_CMD_RESIZE_CQ_HAS_RESP_PARAMS
	{
		struct ibv_resize_cq_resp resp;
		ret = ibv_cmd_resize_cq(ibcq, cqe - 1, &cmd.ibv_cmd, sizeof cmd,
					&resp, sizeof resp);
	}
#else
	ret = ibv_cmd_resize_cq(ibcq, cqe - 1, &cmd.ibv_cmd, sizeof cmd);
#endif
	if (ret) {
		mlx4_free_buf(&buf);
		goto out;
	}

	old_cqe = ibcq->cqe;
	mlx4_cq_resize_copy_cqes(cq, buf.buf, old_cqe);

	mlx4_free_buf(&cq->buf);
	cq->buf = buf;

out:
	pthread_spin_unlock(&cq->lock);
	return ret;
}
int mlx4_destroy_cq(struct ibv_cq *cq)
{
	int ret;

	ret = ibv_cmd_destroy_cq(cq);
	if (ret)
		return ret;

	mlx4_free_db(to_mctx(cq->context), MLX4_DB_TYPE_CQ, to_mcq(cq)->set_ci_db);
	mlx4_free_buf(&to_mcq(cq)->buf);
	free(to_mcq(cq));

	return 0;
}
struct ibv_srq *mlx4_create_srq(struct ibv_pd *pd,
				struct ibv_srq_init_attr *attr)
{
	struct mlx4_create_srq      cmd;
	struct mlx4_create_srq_resp resp;
	struct mlx4_srq		   *srq;
	int			    ret;

	/* Sanity check SRQ size before proceeding */
	if (attr->attr.max_wr > 1 << 16 || attr->attr.max_sge > 64)
		return NULL;

	srq = malloc(sizeof *srq);
	if (!srq)
		return NULL;

	if (pthread_spin_init(&srq->lock, PTHREAD_PROCESS_PRIVATE))
		goto err;

	srq->max     = align_queue_size(attr->attr.max_wr + 1);
	srq->max_gs  = attr->attr.max_sge;
	srq->counter = 0;

	if (mlx4_alloc_srq_buf(pd, &attr->attr, srq))
		goto err;

	srq->db = mlx4_alloc_db(to_mctx(pd->context), MLX4_DB_TYPE_RQ);
	if (!srq->db)
		goto err_free;

	*srq->db = 0;

	cmd.buf_addr = (uintptr_t) srq->buf.buf;
	cmd.db_addr  = (uintptr_t) srq->db;

	ret = ibv_cmd_create_srq(pd, &srq->ibv_srq, attr,
				 &cmd.ibv_cmd, sizeof cmd,
				 &resp.ibv_resp, sizeof resp);
	if (ret)
		goto err_db;

	srq->srqn = resp.srqn;

	return &srq->ibv_srq;

err_db:
	mlx4_free_db(to_mctx(pd->context), MLX4_DB_TYPE_RQ, srq->db);

err_free:
	free(srq->wrid);
	mlx4_free_buf(&srq->buf);

err:
	free(srq);

	return NULL;
}
int mlx4_modify_srq(struct ibv_srq *srq,
		    struct ibv_srq_attr *attr,
		    enum ibv_srq_attr_mask attr_mask)
{
	struct ibv_modify_srq cmd;

	return ibv_cmd_modify_srq(srq, attr, attr_mask, &cmd, sizeof cmd);
}
int mlx4_query_srq(struct ibv_srq *srq,
		   struct ibv_srq_attr *attr)
{
	struct ibv_query_srq cmd;

	return ibv_cmd_query_srq(srq, attr, &cmd, sizeof cmd);
}
int mlx4_destroy_srq(struct ibv_srq *ibsrq)
{
	struct mlx4_srq *srq = to_msrq(ibsrq);
	struct mlx4_cq *mcq = NULL;
	int ret;

	if (ibsrq->xrc_cq) {
		/* XRC SRQ: detach it from the srqn lookup table */
		mcq = to_mcq(ibsrq->xrc_cq);
		mlx4_cq_clean(mcq, 0, srq);
		pthread_spin_lock(&mcq->lock);
		mlx4_clear_xrc_srq(to_mctx(ibsrq->context), srq->srqn);
		pthread_spin_unlock(&mcq->lock);
	}

	ret = ibv_cmd_destroy_srq(ibsrq);
	if (ret) {
		if (mcq) {
			pthread_spin_lock(&mcq->lock);
			mlx4_store_xrc_srq(to_mctx(ibsrq->context),
					   srq->srqn, srq);
			pthread_spin_unlock(&mcq->lock);
		}
		return ret;
	}

	mlx4_free_db(to_mctx(ibsrq->context), MLX4_DB_TYPE_RQ, srq->db);
	mlx4_free_buf(&srq->buf);
	free(srq->wrid);
	free(srq);

	return 0;
}
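/*
 * For an XRC SRQ the srqn -> srq mapping is detached under the CQ lock
 * above so the poll path can never look up a dying SRQ; if the kernel
 * destroy fails, the mapping is restored before returning the error.
 */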
static int verify_sizes(struct ibv_qp_init_attr *attr, struct mlx4_context *context)
{
	int size;
	int nsegs;

	if (attr->cap.max_send_wr  > context->max_qp_wr ||
	    attr->cap.max_recv_wr  > context->max_qp_wr ||
	    attr->cap.max_send_sge > context->max_sge   ||
	    attr->cap.max_recv_sge > context->max_sge)
		return -1;

	if (attr->cap.max_inline_data) {
		nsegs = num_inline_segs(attr->cap.max_inline_data, attr->qp_type);
		size = MLX4_MAX_WQE_SIZE - nsegs * sizeof (struct mlx4_wqe_inline_seg);
		switch (attr->qp_type) {
		case IBV_QPT_UD:
			size -= (sizeof (struct mlx4_wqe_ctrl_seg) +
				 sizeof (struct mlx4_wqe_datagram_seg));
			break;

		case IBV_QPT_RC:
		case IBV_QPT_UC:
		case IBV_QPT_XRC:
			size -= (sizeof (struct mlx4_wqe_ctrl_seg) +
				 sizeof (struct mlx4_wqe_raddr_seg));
			break;

		default:
			return 0;
		}

		if (attr->cap.max_inline_data > size)
			return -1;
	}

	return 0;
}
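/*
 * Worked example of the inline-data bound above (illustrative only;
 * see wqe.h for the authoritative segment layouts): for a UD QP the
 * WQE must fit a control segment and a datagram segment before any
 * inline data, and every inline chunk adds its own mlx4_wqe_inline_seg
 * header, so the usable inline capacity is MLX4_MAX_WQE_SIZE minus
 * those overheads.
 */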
struct ibv_qp *mlx4_create_qp(struct ibv_pd *pd, struct ibv_qp_init_attr *attr)
{
	struct mlx4_create_qp     cmd;
	struct ibv_create_qp_resp resp;
	struct mlx4_qp		 *qp;
	int			  ret;
	struct mlx4_context	 *context = to_mctx(pd->context);

	/* Sanity check QP size before proceeding */
	if (verify_sizes(attr, context))
		return NULL;

	qp = malloc(sizeof *qp);
	if (!qp)
		return NULL;

	mlx4_calc_sq_wqe_size(&attr->cap, attr->qp_type, qp);

	/*
	 * We need to leave 2 KB + 1 WQE of headroom in the SQ to
	 * allow HW to prefetch.
	 */
	qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + 1;
	qp->sq.wqe_cnt = align_queue_size(attr->cap.max_send_wr + qp->sq_spare_wqes);
	qp->rq.wqe_cnt = align_queue_size(attr->cap.max_recv_wr);

	if (attr->srq || attr->qp_type == IBV_QPT_XRC)
		attr->cap.max_recv_wr = qp->rq.wqe_cnt = 0;
	else {
		if (attr->cap.max_recv_sge < 1)
			attr->cap.max_recv_sge = 1;
		if (attr->cap.max_recv_wr < 1)
			attr->cap.max_recv_wr = 1;
	}

	if (mlx4_alloc_qp_buf(pd, &attr->cap, attr->qp_type, qp))
		goto err;

	mlx4_init_qp_indices(qp);

	if (pthread_spin_init(&qp->sq.lock, PTHREAD_PROCESS_PRIVATE) ||
	    pthread_spin_init(&qp->rq.lock, PTHREAD_PROCESS_PRIVATE))
		goto err_free;

	if (!attr->srq && attr->qp_type != IBV_QPT_XRC) {
		qp->db = mlx4_alloc_db(to_mctx(pd->context), MLX4_DB_TYPE_RQ);
		if (!qp->db)
			goto err_free;

		*qp->db = 0;
	}

	cmd.buf_addr	    = (uintptr_t) qp->buf.buf;
	if (attr->srq || attr->qp_type == IBV_QPT_XRC)
		cmd.db_addr = 0;
	else
		cmd.db_addr = (uintptr_t) qp->db;
	cmd.log_sq_stride   = qp->sq.wqe_shift;
	for (cmd.log_sq_bb_count = 0;
	     qp->sq.wqe_cnt > 1 << cmd.log_sq_bb_count;
	     ++cmd.log_sq_bb_count)
		; /* nothing */
	cmd.sq_no_prefetch = 0;	/* OK for ABI 2: just a reserved field */
	memset(cmd.reserved, 0, sizeof cmd.reserved);

	pthread_mutex_lock(&to_mctx(pd->context)->qp_table_mutex);

	ret = ibv_cmd_create_qp(pd, &qp->ibv_qp, attr, &cmd.ibv_cmd, sizeof cmd,
				&resp, sizeof resp);
	if (ret)
		goto err_rq_db;

	ret = mlx4_store_qp(to_mctx(pd->context), qp->ibv_qp.qp_num, qp);
	if (ret)
		goto err_destroy;
	pthread_mutex_unlock(&to_mctx(pd->context)->qp_table_mutex);

	qp->rq.wqe_cnt = attr->cap.max_recv_wr;
	qp->rq.max_gs  = attr->cap.max_recv_sge;

	/* adjust rq maxima to not exceed reported device maxima */
	attr->cap.max_recv_wr = min(context->max_qp_wr, attr->cap.max_recv_wr);
	attr->cap.max_recv_sge = min(context->max_sge, attr->cap.max_recv_sge);

	qp->rq.max_post = attr->cap.max_recv_wr;
	mlx4_set_sq_sizes(qp, &attr->cap, attr->qp_type);

	qp->doorbell_qpn    = htonl(qp->ibv_qp.qp_num << 8);
	if (attr->sq_sig_all)
		qp->sq_signal_bits = htonl(MLX4_WQE_CTRL_CQ_UPDATE);
	else
		qp->sq_signal_bits = 0;

	return &qp->ibv_qp;

err_destroy:
	ibv_cmd_destroy_qp(&qp->ibv_qp);

err_rq_db:
	pthread_mutex_unlock(&to_mctx(pd->context)->qp_table_mutex);
	if (!attr->srq && attr->qp_type != IBV_QPT_XRC)
		mlx4_free_db(to_mctx(pd->context), MLX4_DB_TYPE_RQ, qp->db);

err_free:
	free(qp->sq.wrid);
	if (qp->rq.wqe_cnt)
		free(qp->rq.wrid);
	mlx4_free_buf(&qp->buf);

err:
	free(qp);

	return NULL;
}
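/*
 * Note on the fast-path fields set above: doorbell_qpn holds the QP
 * number pre-shifted and byte-swapped, ready to be written directly to
 * the send doorbell, and sq_signal_bits caches MLX4_WQE_CTRL_CQ_UPDATE
 * so that QPs created with sq_sig_all get a completion for every WQE.
 */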
int mlx4_query_qp(struct ibv_qp *ibqp, struct ibv_qp_attr *attr,
		  enum ibv_qp_attr_mask attr_mask,
		  struct ibv_qp_init_attr *init_attr)
{
	struct ibv_query_qp cmd;
	struct mlx4_qp *qp = to_mqp(ibqp);
	int ret;

	ret = ibv_cmd_query_qp(ibqp, attr, attr_mask, init_attr, &cmd, sizeof cmd);
	if (ret)
		return ret;

	init_attr->cap.max_send_wr     = qp->sq.max_post;
	init_attr->cap.max_send_sge    = qp->sq.max_gs;
	init_attr->cap.max_inline_data = qp->max_inline_data;

	attr->cap = init_attr->cap;

	return 0;
}
int mlx4_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
		   enum ibv_qp_attr_mask attr_mask)
{
	struct ibv_modify_qp cmd;
	int ret;

	if (qp->state == IBV_QPS_RESET &&
	    attr_mask & IBV_QP_STATE   &&
	    attr->qp_state == IBV_QPS_INIT) {
		mlx4_qp_init_sq_ownership(to_mqp(qp));
	}

	ret = ibv_cmd_modify_qp(qp, attr, attr_mask, &cmd, sizeof cmd);

	if (!ret		       &&
	    (attr_mask & IBV_QP_STATE) &&
	    attr->qp_state == IBV_QPS_RESET) {
		mlx4_cq_clean(to_mcq(qp->recv_cq), qp->qp_num,
			      qp->srq ? to_msrq(qp->srq) : NULL);
		if (qp->send_cq != qp->recv_cq)
			mlx4_cq_clean(to_mcq(qp->send_cq), qp->qp_num, NULL);

		mlx4_init_qp_indices(to_mqp(qp));
		if (!qp->srq && qp->qp_type != IBV_QPT_XRC)
			*to_mqp(qp)->db = 0;
	}

	return ret;
}
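/*
 * Both CQs of a QP must be locked while its CQEs are cleaned.  To
 * avoid an ABBA deadlock between threads tearing down QPs that share
 * the same pair of CQs, the locks are always taken in ascending CQ
 * number order and released in the reverse order.
 */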
static void mlx4_lock_cqs(struct ibv_qp *qp)
{
	struct mlx4_cq *send_cq = to_mcq(qp->send_cq);
	struct mlx4_cq *recv_cq = to_mcq(qp->recv_cq);

	if (send_cq == recv_cq)
		pthread_spin_lock(&send_cq->lock);
	else if (send_cq->cqn < recv_cq->cqn) {
		pthread_spin_lock(&send_cq->lock);
		pthread_spin_lock(&recv_cq->lock);
	} else {
		pthread_spin_lock(&recv_cq->lock);
		pthread_spin_lock(&send_cq->lock);
	}
}
static void mlx4_unlock_cqs(struct ibv_qp *qp)
{
	struct mlx4_cq *send_cq = to_mcq(qp->send_cq);
	struct mlx4_cq *recv_cq = to_mcq(qp->recv_cq);

	if (send_cq == recv_cq)
		pthread_spin_unlock(&send_cq->lock);
	else if (send_cq->cqn < recv_cq->cqn) {
		pthread_spin_unlock(&recv_cq->lock);
		pthread_spin_unlock(&send_cq->lock);
	} else {
		pthread_spin_unlock(&send_cq->lock);
		pthread_spin_unlock(&recv_cq->lock);
	}
}
int mlx4_destroy_qp(struct ibv_qp *ibqp)
{
	struct mlx4_qp *qp = to_mqp(ibqp);
	int ret;

	pthread_mutex_lock(&to_mctx(ibqp->context)->qp_table_mutex);
	ret = ibv_cmd_destroy_qp(ibqp);
	if (ret) {
		pthread_mutex_unlock(&to_mctx(ibqp->context)->qp_table_mutex);
		return ret;
	}

	mlx4_lock_cqs(ibqp);

	__mlx4_cq_clean(to_mcq(ibqp->recv_cq), ibqp->qp_num,
			ibqp->srq ? to_msrq(ibqp->srq) : NULL);
	if (ibqp->send_cq != ibqp->recv_cq)
		__mlx4_cq_clean(to_mcq(ibqp->send_cq), ibqp->qp_num, NULL);

	mlx4_clear_qp(to_mctx(ibqp->context), ibqp->qp_num);

	mlx4_unlock_cqs(ibqp);
	pthread_mutex_unlock(&to_mctx(ibqp->context)->qp_table_mutex);

	if (!ibqp->srq && ibqp->qp_type != IBV_QPT_XRC)
		mlx4_free_db(to_mctx(ibqp->context), MLX4_DB_TYPE_RQ, qp->db);

	free(qp->sq.wrid);
	if (qp->rq.wqe_cnt)
		free(qp->rq.wrid);
	mlx4_free_buf(&qp->buf);
	free(qp);

	return 0;
}
struct ibv_ah *mlx4_create_ah(struct ibv_pd *pd, struct ibv_ah_attr *attr)
{
	struct mlx4_ah *ah;
	struct ibv_port_attr port_attr;
	uint8_t is_mcast;

	ah = malloc(sizeof *ah);
	if (!ah)
		return NULL;

	memset(ah, 0, sizeof *ah);

	ah->av.port_pd = htonl(to_mpd(pd)->pdn | (attr->port_num << 24));
	ah->av.g_slid  = attr->src_path_bits;
	ah->av.dlid    = htons(attr->dlid);
	if (attr->static_rate) {
		ah->av.stat_rate = attr->static_rate + MLX4_STAT_RATE_OFFSET;
		/* XXX check rate cap? */
	}
	ah->av.sl_tclass_flowlabel = htonl(attr->sl << 28);
	if (attr->is_global) {
		ah->av.g_slid   |= 0x80;
		ah->av.gid_index = attr->grh.sgid_index;
		ah->av.hop_limit = attr->grh.hop_limit;
		ah->av.sl_tclass_flowlabel |=
			htonl((attr->grh.traffic_class << 20) |
			      attr->grh.flow_label);
		memcpy(ah->av.dgid, attr->grh.dgid.raw, 16);
	}

	if (ibv_query_port(pd->context, attr->port_num, &port_attr))
		goto err;

	if (port_attr.link_layer == IBV_LINK_LAYER_ETHERNET) {
		/* mac/vlan output arguments assumed per the OFED
		   ibv_resolve_eth_gid() extension */
		if (ibv_resolve_eth_gid(pd, attr->port_num,
					(union ibv_gid *)ah->av.dgid,
					attr->grh.sgid_index,
					ah->mac, &ah->vlan,
					&ah->tagged, &is_mcast))
			goto err;

		if (is_mcast) {
			ah->av.dlid = htons(0xc000);
			ah->av.port_pd |= htonl(1 << 31);
		}
		if (ah->tagged) {
			ah->av.port_pd |= htonl(1 << 29);
			ah->vlan |= (attr->sl & 7) << 13;
		}
	}

	return &ah->ibv_ah;

err:
	free(ah);
	return NULL;
}
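/*
 * RoCE notes on the block above: a multicast destination is flagged to
 * the hardware by the 0xc000 base multicast LID together with bit 31
 * of port_pd, and (attr->sl & 7) << 13 places the service level into
 * the 3-bit PCP priority field of the VLAN tag.
 */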
int mlx4_destroy_ah(struct ibv_ah *ah)
{
	free(to_mah(ah));

	return 0;
}
#ifdef HAVE_IBV_XRC_OPS
struct ibv_srq *mlx4_create_xrc_srq(struct ibv_pd *pd,
				    struct ibv_xrc_domain *xrc_domain,
				    struct ibv_cq *xrc_cq,
				    struct ibv_srq_init_attr *attr)
{
	struct mlx4_create_xrc_srq  cmd;
	struct mlx4_create_srq_resp resp;
	struct mlx4_srq		   *srq;
	int			    ret;

	/* Sanity check SRQ size before proceeding */
	if (attr->attr.max_wr > 1 << 16 || attr->attr.max_sge > 64)
		return NULL;

	srq = malloc(sizeof *srq);
	if (!srq)
		return NULL;

	if (pthread_spin_init(&srq->lock, PTHREAD_PROCESS_PRIVATE))
		goto err;

	srq->max     = align_queue_size(attr->attr.max_wr + 1);
	srq->max_gs  = attr->attr.max_sge;
	srq->counter = 0;

	if (mlx4_alloc_srq_buf(pd, &attr->attr, srq))
		goto err;

	srq->db = mlx4_alloc_db(to_mctx(pd->context), MLX4_DB_TYPE_RQ);
	if (!srq->db)
		goto err_free;

	*srq->db = 0;

	cmd.buf_addr = (uintptr_t) srq->buf.buf;
	cmd.db_addr  = (uintptr_t) srq->db;

	ret = ibv_cmd_create_xrc_srq(pd, &srq->ibv_srq, attr,
				     xrc_domain->handle,
				     xrc_cq->handle,
				     &cmd.ibv_cmd, sizeof cmd,
				     &resp.ibv_resp, sizeof resp);
	if (ret)
		goto err_db;

	srq->ibv_srq.xrc_srq_num = srq->srqn = resp.srqn;

	ret = mlx4_store_xrc_srq(to_mctx(pd->context), srq->ibv_srq.xrc_srq_num, srq);
	if (ret)
		goto err_destroy;

	return &srq->ibv_srq;

err_destroy:
	ibv_cmd_destroy_srq(&srq->ibv_srq);

err_db:
	mlx4_free_db(to_mctx(pd->context), MLX4_DB_TYPE_RQ, srq->db);

err_free:
	free(srq->wrid);
	mlx4_free_buf(&srq->buf);

err:
	free(srq);

	return NULL;
}
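/*
 * The SRQ number returned by the kernel is stored in the context's XRC
 * SRQ table (mlx4_store_xrc_srq) so that CQ polling can map the srqn
 * carried in a CQE back to this SRQ, which may be shared by many QPs.
 */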
struct ibv_xrc_domain *mlx4_open_xrc_domain(struct ibv_context *context,
					    int fd, int oflag)
{
	int ret;
	struct mlx4_open_xrc_domain_resp resp;
	struct mlx4_xrc_domain *xrcd;

	xrcd = malloc(sizeof *xrcd);
	if (!xrcd)
		return NULL;

	ret = ibv_cmd_open_xrc_domain(context, fd, oflag, &xrcd->ibv_xrcd,
				      &resp.ibv_resp, sizeof resp);
	if (ret) {
		free(xrcd);
		return NULL;
	}

	xrcd->xrcdn = resp.xrcdn;
	return &xrcd->ibv_xrcd;
}
int mlx4_close_xrc_domain(struct ibv_xrc_domain *d)
{
	int ret;

	ret = ibv_cmd_close_xrc_domain(d);
	if (!ret)
		free(d);
	return ret;
}
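/*
 * The XRC receive-QP calls below are thin wrappers: these QPs are
 * created and owned inside the kernel as part of the XRC domain, so
 * there is no userspace queue state to set up or tear down.
 */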
int mlx4_create_xrc_rcv_qp(struct ibv_qp_init_attr *init_attr,
			   uint32_t *xrc_qp_num)
{
	return ibv_cmd_create_xrc_rcv_qp(init_attr, xrc_qp_num);
}
int mlx4_modify_xrc_rcv_qp(struct ibv_xrc_domain *xrc_domain,
			   uint32_t xrc_qp_num,
			   struct ibv_qp_attr *attr,
			   int attr_mask)
{
	return ibv_cmd_modify_xrc_rcv_qp(xrc_domain, xrc_qp_num,
					 attr, attr_mask);
}
int mlx4_query_xrc_rcv_qp(struct ibv_xrc_domain *xrc_domain,
			  uint32_t xrc_qp_num,
			  struct ibv_qp_attr *attr,
			  int attr_mask,
			  struct ibv_qp_init_attr *init_attr)
{
	int ret;

	ret = ibv_cmd_query_xrc_rcv_qp(xrc_domain, xrc_qp_num,
				       attr, attr_mask, init_attr);
	if (ret)
		return ret;

	/* The queues live in the kernel; report placeholder capabilities */
	init_attr->cap.max_send_wr = init_attr->cap.max_send_sge = 1;
	init_attr->cap.max_recv_sge = init_attr->cap.max_recv_wr = 0;
	init_attr->cap.max_inline_data = 0;
	init_attr->recv_cq = init_attr->send_cq = NULL;
	init_attr->srq = NULL;
	init_attr->xrc_domain = xrc_domain;
	init_attr->qp_type = IBV_QPT_XRC;
	init_attr->qp_context = NULL;
	attr->cap = init_attr->cap;

	return 0;
}
int mlx4_reg_xrc_rcv_qp(struct ibv_xrc_domain *xrc_domain,
			uint32_t xrc_qp_num)
{
	return ibv_cmd_reg_xrc_rcv_qp(xrc_domain, xrc_qp_num);
}

int mlx4_unreg_xrc_rcv_qp(struct ibv_xrc_domain *xrc_domain,
			  uint32_t xrc_qp_num)
{
	return ibv_cmd_unreg_xrc_rcv_qp(xrc_domain, xrc_qp_num);
}
#endif /* HAVE_IBV_XRC_OPS */