2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
4 * Copyright (c) 2006 Cisco Systems, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
37 #endif /* HAVE_CONFIG_H */
/*
 * Legacy (ABI v2) GET_CONTEXT: builds an ibv_get_context_v2 command from
 * the new-style command, issues it over the uverbs write() interface, and
 * fakes a completion channel from the returned cq_fd via an
 * ibv_abi_compat_v2 shim stored in context->abi_compat.
 * NOTE(review): this excerpt omits several original lines (declarations,
 * error returns, braces); visible code is kept byte-identical.
 */
48 static int ibv_cmd_get_context_v2(struct ibv_context *context,
49 struct ibv_get_context *new_cmd,
51 struct ibv_get_context_resp *resp,
54 struct ibv_abi_compat_v2 *t;
55 struct ibv_get_context_v2 *cmd;
/* Heap-allocate the compat shim — it outlives this call. */
59 t = malloc(sizeof *t);
62 pthread_mutex_init(&t->in_use, NULL);
/* v2 command keeps the caller's trailing driver_data, so size it as the
 * v2 header plus the new command's driver-private tail. */
64 cmd_size = sizeof *cmd + new_cmd_size - sizeof *new_cmd;
65 cmd = alloca(cmd_size);
66 memcpy(cmd->driver_data, new_cmd->driver_data, new_cmd_size - sizeof *new_cmd);
68 IBV_INIT_CMD_RESP(cmd, cmd_size, GET_CONTEXT, resp, resp_size);
/* Kernel writes the completion-queue fd through this user pointer. */
69 cmd->cq_fd_tab = (uintptr_t) &cq_fd;
71 if (write(context->cmd_fd, cmd, cmd_size) != cmd_size)
74 VALGRIND_MAKE_MEM_DEFINED(resp, resp_size);
76 context->async_fd = resp->async_fd;
/* ABI v2 supports only a single completion vector. */
77 context->num_comp_vectors = 1;
78 t->channel.context = context;
79 t->channel.fd = cq_fd;
80 t->channel.refcnt = 0;
81 context->abi_compat = t;
/*
 * GET_CONTEXT: create a verbs context for a device.  Falls back to the
 * ABI-v2 path (presumably when abi_ver <= 2 — the guarding condition is
 * missing from this excerpt; confirm against the full source), otherwise
 * issues the modern command and records async_fd / num_comp_vectors
 * from the kernel's response.
 */
86 int ibv_cmd_get_context(struct ibv_context *context, struct ibv_get_context *cmd,
87 size_t cmd_size, struct ibv_get_context_resp *resp,
91 return ibv_cmd_get_context_v2(context, cmd, cmd_size, resp, resp_size);
93 IBV_INIT_CMD_RESP(cmd, cmd_size, GET_CONTEXT, resp, resp_size);
95 if (write(context->cmd_fd, cmd, cmd_size) != cmd_size)
98 VALGRIND_MAKE_MEM_DEFINED(resp, resp_size);
100 context->async_fd = resp->async_fd;
101 context->num_comp_vectors = resp->num_comp_vectors;
/*
 * QUERY_DEVICE: fetch HCA attributes from the kernel and unpack the
 * on-the-wire response field-by-field into *device_attr; the raw 64-bit
 * firmware version is returned separately via *raw_fw_ver.
 * NOTE(review): error-return lines are missing from this excerpt.
 */
106 int ibv_cmd_query_device(struct ibv_context *context,
107 struct ibv_device_attr *device_attr,
108 uint64_t *raw_fw_ver,
109 struct ibv_query_device *cmd, size_t cmd_size)
111 struct ibv_query_device_resp resp;
113 IBV_INIT_CMD_RESP(cmd, cmd_size, QUERY_DEVICE, &resp, sizeof resp);
115 if (write(context->cmd_fd, cmd, cmd_size) != cmd_size)
118 VALGRIND_MAKE_MEM_DEFINED(&resp, sizeof resp);
/* The string form of fw_ver is left zeroed; callers format it from
 * *raw_fw_ver (driver-specific encoding). */
120 memset(device_attr->fw_ver, 0, sizeof device_attr->fw_ver);
121 *raw_fw_ver = resp.fw_ver;
122 device_attr->node_guid = resp.node_guid;
123 device_attr->sys_image_guid = resp.sys_image_guid;
124 device_attr->max_mr_size = resp.max_mr_size;
125 device_attr->page_size_cap = resp.page_size_cap;
126 device_attr->vendor_id = resp.vendor_id;
127 device_attr->vendor_part_id = resp.vendor_part_id;
128 device_attr->hw_ver = resp.hw_ver;
129 device_attr->max_qp = resp.max_qp;
130 device_attr->max_qp_wr = resp.max_qp_wr;
131 device_attr->device_cap_flags = resp.device_cap_flags;
132 device_attr->max_sge = resp.max_sge;
133 device_attr->max_sge_rd = resp.max_sge_rd;
134 device_attr->max_cq = resp.max_cq;
135 device_attr->max_cqe = resp.max_cqe;
136 device_attr->max_mr = resp.max_mr;
137 device_attr->max_pd = resp.max_pd;
138 device_attr->max_qp_rd_atom = resp.max_qp_rd_atom;
139 device_attr->max_ee_rd_atom = resp.max_ee_rd_atom;
140 device_attr->max_res_rd_atom = resp.max_res_rd_atom;
141 device_attr->max_qp_init_rd_atom = resp.max_qp_init_rd_atom;
142 device_attr->max_ee_init_rd_atom = resp.max_ee_init_rd_atom;
143 device_attr->atomic_cap = resp.atomic_cap;
144 device_attr->max_ee = resp.max_ee;
145 device_attr->max_rdd = resp.max_rdd;
146 device_attr->max_mw = resp.max_mw;
147 device_attr->max_raw_ipv6_qp = resp.max_raw_ipv6_qp;
148 device_attr->max_raw_ethy_qp = resp.max_raw_ethy_qp;
149 device_attr->max_mcast_grp = resp.max_mcast_grp;
150 device_attr->max_mcast_qp_attach = resp.max_mcast_qp_attach;
151 device_attr->max_total_mcast_qp_attach = resp.max_total_mcast_qp_attach;
152 device_attr->max_ah = resp.max_ah;
153 device_attr->max_fmr = resp.max_fmr;
154 device_attr->max_map_per_fmr = resp.max_map_per_fmr;
155 device_attr->max_srq = resp.max_srq;
156 device_attr->max_srq_wr = resp.max_srq_wr;
157 device_attr->max_srq_sge = resp.max_srq_sge;
158 device_attr->max_pkeys = resp.max_pkeys;
159 device_attr->local_ca_ack_delay = resp.local_ca_ack_delay;
160 device_attr->phys_port_cnt = resp.phys_port_cnt;
/*
 * QUERY_PORT: fetch the attributes of one physical port (state, MTU,
 * LIDs, table lengths, speed/width, link layer) into *port_attr.
 */
165 int ibv_cmd_query_port(struct ibv_context *context, uint8_t port_num,
166 struct ibv_port_attr *port_attr,
167 struct ibv_query_port *cmd, size_t cmd_size)
169 struct ibv_query_port_resp resp;
171 IBV_INIT_CMD_RESP(cmd, cmd_size, QUERY_PORT, &resp, sizeof resp);
172 cmd->port_num = port_num;
/* Clear padding so the kernel sees deterministic bytes. */
173 memset(cmd->reserved, 0, sizeof cmd->reserved);
175 if (write(context->cmd_fd, cmd, cmd_size) != cmd_size)
178 VALGRIND_MAKE_MEM_DEFINED(&resp, sizeof resp);
180 port_attr->state = resp.state;
181 port_attr->max_mtu = resp.max_mtu;
182 port_attr->active_mtu = resp.active_mtu;
183 port_attr->gid_tbl_len = resp.gid_tbl_len;
184 port_attr->port_cap_flags = resp.port_cap_flags;
185 port_attr->max_msg_sz = resp.max_msg_sz;
186 port_attr->bad_pkey_cntr = resp.bad_pkey_cntr;
187 port_attr->qkey_viol_cntr = resp.qkey_viol_cntr;
188 port_attr->pkey_tbl_len = resp.pkey_tbl_len;
189 port_attr->lid = resp.lid;
190 port_attr->sm_lid = resp.sm_lid;
191 port_attr->lmc = resp.lmc;
192 port_attr->max_vl_num = resp.max_vl_num;
193 port_attr->sm_sl = resp.sm_sl;
194 port_attr->subnet_timeout = resp.subnet_timeout;
195 port_attr->init_type_reply = resp.init_type_reply;
196 port_attr->active_width = resp.active_width;
197 port_attr->active_speed = resp.active_speed;
198 port_attr->phys_state = resp.phys_state;
199 port_attr->link_layer = resp.link_layer;
/*
 * ALLOC_PD: allocate a protection domain; on success records the
 * kernel-assigned handle and the owning context in *pd.
 */
204 int ibv_cmd_alloc_pd(struct ibv_context *context, struct ibv_pd *pd,
205 struct ibv_alloc_pd *cmd, size_t cmd_size,
206 struct ibv_alloc_pd_resp *resp, size_t resp_size)
208 IBV_INIT_CMD_RESP(cmd, cmd_size, ALLOC_PD, resp, resp_size);
210 if (write(context->cmd_fd, cmd, cmd_size) != cmd_size)
213 VALGRIND_MAKE_MEM_DEFINED(resp, resp_size);
215 pd->handle = resp->pd_handle;
216 pd->context = context;
/*
 * DEALLOC_PD: release a protection domain by handle.  No response
 * payload; the write() itself is the whole transaction.
 */
221 int ibv_cmd_dealloc_pd(struct ibv_pd *pd)
223 struct ibv_dealloc_pd cmd;
225 IBV_INIT_CMD(&cmd, sizeof cmd, DEALLOC_PD);
226 cmd.pd_handle = pd->handle;
228 if (write(pd->context->cmd_fd, &cmd, sizeof cmd) != sizeof cmd)
/*
 * REG_MR: register the memory region [addr, addr+length) with the given
 * access flags under pd; fills *mr with the kernel handle and the
 * local/remote keys (lkey/rkey) from the response.
 */
234 int ibv_cmd_reg_mr(struct ibv_pd *pd, void *addr, size_t length,
235 uint64_t hca_va, int access,
236 struct ibv_mr *mr, struct ibv_reg_mr *cmd,
238 struct ibv_reg_mr_resp *resp, size_t resp_size)
241 IBV_INIT_CMD_RESP(cmd, cmd_size, REG_MR, resp, resp_size);
243 cmd->start = (uintptr_t) addr;
244 cmd->length = length;
/* hca_va may differ from start (e.g. zero-based MRs). */
245 cmd->hca_va = hca_va;
246 cmd->pd_handle = pd->handle;
247 cmd->access_flags = access;
249 if (write(pd->context->cmd_fd, cmd, cmd_size) != cmd_size)
252 VALGRIND_MAKE_MEM_DEFINED(resp, resp_size);
254 mr->handle = resp->mr_handle;
255 mr->lkey = resp->lkey;
256 mr->rkey = resp->rkey;
257 mr->context = pd->context;
/*
 * DEREG_MR: deregister a memory region by handle; no response payload.
 */
262 int ibv_cmd_dereg_mr(struct ibv_mr *mr)
264 struct ibv_dereg_mr cmd;
266 IBV_INIT_CMD(&cmd, sizeof cmd, DEREG_MR);
267 cmd.mr_handle = mr->handle;
269 if (write(mr->context->cmd_fd, &cmd, sizeof cmd) != sizeof cmd)
/*
 * Legacy (ABI v2) CREATE_CQ: repacks the new-style command into the v2
 * layout (preserving the trailing driver_data) and issues it.  The v2
 * ABI has no completion channel / comp_vector; event_handler is forced
 * to 0.
 * NOTE(review): excerpt is missing declarations and error returns.
 */
275 static int ibv_cmd_create_cq_v2(struct ibv_context *context, int cqe,
277 struct ibv_create_cq *new_cmd, size_t new_cmd_size,
278 struct ibv_create_cq_resp *resp, size_t resp_size)
280 struct ibv_create_cq_v2 *cmd;
283 cmd_size = sizeof *cmd + new_cmd_size - sizeof *new_cmd;
284 cmd = alloca(cmd_size);
285 memcpy(cmd->driver_data, new_cmd->driver_data, new_cmd_size - sizeof *new_cmd);
287 IBV_INIT_CMD_RESP(cmd, cmd_size, CREATE_CQ, resp, resp_size);
/* user_handle lets async/comp events map back to this ibv_cq. */
288 cmd->user_handle = (uintptr_t) cq;
290 cmd->event_handler = 0;
292 if (write(context->cmd_fd, cmd, cmd_size) != cmd_size)
295 VALGRIND_MAKE_MEM_DEFINED(resp, resp_size);
297 cq->handle = resp->cq_handle;
299 cq->context = context;
/*
 * CREATE_CQ: create a completion queue with cqe entries, optionally
 * bound to a completion channel and vector.  Delegates to the v2 path
 * on old ABIs (guard condition missing from this excerpt).
 */
304 int ibv_cmd_create_cq(struct ibv_context *context, int cqe,
305 struct ibv_comp_channel *channel,
306 int comp_vector, struct ibv_cq *cq,
307 struct ibv_create_cq *cmd, size_t cmd_size,
308 struct ibv_create_cq_resp *resp, size_t resp_size)
311 return ibv_cmd_create_cq_v2(context, cqe, cq,
312 cmd, cmd_size, resp, resp_size);
314 IBV_INIT_CMD_RESP(cmd, cmd_size, CREATE_CQ, resp, resp_size);
315 cmd->user_handle = (uintptr_t) cq;
317 cmd->comp_vector = comp_vector;
/* -1 means "no completion channel". */
318 cmd->comp_channel = channel ? channel->fd : -1;
321 if (write(context->cmd_fd, cmd, cmd_size) != cmd_size)
324 VALGRIND_MAKE_MEM_DEFINED(resp, resp_size);
326 cq->handle = resp->cq_handle;
328 cq->context = context;
/*
 * POLL_CQ: poll up to ne completions through the kernel (slow path,
 * used by drivers without userspace CQ polling).  Allocates a response
 * buffer sized for ne kernel work completions, then translates each
 * returned ibv_kern_wc into the caller's ibv_wc array.
 * NOTE(review): allocation-failure/error paths and free() are missing
 * from this excerpt.
 */
333 int ibv_cmd_poll_cq(struct ibv_cq *ibcq, int ne, struct ibv_wc *wc)
335 struct ibv_poll_cq cmd;
336 struct ibv_poll_cq_resp *resp;
341 rsize = sizeof *resp + ne * sizeof(struct ibv_kern_wc);
342 resp = malloc(rsize);
346 IBV_INIT_CMD_RESP(&cmd, sizeof cmd, POLL_CQ, resp, rsize);
347 cmd.cq_handle = ibcq->handle;
350 if (write(ibcq->context->cmd_fd, &cmd, sizeof cmd) != sizeof cmd) {
355 VALGRIND_MAKE_MEM_DEFINED(resp, rsize);
/* resp->count is how many completions the kernel actually returned. */
357 for (i = 0; i < resp->count; i++) {
358 wc[i].wr_id = resp->wc[i].wr_id;
359 wc[i].status = resp->wc[i].status;
360 wc[i].opcode = resp->wc[i].opcode;
361 wc[i].vendor_err = resp->wc[i].vendor_err;
362 wc[i].byte_len = resp->wc[i].byte_len;
363 wc[i].imm_data = resp->wc[i].imm_data;
364 wc[i].qp_num = resp->wc[i].qp_num;
365 wc[i].src_qp = resp->wc[i].src_qp;
366 wc[i].wc_flags = resp->wc[i].wc_flags;
367 wc[i].pkey_index = resp->wc[i].pkey_index;
368 wc[i].slid = resp->wc[i].slid;
369 wc[i].sl = resp->wc[i].sl;
370 wc[i].dlid_path_bits = resp->wc[i].dlid_path_bits;
/*
 * REQ_NOTIFY_CQ: arm the CQ for the next completion event; when
 * solicited_only is nonzero, only solicited completions trigger it.
 */
380 int ibv_cmd_req_notify_cq(struct ibv_cq *ibcq, int solicited_only)
382 struct ibv_req_notify_cq cmd;
384 IBV_INIT_CMD(&cmd, sizeof cmd, REQ_NOTIFY_CQ);
385 cmd.cq_handle = ibcq->handle;
/* Normalize any nonzero value to 1 for the wire format. */
386 cmd.solicited = !!solicited_only;
388 if (write(ibcq->context->cmd_fd, &cmd, sizeof cmd) != sizeof cmd)
/*
 * RESIZE_CQ: ask the kernel to resize cq to cqe entries.  The cqe value
 * is carried in the driver-specific command; only cq_handle is set here
 * (cmd->cqe assignment is not visible in this excerpt — confirm).
 */
394 int ibv_cmd_resize_cq(struct ibv_cq *cq, int cqe,
395 struct ibv_resize_cq *cmd, size_t cmd_size,
396 struct ibv_resize_cq_resp *resp, size_t resp_size)
399 IBV_INIT_CMD_RESP(cmd, cmd_size, RESIZE_CQ, resp, resp_size);
400 cmd->cq_handle = cq->handle;
403 if (write(cq->context->cmd_fd, cmd, cmd_size) != cmd_size)
406 VALGRIND_MAKE_MEM_DEFINED(resp, resp_size);
/*
 * Legacy (ABI v1) DESTROY_CQ: no response payload, so no event-count
 * synchronization is possible on this path.
 */
413 static int ibv_cmd_destroy_cq_v1(struct ibv_cq *cq)
415 struct ibv_destroy_cq_v1 cmd;
417 IBV_INIT_CMD(&cmd, sizeof cmd, DESTROY_CQ);
418 cmd.cq_handle = cq->handle;
420 if (write(cq->context->cmd_fd, &cmd, sizeof cmd) != sizeof cmd)
/*
 * DESTROY_CQ: destroy the CQ, then block until every completion and
 * async event the kernel reported for it has been acknowledged by the
 * application (prevents use-after-free of the CQ in event handlers).
 * Falls back to the v1 path on old ABIs (guard missing from excerpt).
 */
426 int ibv_cmd_destroy_cq(struct ibv_cq *cq)
428 struct ibv_destroy_cq cmd;
429 struct ibv_destroy_cq_resp resp;
432 return ibv_cmd_destroy_cq_v1(cq);
434 IBV_INIT_CMD_RESP(&cmd, sizeof cmd, DESTROY_CQ, &resp, sizeof resp);
435 cmd.cq_handle = cq->handle;
438 if (write(cq->context->cmd_fd, &cmd, sizeof cmd) != sizeof cmd)
441 VALGRIND_MAKE_MEM_DEFINED(&resp, sizeof resp);
/* Wait for outstanding events; counters are advanced by the event
 * acknowledgment paths, which signal cq->cond. */
443 pthread_mutex_lock(&cq->mutex);
444 while (cq->comp_events_completed != resp.comp_events_reported ||
445 cq->async_events_completed != resp.async_events_reported)
446 pthread_cond_wait(&cq->cond, &cq->mutex);
447 pthread_mutex_unlock(&cq->mutex);
/*
 * CREATE_SRQ: create a shared receive queue under pd with the limits in
 * attr; writes back the kernel handle and the (possibly adjusted)
 * max_wr/max_sge.  For ABI <= 5 responses the driver-private tail is
 * shifted to account for the smaller v5 response header.
 */
452 int ibv_cmd_create_srq(struct ibv_pd *pd,
453 struct ibv_srq *srq, struct ibv_srq_init_attr *attr,
454 struct ibv_create_srq *cmd, size_t cmd_size,
455 struct ibv_create_srq_resp *resp, size_t resp_size)
457 IBV_INIT_CMD_RESP(cmd, cmd_size, CREATE_SRQ, resp, resp_size);
458 cmd->user_handle = (uintptr_t) srq;
459 cmd->pd_handle = pd->handle;
460 cmd->max_wr = attr->attr.max_wr;
461 cmd->max_sge = attr->attr.max_sge;
462 cmd->srq_limit = attr->attr.srq_limit;
464 if (write(pd->context->cmd_fd, cmd, cmd_size) != cmd_size)
467 VALGRIND_MAKE_MEM_DEFINED(resp, resp_size);
469 srq->handle = resp->srq_handle;
470 srq->context = pd->context;
/* Kernel may round limits up; report the actual values back. */
473 attr->attr.max_wr = resp->max_wr;
474 attr->attr.max_sge = resp->max_sge;
476 struct ibv_create_srq_resp_v5 *resp_v5 =
477 (struct ibv_create_srq_resp_v5 *) resp;
/* Realign driver data that follows the shorter v5 header. */
479 memmove((void *) resp + sizeof *resp,
480 (void *) resp_v5 + sizeof *resp_v5,
481 resp_size - sizeof *resp);
/*
 * CREATE_XRC_SRQ: like ibv_cmd_create_srq but for an XRC SRQ, which is
 * additionally bound to an XRC domain (xrcd_handle) and an XRC CQ.
 */
487 int ibv_cmd_create_xrc_srq(struct ibv_pd *pd,
488 struct ibv_srq *srq, struct ibv_srq_init_attr *attr,
489 uint32_t xrcd_handle, uint32_t xrc_cq,
490 struct ibv_create_xrc_srq *cmd, size_t cmd_size,
491 struct ibv_create_srq_resp *resp, size_t resp_size)
493 IBV_INIT_CMD_RESP(cmd, cmd_size, CREATE_XRC_SRQ, resp, resp_size);
494 cmd->user_handle = (uintptr_t) srq;
495 cmd->pd_handle = pd->handle;
496 cmd->max_wr = attr->attr.max_wr;
497 cmd->max_sge = attr->attr.max_sge;
498 cmd->srq_limit = attr->attr.srq_limit;
499 cmd->xrcd_handle = xrcd_handle;
500 cmd->xrc_cq = xrc_cq;
502 if (write(pd->context->cmd_fd, cmd, cmd_size) != cmd_size)
505 VALGRIND_MAKE_MEM_DEFINED(resp, resp_size);
507 srq->handle = resp->srq_handle;
508 srq->context = pd->context;
509 attr->attr.max_wr = resp->max_wr;
510 attr->attr.max_sge = resp->max_sge;
/*
 * Legacy (ABI v3) MODIFY_SRQ: repack the new-style command into the v3
 * layout (keeping the trailing driver_data) and issue it.
 * NOTE(review): excerpt omits declarations, braces and error returns.
 */
515 static int ibv_cmd_modify_srq_v3(struct ibv_srq *srq,
516 struct ibv_srq_attr *srq_attr,
518 struct ibv_modify_srq *new_cmd,
521 struct ibv_modify_srq_v3 *cmd;
524 cmd_size = sizeof *cmd + new_cmd_size - sizeof *new_cmd;
525 cmd = alloca(cmd_size);
526 memcpy(cmd->driver_data, new_cmd->driver_data, new_cmd_size - sizeof *new_cmd);
528 IBV_INIT_CMD(cmd, cmd_size, MODIFY_SRQ);
530 cmd->srq_handle = srq->handle;
531 cmd->attr_mask = srq_attr_mask;
532 cmd->max_wr = srq_attr->max_wr;
533 cmd->srq_limit = srq_attr->srq_limit;
537 if (write(srq->context->cmd_fd, cmd, cmd_size) != cmd_size)
/*
 * MODIFY_SRQ: change SRQ attributes selected by srq_attr_mask
 * (max_wr and/or srq_limit).  Delegates to the v3 path on old ABIs
 * (guard condition missing from this excerpt).
 */
543 int ibv_cmd_modify_srq(struct ibv_srq *srq,
544 struct ibv_srq_attr *srq_attr,
546 struct ibv_modify_srq *cmd, size_t cmd_size)
549 return ibv_cmd_modify_srq_v3(srq, srq_attr, srq_attr_mask,
552 IBV_INIT_CMD(cmd, cmd_size, MODIFY_SRQ);
554 cmd->srq_handle = srq->handle;
555 cmd->attr_mask = srq_attr_mask;
556 cmd->max_wr = srq_attr->max_wr;
557 cmd->srq_limit = srq_attr->srq_limit;
559 if (write(srq->context->cmd_fd, cmd, cmd_size) != cmd_size)
/*
 * QUERY_SRQ: read back the current SRQ attributes (max_wr, max_sge,
 * srq_limit) into *srq_attr.
 */
565 int ibv_cmd_query_srq(struct ibv_srq *srq, struct ibv_srq_attr *srq_attr,
566 struct ibv_query_srq *cmd, size_t cmd_size)
568 struct ibv_query_srq_resp resp;
570 IBV_INIT_CMD_RESP(cmd, cmd_size, QUERY_SRQ, &resp, sizeof resp);
571 cmd->srq_handle = srq->handle;
574 if (write(srq->context->cmd_fd, cmd, cmd_size) != cmd_size)
577 VALGRIND_MAKE_MEM_DEFINED(&resp, sizeof resp);
579 srq_attr->max_wr = resp.max_wr;
580 srq_attr->max_sge = resp.max_sge;
581 srq_attr->srq_limit = resp.srq_limit;
/*
 * Legacy (ABI v1) DESTROY_SRQ: no response payload, so no event-count
 * synchronization is possible on this path.
 */
586 static int ibv_cmd_destroy_srq_v1(struct ibv_srq *srq)
588 struct ibv_destroy_srq_v1 cmd;
590 IBV_INIT_CMD(&cmd, sizeof cmd, DESTROY_SRQ);
591 cmd.srq_handle = srq->handle;
593 if (write(srq->context->cmd_fd, &cmd, sizeof cmd) != sizeof cmd)
/*
 * DESTROY_SRQ: destroy the SRQ, then wait until all async events the
 * kernel reported for it have been acknowledged (mirrors
 * ibv_cmd_destroy_cq).  Falls back to the v1 path on old ABIs.
 */
599 int ibv_cmd_destroy_srq(struct ibv_srq *srq)
601 struct ibv_destroy_srq cmd;
602 struct ibv_destroy_srq_resp resp;
605 return ibv_cmd_destroy_srq_v1(srq);
607 IBV_INIT_CMD_RESP(&cmd, sizeof cmd, DESTROY_SRQ, &resp, sizeof resp);
608 cmd.srq_handle = srq->handle;
611 if (write(srq->context->cmd_fd, &cmd, sizeof cmd) != sizeof cmd)
614 VALGRIND_MAKE_MEM_DEFINED(&resp, sizeof resp);
616 pthread_mutex_lock(&srq->mutex);
617 while (srq->events_completed != resp.events_reported)
618 pthread_cond_wait(&srq->cond, &srq->mutex);
619 pthread_mutex_unlock(&srq->mutex);
/*
 * CREATE_QP: create a queue pair under pd from init attributes; records
 * the kernel handle and QPN in *qp and writes the possibly-adjusted
 * capabilities back into attr->cap.  For XRC QPs the srq_handle field
 * is overloaded to carry the XRC domain handle.  For ABI <= 4 the
 * driver-private response tail is realigned for the older, smaller
 * response headers (v4 then v3 cases).
 */
624 int ibv_cmd_create_qp(struct ibv_pd *pd,
625 struct ibv_qp *qp, struct ibv_qp_init_attr *attr,
626 struct ibv_create_qp *cmd, size_t cmd_size,
627 struct ibv_create_qp_resp *resp, size_t resp_size)
629 IBV_INIT_CMD_RESP(cmd, cmd_size, CREATE_QP, resp, resp_size);
631 cmd->user_handle = (uintptr_t) qp;
632 cmd->pd_handle = pd->handle;
633 cmd->send_cq_handle = attr->send_cq->handle;
634 cmd->recv_cq_handle = attr->recv_cq->handle;
635 cmd->max_send_wr = attr->cap.max_send_wr;
636 cmd->max_recv_wr = attr->cap.max_recv_wr;
637 cmd->max_send_sge = attr->cap.max_send_sge;
638 cmd->max_recv_sge = attr->cap.max_recv_sge;
639 cmd->max_inline_data = attr->cap.max_inline_data;
640 cmd->sq_sig_all = attr->sq_sig_all;
641 cmd->qp_type = attr->qp_type;
642 cmd->is_srq = !!attr->srq;
/* XRC: srq_handle carries the XRC domain handle instead of an SRQ. */
643 cmd->srq_handle = attr->qp_type == IBV_QPT_XRC ?
644 (attr->xrc_domain ? attr->xrc_domain->handle : 0) :
645 (attr->srq ? attr->srq->handle : 0);
648 if (write(pd->context->cmd_fd, cmd, cmd_size) != cmd_size)
651 VALGRIND_MAKE_MEM_DEFINED(resp, resp_size);
653 qp->handle = resp->qp_handle;
654 qp->qp_num = resp->qpn;
655 qp->context = pd->context;
/* Report the kernel's actual (possibly rounded-up) capabilities. */
658 attr->cap.max_recv_sge = resp->max_recv_sge;
659 attr->cap.max_send_sge = resp->max_send_sge;
660 attr->cap.max_recv_wr = resp->max_recv_wr;
661 attr->cap.max_send_wr = resp->max_send_wr;
662 attr->cap.max_inline_data = resp->max_inline_data;
666 struct ibv_create_qp_resp_v4 *resp_v4 =
667 (struct ibv_create_qp_resp_v4 *) resp;
669 memmove((void *) resp + sizeof *resp,
670 (void *) resp_v4 + sizeof *resp_v4,
671 resp_size - sizeof *resp);
672 } else if (abi_ver <= 3) {
673 struct ibv_create_qp_resp_v3 *resp_v3 =
674 (struct ibv_create_qp_resp_v3 *) resp;
676 memmove((void *) resp + sizeof *resp,
677 (void *) resp_v3 + sizeof *resp_v3,
678 resp_size - sizeof *resp);
/*
 * QUERY_QP: query QP state/attributes selected by attr_mask and unpack
 * the wire response into *attr (including primary and alternate path
 * address vectors) and *init_attr (creation-time attributes, taken
 * partly from the cached userspace qp object).
 */
684 int ibv_cmd_query_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
686 struct ibv_qp_init_attr *init_attr,
687 struct ibv_query_qp *cmd, size_t cmd_size)
689 struct ibv_query_qp_resp resp;
691 IBV_INIT_CMD_RESP(cmd, cmd_size, QUERY_QP, &resp, sizeof resp);
692 cmd->qp_handle = qp->handle;
693 cmd->attr_mask = attr_mask;
695 if (write(qp->context->cmd_fd, cmd, cmd_size) != cmd_size)
698 VALGRIND_MAKE_MEM_DEFINED(&resp, sizeof resp);
700 attr->qkey = resp.qkey;
701 attr->rq_psn = resp.rq_psn;
702 attr->sq_psn = resp.sq_psn;
703 attr->dest_qp_num = resp.dest_qp_num;
704 attr->qp_access_flags = resp.qp_access_flags;
705 attr->pkey_index = resp.pkey_index;
706 attr->alt_pkey_index = resp.alt_pkey_index;
707 attr->qp_state = resp.qp_state;
708 attr->cur_qp_state = resp.cur_qp_state;
709 attr->path_mtu = resp.path_mtu;
710 attr->path_mig_state = resp.path_mig_state;
711 attr->sq_draining = resp.sq_draining;
712 attr->max_rd_atomic = resp.max_rd_atomic;
713 attr->max_dest_rd_atomic = resp.max_dest_rd_atomic;
714 attr->min_rnr_timer = resp.min_rnr_timer;
715 attr->port_num = resp.port_num;
716 attr->timeout = resp.timeout;
717 attr->retry_cnt = resp.retry_cnt;
718 attr->rnr_retry = resp.rnr_retry;
719 attr->alt_port_num = resp.alt_port_num;
720 attr->alt_timeout = resp.alt_timeout;
721 attr->cap.max_send_wr = resp.max_send_wr;
722 attr->cap.max_recv_wr = resp.max_recv_wr;
723 attr->cap.max_send_sge = resp.max_send_sge;
724 attr->cap.max_recv_sge = resp.max_recv_sge;
725 attr->cap.max_inline_data = resp.max_inline_data;
/* Primary path address vector (GRH fields are 16-byte GID + scalars). */
727 memcpy(attr->ah_attr.grh.dgid.raw, resp.dest.dgid, 16);
728 attr->ah_attr.grh.flow_label = resp.dest.flow_label;
729 attr->ah_attr.dlid = resp.dest.dlid;
730 attr->ah_attr.grh.sgid_index = resp.dest.sgid_index;
731 attr->ah_attr.grh.hop_limit = resp.dest.hop_limit;
732 attr->ah_attr.grh.traffic_class = resp.dest.traffic_class;
733 attr->ah_attr.sl = resp.dest.sl;
734 attr->ah_attr.src_path_bits = resp.dest.src_path_bits;
735 attr->ah_attr.static_rate = resp.dest.static_rate;
736 attr->ah_attr.is_global = resp.dest.is_global;
737 attr->ah_attr.port_num = resp.dest.port_num;
/* Alternate path address vector (for path migration). */
739 memcpy(attr->alt_ah_attr.grh.dgid.raw, resp.alt_dest.dgid, 16);
740 attr->alt_ah_attr.grh.flow_label = resp.alt_dest.flow_label;
741 attr->alt_ah_attr.dlid = resp.alt_dest.dlid;
742 attr->alt_ah_attr.grh.sgid_index = resp.alt_dest.sgid_index;
743 attr->alt_ah_attr.grh.hop_limit = resp.alt_dest.hop_limit;
744 attr->alt_ah_attr.grh.traffic_class = resp.alt_dest.traffic_class;
745 attr->alt_ah_attr.sl = resp.alt_dest.sl;
746 attr->alt_ah_attr.src_path_bits = resp.alt_dest.src_path_bits;
747 attr->alt_ah_attr.static_rate = resp.alt_dest.static_rate;
748 attr->alt_ah_attr.is_global = resp.alt_dest.is_global;
749 attr->alt_ah_attr.port_num = resp.alt_dest.port_num;
/* init_attr: creation-time fields come from the cached qp object,
 * capacities from the kernel response. */
751 init_attr->qp_context = qp->qp_context;
752 init_attr->send_cq = qp->send_cq;
753 init_attr->recv_cq = qp->recv_cq;
754 init_attr->srq = qp->srq;
755 init_attr->qp_type = qp->qp_type;
756 if (qp->qp_type == IBV_QPT_XRC)
757 init_attr->xrc_domain = qp->xrc_domain;
758 init_attr->cap.max_send_wr = resp.max_send_wr;
759 init_attr->cap.max_recv_wr = resp.max_recv_wr;
760 init_attr->cap.max_send_sge = resp.max_send_sge;
761 init_attr->cap.max_recv_sge = resp.max_recv_sge;
762 init_attr->cap.max_inline_data = resp.max_inline_data;
763 init_attr->sq_sig_all = resp.sq_sig_all;
/*
 * MODIFY_QP: transition the QP according to attr_mask.  The entire
 * attribute set (plus both address vectors) is serialized into the
 * command; the kernel applies only the fields selected by attr_mask.
 */
768 int ibv_cmd_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr,
770 struct ibv_modify_qp *cmd, size_t cmd_size)
772 IBV_INIT_CMD(cmd, cmd_size, MODIFY_QP);
774 cmd->qp_handle = qp->handle;
775 cmd->attr_mask = attr_mask;
776 cmd->qkey = attr->qkey;
777 cmd->rq_psn = attr->rq_psn;
778 cmd->sq_psn = attr->sq_psn;
779 cmd->dest_qp_num = attr->dest_qp_num;
780 cmd->qp_access_flags = attr->qp_access_flags;
781 cmd->pkey_index = attr->pkey_index;
782 cmd->alt_pkey_index = attr->alt_pkey_index;
783 cmd->qp_state = attr->qp_state;
784 cmd->cur_qp_state = attr->cur_qp_state;
785 cmd->path_mtu = attr->path_mtu;
786 cmd->path_mig_state = attr->path_mig_state;
787 cmd->en_sqd_async_notify = attr->en_sqd_async_notify;
788 cmd->max_rd_atomic = attr->max_rd_atomic;
789 cmd->max_dest_rd_atomic = attr->max_dest_rd_atomic;
790 cmd->min_rnr_timer = attr->min_rnr_timer;
791 cmd->port_num = attr->port_num;
792 cmd->timeout = attr->timeout;
793 cmd->retry_cnt = attr->retry_cnt;
794 cmd->rnr_retry = attr->rnr_retry;
795 cmd->alt_port_num = attr->alt_port_num;
796 cmd->alt_timeout = attr->alt_timeout;
/* Primary path address vector. */
798 memcpy(cmd->dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
799 cmd->dest.flow_label = attr->ah_attr.grh.flow_label;
800 cmd->dest.dlid = attr->ah_attr.dlid;
/* Zero reserved/padding so the kernel sees deterministic bytes. */
801 cmd->dest.reserved = 0;
802 cmd->dest.sgid_index = attr->ah_attr.grh.sgid_index;
803 cmd->dest.hop_limit = attr->ah_attr.grh.hop_limit;
804 cmd->dest.traffic_class = attr->ah_attr.grh.traffic_class;
805 cmd->dest.sl = attr->ah_attr.sl;
806 cmd->dest.src_path_bits = attr->ah_attr.src_path_bits;
807 cmd->dest.static_rate = attr->ah_attr.static_rate;
808 cmd->dest.is_global = attr->ah_attr.is_global;
809 cmd->dest.port_num = attr->ah_attr.port_num;
/* Alternate path address vector. */
811 memcpy(cmd->alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
812 cmd->alt_dest.flow_label = attr->alt_ah_attr.grh.flow_label;
813 cmd->alt_dest.dlid = attr->alt_ah_attr.dlid;
814 cmd->alt_dest.reserved = 0;
815 cmd->alt_dest.sgid_index = attr->alt_ah_attr.grh.sgid_index;
816 cmd->alt_dest.hop_limit = attr->alt_ah_attr.grh.hop_limit;
817 cmd->alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
818 cmd->alt_dest.sl = attr->alt_ah_attr.sl;
819 cmd->alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
820 cmd->alt_dest.static_rate = attr->alt_ah_attr.static_rate;
821 cmd->alt_dest.is_global = attr->alt_ah_attr.is_global;
822 cmd->alt_dest.port_num = attr->alt_ah_attr.port_num;
824 cmd->reserved[0] = cmd->reserved[1] = 0;
826 if (write(qp->context->cmd_fd, cmd, cmd_size) != cmd_size)
/*
 * CREATE_XRC_RCV_QP: create a kernel-resident XRC receive-side QP bound
 * to init_attr->xrc_domain; only the QP number is returned to userspace
 * via *xrc_rcv_qpn (no userspace ibv_qp object is created).
 */
832 int ibv_cmd_create_xrc_rcv_qp(struct ibv_qp_init_attr *init_attr,
833 uint32_t *xrc_rcv_qpn)
835 struct ibv_create_xrc_rcv_qp cmd;
836 struct ibv_create_xrc_rcv_qp_resp resp;
841 IBV_INIT_CMD_RESP(&cmd, sizeof cmd, CREATE_XRC_RCV_QP, &resp,
844 cmd.xrc_domain_handle = init_attr->xrc_domain->handle;
845 cmd.max_send_wr = init_attr->cap.max_send_wr;
846 cmd.max_recv_wr = init_attr->cap.max_recv_wr;
847 cmd.max_send_sge = init_attr->cap.max_send_sge;
848 cmd.max_recv_sge = init_attr->cap.max_recv_sge;
849 cmd.max_inline_data = init_attr->cap.max_inline_data;
850 cmd.sq_sig_all = init_attr->sq_sig_all;
851 cmd.qp_type = init_attr->qp_type;
852 cmd.reserved[0] = cmd.reserved[1] = 0;
854 if (write(init_attr->xrc_domain->context->cmd_fd, &cmd, sizeof cmd) !=
858 *xrc_rcv_qpn = resp.qpn;
/*
 * MODIFY_XRC_RCV_QP: modify a kernel-resident XRC receive QP, addressed
 * by (domain handle, QP number) rather than a userspace QP handle.
 * Attribute serialization mirrors ibv_cmd_modify_qp.
 */
863 int ibv_cmd_modify_xrc_rcv_qp(struct ibv_xrc_domain *d, uint32_t xrc_qp_num,
864 struct ibv_qp_attr *attr, int attr_mask)
866 struct ibv_modify_xrc_rcv_qp cmd;
871 IBV_INIT_CMD(&cmd, sizeof cmd, MODIFY_XRC_RCV_QP);
873 cmd.xrc_domain_handle = d->handle;
874 cmd.qp_num = xrc_qp_num;
875 cmd.attr_mask = attr_mask;
876 cmd.qkey = attr->qkey;
877 cmd.rq_psn = attr->rq_psn;
878 cmd.sq_psn = attr->sq_psn;
879 cmd.dest_qp_num = attr->dest_qp_num;
880 cmd.qp_access_flags = attr->qp_access_flags;
881 cmd.pkey_index = attr->pkey_index;
882 cmd.alt_pkey_index = attr->alt_pkey_index;
883 cmd.qp_state = attr->qp_state;
884 cmd.cur_qp_state = attr->cur_qp_state;
885 cmd.path_mtu = attr->path_mtu;
886 cmd.path_mig_state = attr->path_mig_state;
887 cmd.en_sqd_async_notify = attr->en_sqd_async_notify;
888 cmd.max_rd_atomic = attr->max_rd_atomic;
889 cmd.max_dest_rd_atomic = attr->max_dest_rd_atomic;
890 cmd.min_rnr_timer = attr->min_rnr_timer;
891 cmd.port_num = attr->port_num;
892 cmd.timeout = attr->timeout;
893 cmd.retry_cnt = attr->retry_cnt;
894 cmd.rnr_retry = attr->rnr_retry;
895 cmd.alt_port_num = attr->alt_port_num;
896 cmd.alt_timeout = attr->alt_timeout;
/* Primary path address vector. */
898 memcpy(cmd.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
899 cmd.dest.flow_label = attr->ah_attr.grh.flow_label;
900 cmd.dest.dlid = attr->ah_attr.dlid;
901 cmd.dest.reserved = 0;
902 cmd.dest.sgid_index = attr->ah_attr.grh.sgid_index;
903 cmd.dest.hop_limit = attr->ah_attr.grh.hop_limit;
904 cmd.dest.traffic_class = attr->ah_attr.grh.traffic_class;
905 cmd.dest.sl = attr->ah_attr.sl;
906 cmd.dest.src_path_bits = attr->ah_attr.src_path_bits;
907 cmd.dest.static_rate = attr->ah_attr.static_rate;
908 cmd.dest.is_global = attr->ah_attr.is_global;
909 cmd.dest.port_num = attr->ah_attr.port_num;
/* Alternate path address vector. */
911 memcpy(cmd.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
912 cmd.alt_dest.flow_label = attr->alt_ah_attr.grh.flow_label;
913 cmd.alt_dest.dlid = attr->alt_ah_attr.dlid;
914 cmd.alt_dest.reserved = 0;
915 cmd.alt_dest.sgid_index = attr->alt_ah_attr.grh.sgid_index;
916 cmd.alt_dest.hop_limit = attr->alt_ah_attr.grh.hop_limit;
917 cmd.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
918 cmd.alt_dest.sl = attr->alt_ah_attr.sl;
919 cmd.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
920 cmd.alt_dest.static_rate = attr->alt_ah_attr.static_rate;
921 cmd.alt_dest.is_global = attr->alt_ah_attr.is_global;
922 cmd.alt_dest.port_num = attr->alt_ah_attr.port_num;
924 cmd.reserved[0] = cmd.reserved[1] = 0;
926 if (write(d->context->cmd_fd, &cmd, sizeof cmd) != sizeof cmd)
/*
 * QUERY_XRC_RCV_QP: query a kernel-resident XRC receive QP, addressed
 * by (domain handle, QP number).  Response unpacking mirrors
 * ibv_cmd_query_qp, except init_attr carries only capabilities and
 * sq_sig_all (there is no userspace qp object to copy from).
 */
932 int ibv_cmd_query_xrc_rcv_qp(struct ibv_xrc_domain *d, uint32_t xrc_qp_num,
933 struct ibv_qp_attr *attr, int attr_mask,
934 struct ibv_qp_init_attr *init_attr)
936 struct ibv_query_xrc_rcv_qp cmd;
937 struct ibv_query_qp_resp resp;
942 IBV_INIT_CMD_RESP(&cmd, sizeof cmd, QUERY_XRC_RCV_QP, &resp,
944 cmd.xrc_domain_handle = d->handle;
945 cmd.qp_num = xrc_qp_num;
946 cmd.attr_mask = attr_mask;
948 if (write(d->context->cmd_fd, &cmd, sizeof cmd) != sizeof cmd)
951 VALGRIND_MAKE_MEM_DEFINED(&resp, sizeof resp);
953 attr->qkey = resp.qkey;
954 attr->rq_psn = resp.rq_psn;
955 attr->sq_psn = resp.sq_psn;
956 attr->dest_qp_num = resp.dest_qp_num;
957 attr->qp_access_flags = resp.qp_access_flags;
958 attr->pkey_index = resp.pkey_index;
959 attr->alt_pkey_index = resp.alt_pkey_index;
960 attr->qp_state = resp.qp_state;
961 attr->cur_qp_state = resp.cur_qp_state;
962 attr->path_mtu = resp.path_mtu;
963 attr->path_mig_state = resp.path_mig_state;
964 attr->sq_draining = resp.sq_draining;
965 attr->max_rd_atomic = resp.max_rd_atomic;
966 attr->max_dest_rd_atomic = resp.max_dest_rd_atomic;
967 attr->min_rnr_timer = resp.min_rnr_timer;
968 attr->port_num = resp.port_num;
969 attr->timeout = resp.timeout;
970 attr->retry_cnt = resp.retry_cnt;
971 attr->rnr_retry = resp.rnr_retry;
972 attr->alt_port_num = resp.alt_port_num;
973 attr->alt_timeout = resp.alt_timeout;
974 attr->cap.max_send_wr = resp.max_send_wr;
975 attr->cap.max_recv_wr = resp.max_recv_wr;
976 attr->cap.max_send_sge = resp.max_send_sge;
977 attr->cap.max_recv_sge = resp.max_recv_sge;
978 attr->cap.max_inline_data = resp.max_inline_data;
/* Primary path address vector. */
980 memcpy(attr->ah_attr.grh.dgid.raw, resp.dest.dgid, 16);
981 attr->ah_attr.grh.flow_label = resp.dest.flow_label;
982 attr->ah_attr.dlid = resp.dest.dlid;
983 attr->ah_attr.grh.sgid_index = resp.dest.sgid_index;
984 attr->ah_attr.grh.hop_limit = resp.dest.hop_limit;
985 attr->ah_attr.grh.traffic_class = resp.dest.traffic_class;
986 attr->ah_attr.sl = resp.dest.sl;
987 attr->ah_attr.src_path_bits = resp.dest.src_path_bits;
988 attr->ah_attr.static_rate = resp.dest.static_rate;
989 attr->ah_attr.is_global = resp.dest.is_global;
990 attr->ah_attr.port_num = resp.dest.port_num;
/* Alternate path address vector. */
992 memcpy(attr->alt_ah_attr.grh.dgid.raw, resp.alt_dest.dgid, 16);
993 attr->alt_ah_attr.grh.flow_label = resp.alt_dest.flow_label;
994 attr->alt_ah_attr.dlid = resp.alt_dest.dlid;
995 attr->alt_ah_attr.grh.sgid_index = resp.alt_dest.sgid_index;
996 attr->alt_ah_attr.grh.hop_limit = resp.alt_dest.hop_limit;
997 attr->alt_ah_attr.grh.traffic_class = resp.alt_dest.traffic_class;
998 attr->alt_ah_attr.sl = resp.alt_dest.sl;
999 attr->alt_ah_attr.src_path_bits = resp.alt_dest.src_path_bits;
1000 attr->alt_ah_attr.static_rate = resp.alt_dest.static_rate;
1001 attr->alt_ah_attr.is_global = resp.alt_dest.is_global;
1002 attr->alt_ah_attr.port_num = resp.alt_dest.port_num;
1004 init_attr->cap.max_send_wr = resp.max_send_wr;
1005 init_attr->cap.max_recv_wr = resp.max_recv_wr;
1006 init_attr->cap.max_send_sge = resp.max_send_sge;
1007 init_attr->cap.max_recv_sge = resp.max_recv_sge;
1008 init_attr->cap.max_inline_data = resp.max_inline_data;
1009 init_attr->sq_sig_all = resp.sq_sig_all;
/*
 * Legacy (ABI v1) DESTROY_QP: no response payload, so no event-count
 * synchronization is possible on this path.
 */
1014 static int ibv_cmd_destroy_qp_v1(struct ibv_qp *qp)
1016 struct ibv_destroy_qp_v1 cmd;
1018 IBV_INIT_CMD(&cmd, sizeof cmd, DESTROY_QP);
1019 cmd.qp_handle = qp->handle;
1021 if (write(qp->context->cmd_fd, &cmd, sizeof cmd) != sizeof cmd)
/*
 * POST_SEND (slow path via kernel): serialize the caller's linked list
 * of send work requests and their scatter/gather entries into a single
 * variable-length command on the stack, write it to the kernel, and
 * report the index of the first failing WR via resp.bad_wr (translated
 * back to *bad_wr — translation code missing from this excerpt).
 */
1027 int ibv_cmd_post_send(struct ibv_qp *ibqp, struct ibv_send_wr *wr,
1028 struct ibv_send_wr **bad_wr)
1030 struct ibv_post_send *cmd;
1031 struct ibv_post_send_resp resp;
1032 struct ibv_send_wr *i;
1033 struct ibv_kern_send_wr *n, *tmp;
1035 unsigned wr_count = 0;
1036 unsigned sge_count = 0;
/* First pass: count WRs and SGEs to size the command buffer. */
1040 for (i = wr; i; i = i->next) {
1042 sge_count += i->num_sge;
/* Layout: header | wr_count kernel WRs | sge_count SGEs. */
1045 cmd_size = sizeof *cmd + wr_count * sizeof *n + sge_count * sizeof *s;
1046 cmd = alloca(cmd_size);
1048 IBV_INIT_CMD_RESP(cmd, cmd_size, POST_SEND, &resp, sizeof resp);
1049 cmd->qp_handle = ibqp->handle;
1050 cmd->wr_count = wr_count;
1051 cmd->sge_count = sge_count;
1052 cmd->wqe_size = sizeof *n;
1054 n = (struct ibv_kern_send_wr *) ((void *) cmd + sizeof *cmd);
1055 s = (struct ibv_sge *) (n + wr_count);
/* Second pass: translate each ibv_send_wr into the kernel format. */
1058 for (i = wr; i; i = i->next) {
1059 tmp->wr_id = i->wr_id;
1060 tmp->num_sge = i->num_sge;
1061 tmp->opcode = i->opcode;
1062 tmp->send_flags = i->send_flags;
1063 tmp->imm_data = i->imm_data;
/* The wr union is interpreted by QP type / opcode. */
1064 if (ibqp->qp_type == IBV_QPT_UD) {
1065 tmp->wr.ud.ah = i->wr.ud.ah->handle;
1066 tmp->wr.ud.remote_qpn = i->wr.ud.remote_qpn;
1067 tmp->wr.ud.remote_qkey = i->wr.ud.remote_qkey;
1069 switch (i->opcode) {
1070 case IBV_WR_RDMA_WRITE:
1071 case IBV_WR_RDMA_WRITE_WITH_IMM:
1072 case IBV_WR_RDMA_READ:
1073 tmp->wr.rdma.remote_addr =
1074 i->wr.rdma.remote_addr;
1075 tmp->wr.rdma.rkey = i->wr.rdma.rkey;
1077 case IBV_WR_ATOMIC_CMP_AND_SWP:
1078 case IBV_WR_ATOMIC_FETCH_AND_ADD:
1079 tmp->wr.atomic.remote_addr =
1080 i->wr.atomic.remote_addr;
1081 tmp->wr.atomic.compare_add =
1082 i->wr.atomic.compare_add;
1083 tmp->wr.atomic.swap = i->wr.atomic.swap;
1084 tmp->wr.atomic.rkey = i->wr.atomic.rkey;
/* Copy this WR's scatter/gather list into the trailing SGE area. */
1092 memcpy(s, i->sg_list, tmp->num_sge * sizeof *s);
1100 if (write(ibqp->context->cmd_fd, cmd, cmd_size) != cmd_size)
1103 VALGRIND_MAKE_MEM_DEFINED(&resp, sizeof resp);
/* resp.bad_wr is the index of the first WR the kernel rejected. */
1105 wr_count = resp.bad_wr;
/*
 * ibv_cmd_post_recv() - marshal a linked list of receive work requests
 * into a single POST_RECV uverbs command and write it to the kernel.
 *
 * Same two-pass scheme as ibv_cmd_post_send(): count, build a
 * contiguous header + kernel-WR array + SGE array buffer on the stack,
 * then one write(). Receive WRs carry only wr_id and an SGE list.
 * NOTE(review): declarations of `s`/`cmd_size`, `tmp` handling, braces
 * and the bad_wr/return tail appear elided from this extract.
 */
1117 int ibv_cmd_post_recv(struct ibv_qp *ibqp, struct ibv_recv_wr *wr,
1118 struct ibv_recv_wr **bad_wr)
1120 struct ibv_post_recv *cmd;
1121 struct ibv_post_recv_resp resp;
1122 struct ibv_recv_wr *i;
1123 struct ibv_kern_recv_wr *n, *tmp;
1125 unsigned wr_count = 0;
1126 unsigned sge_count = 0;
/* First pass: size the command buffer. */
1130 for (i = wr; i; i = i->next) {
1132 sge_count += i->num_sge;
1135 cmd_size = sizeof *cmd + wr_count * sizeof *n + sge_count * sizeof *s;
1136 cmd = alloca(cmd_size);
1138 IBV_INIT_CMD_RESP(cmd, cmd_size, POST_RECV, &resp, sizeof resp);
1139 cmd->qp_handle = ibqp->handle;
1140 cmd->wr_count = wr_count;
1141 cmd->sge_count = sge_count;
1142 cmd->wqe_size = sizeof *n;
/* Kernel-WR array follows the header; SGE array follows the WRs. */
1144 n = (struct ibv_kern_recv_wr *) ((void *) cmd + sizeof *cmd);
1145 s = (struct ibv_sge *) (n + wr_count);
/* Second pass: copy each user WR and its scatter/gather list. */
1148 for (i = wr; i; i = i->next) {
1149 tmp->wr_id = i->wr_id;
1150 tmp->num_sge = i->num_sge;
1153 memcpy(s, i->sg_list, tmp->num_sge * sizeof *s);
1161 if (write(ibqp->context->cmd_fd, cmd, cmd_size) != cmd_size)
/* resp was filled by the kernel through the fd; inform valgrind. */
1164 VALGRIND_MAKE_MEM_DEFINED(&resp, sizeof resp);
/* Index of the first WR the kernel failed to post. */
1166 wr_count = resp.bad_wr;
/*
 * ibv_cmd_post_srq_recv() - marshal receive work requests for a shared
 * receive queue into a single POST_SRQ_RECV uverbs command.
 *
 * Identical marshalling to ibv_cmd_post_recv(), except the command
 * addresses an SRQ handle rather than a QP handle.
 * NOTE(review): declarations of `s`/`cmd_size`, `tmp` handling, braces
 * and the bad_wr/return tail appear elided from this extract.
 */
1178 int ibv_cmd_post_srq_recv(struct ibv_srq *srq, struct ibv_recv_wr *wr,
1179 struct ibv_recv_wr **bad_wr)
1181 struct ibv_post_srq_recv *cmd;
1182 struct ibv_post_srq_recv_resp resp;
1183 struct ibv_recv_wr *i;
1184 struct ibv_kern_recv_wr *n, *tmp;
1186 unsigned wr_count = 0;
1187 unsigned sge_count = 0;
/* First pass: size the command buffer. */
1191 for (i = wr; i; i = i->next) {
1193 sge_count += i->num_sge;
1196 cmd_size = sizeof *cmd + wr_count * sizeof *n + sge_count * sizeof *s;
1197 cmd = alloca(cmd_size);
1199 IBV_INIT_CMD_RESP(cmd, cmd_size, POST_SRQ_RECV, &resp, sizeof resp);
1200 cmd->srq_handle = srq->handle;
1201 cmd->wr_count = wr_count;
1202 cmd->sge_count = sge_count;
1203 cmd->wqe_size = sizeof *n;
/* Kernel-WR array follows the header; SGE array follows the WRs. */
1205 n = (struct ibv_kern_recv_wr *) ((void *) cmd + sizeof *cmd);
1206 s = (struct ibv_sge *) (n + wr_count);
/* Second pass: copy each user WR and its scatter/gather list. */
1209 for (i = wr; i; i = i->next) {
1210 tmp->wr_id = i->wr_id;
1211 tmp->num_sge = i->num_sge;
1214 memcpy(s, i->sg_list, tmp->num_sge * sizeof *s);
1222 if (write(srq->context->cmd_fd, cmd, cmd_size) != cmd_size)
/* resp was filled by the kernel through the fd; inform valgrind. */
1225 VALGRIND_MAKE_MEM_DEFINED(&resp, sizeof resp);
/* Index of the first WR the kernel failed to post. */
1227 wr_count = resp.bad_wr;
/*
 * ibv_cmd_create_ah() - create an address handle on @pd by flattening
 * *attr into a CREATE_AH uverbs command.
 *
 * The caller-allocated @ah is identified to the kernel by its user
 * pointer (user_handle) and, on success, is filled with the kernel
 * handle and owning context.
 * NOTE(review): the error-path body of the write() check and the final
 * return appear elided from this extract.
 */
1239 int ibv_cmd_create_ah(struct ibv_pd *pd, struct ibv_ah *ah,
1240 struct ibv_ah_attr *attr)
1242 struct ibv_create_ah cmd;
1243 struct ibv_create_ah_resp resp;
1245 IBV_INIT_CMD_RESP(&cmd, sizeof cmd, CREATE_AH, &resp, sizeof resp);
/* user_handle lets async paths map kernel events back to this ah. */
1246 cmd.user_handle = (uintptr_t) ah;
1247 cmd.pd_handle = pd->handle;
/* Copy every address-vector field into the kernel ABI layout. */
1248 cmd.attr.dlid = attr->dlid;
1249 cmd.attr.sl = attr->sl;
1250 cmd.attr.src_path_bits = attr->src_path_bits;
1251 cmd.attr.static_rate = attr->static_rate;
1252 cmd.attr.is_global = attr->is_global;
1253 cmd.attr.port_num = attr->port_num;
/* GRH fields are only meaningful when is_global is set; copied anyway. */
1254 cmd.attr.grh.flow_label = attr->grh.flow_label;
1255 cmd.attr.grh.sgid_index = attr->grh.sgid_index;
1256 cmd.attr.grh.hop_limit = attr->grh.hop_limit;
1257 cmd.attr.grh.traffic_class = attr->grh.traffic_class;
/* 16-byte raw GID copy. */
1258 memcpy(cmd.attr.grh.dgid, attr->grh.dgid.raw, 16);
1260 if (write(pd->context->cmd_fd, &cmd, sizeof cmd) != sizeof cmd)
/* resp was filled by the kernel through the fd; inform valgrind. */
1263 VALGRIND_MAKE_MEM_DEFINED(&resp, sizeof resp);
1265 ah->handle = resp.handle;
1266 ah->context = pd->context;
/*
 * ibv_cmd_destroy_ah() - destroy an address handle via the DESTROY_AH
 * uverbs command (no response structure).
 * NOTE(review): the error path and final return appear elided from
 * this extract.
 */
1271 int ibv_cmd_destroy_ah(struct ibv_ah *ah)
1273 struct ibv_destroy_ah cmd;
1275 IBV_INIT_CMD(&cmd, sizeof cmd, DESTROY_AH);
1276 cmd.ah_handle = ah->handle;
1278 if (write(ah->context->cmd_fd, &cmd, sizeof cmd) != sizeof cmd)
/*
 * ibv_cmd_destroy_qp() - destroy a QP via the DESTROY_QP uverbs
 * command, then wait until userspace has consumed every async event
 * the kernel reported for this QP before returning.
 * NOTE(review): the condition guarding the fall-back to the ABI-v1
 * variant (presumably an abi_ver check), the write() error path and
 * the final return appear elided from this extract.
 */
1284 int ibv_cmd_destroy_qp(struct ibv_qp *qp)
1286 struct ibv_destroy_qp cmd;
1287 struct ibv_destroy_qp_resp resp;
/* Old-ABI kernels get the v1 command with no response struct. */
1290 return ibv_cmd_destroy_qp_v1(qp);
1292 IBV_INIT_CMD_RESP(&cmd, sizeof cmd, DESTROY_QP, &resp, sizeof resp);
1293 cmd.qp_handle = qp->handle;
1296 if (write(qp->context->cmd_fd, &cmd, sizeof cmd) != sizeof cmd)
/* resp was filled by the kernel through the fd; inform valgrind. */
1299 VALGRIND_MAKE_MEM_DEFINED(&resp, sizeof resp);
/* Block until all events the kernel reported have been acked locally,
 * so no event can reference the QP after it is freed. */
1301 pthread_mutex_lock(&qp->mutex);
1302 while (qp->events_completed != resp.events_reported)
1303 pthread_cond_wait(&qp->cond, &qp->mutex);
1304 pthread_mutex_unlock(&qp->mutex);
/*
 * ibv_cmd_attach_mcast() - attach a QP to the multicast group
 * identified by @gid (and @lid) via the ATTACH_MCAST uverbs command.
 * NOTE(review): the assignment of @lid into the command, the error
 * path and the final return appear elided from this extract.
 */
1309 int ibv_cmd_attach_mcast(struct ibv_qp *qp, const union ibv_gid *gid, uint16_t lid)
1311 struct ibv_attach_mcast cmd;
1313 IBV_INIT_CMD(&cmd, sizeof cmd, ATTACH_MCAST);
/* Raw 16-byte GID copy into the fixed-size command field. */
1314 memcpy(cmd.gid, gid->raw, sizeof cmd.gid);
1315 cmd.qp_handle = qp->handle;
1319 if (write(qp->context->cmd_fd, &cmd, sizeof cmd) != sizeof cmd)
/*
 * ibv_cmd_detach_mcast() - detach a QP from the multicast group
 * identified by @gid (and @lid) via the DETACH_MCAST uverbs command.
 * Mirror image of ibv_cmd_attach_mcast().
 * NOTE(review): the assignment of @lid into the command, the error
 * path and the final return appear elided from this extract.
 */
1325 int ibv_cmd_detach_mcast(struct ibv_qp *qp, const union ibv_gid *gid, uint16_t lid)
1327 struct ibv_detach_mcast cmd;
1329 IBV_INIT_CMD(&cmd, sizeof cmd, DETACH_MCAST);
/* Raw 16-byte GID copy into the fixed-size command field. */
1330 memcpy(cmd.gid, gid->raw, sizeof cmd.gid);
1331 cmd.qp_handle = qp->handle;
1335 if (write(qp->context->cmd_fd, &cmd, sizeof cmd) != sizeof cmd)
/*
 * ibv_cmd_open_xrc_domain() - open (or create) an XRC domain backed by
 * the file described by @fd/@oflag via the OPEN_XRC_DOMAIN command.
 * On success the caller's domain object @d receives the kernel handle
 * from the driver-sized response buffer @resp.
 * NOTE(review): the lines that place @fd/@oflag into the command, any
 * resp_size validation, the error path and the final return appear
 * elided from this extract.
 */
1341 int ibv_cmd_open_xrc_domain(struct ibv_context *context, int fd, int oflag,
1342 struct ibv_xrc_domain *d,
1343 struct ibv_open_xrc_domain_resp *resp,
1346 struct ibv_open_xrc_domain cmd;
1351 IBV_INIT_CMD_RESP(&cmd, sizeof cmd, OPEN_XRC_DOMAIN, resp, resp_size);
1355 if (write(context->cmd_fd, &cmd, sizeof cmd) != sizeof cmd)
1358 d->handle = resp->xrcd_handle;
/*
 * ibv_cmd_close_xrc_domain() - close an XRC domain via the
 * CLOSE_XRC_DOMAIN uverbs command (no response structure).
 * NOTE(review): the error path and final return appear elided from
 * this extract.
 */
1363 int ibv_cmd_close_xrc_domain(struct ibv_xrc_domain *d)
1365 struct ibv_close_xrc_domain cmd;
1370 IBV_INIT_CMD(&cmd, sizeof cmd, CLOSE_XRC_DOMAIN);
1371 cmd.xrcd_handle = d->handle;
1373 if (write(d->context->cmd_fd, &cmd, sizeof cmd) != sizeof cmd)
/*
 * ibv_cmd_reg_xrc_rcv_qp() - register this process's interest in the
 * XRC receive QP @xrc_qp_num within domain @d (REG_XRC_RCV_QP command,
 * no response structure).
 * NOTE(review): the error path and final return appear elided from
 * this extract.
 */
1378 int ibv_cmd_reg_xrc_rcv_qp(struct ibv_xrc_domain *d, uint32_t xrc_qp_num)
1380 struct ibv_reg_xrc_rcv_qp cmd;
1385 IBV_INIT_CMD(&cmd, sizeof cmd, REG_XRC_RCV_QP);
1386 cmd.xrc_domain_handle = d->handle;
1387 cmd.qp_num = xrc_qp_num;
1389 if (write(d->context->cmd_fd, &cmd, sizeof cmd) != sizeof cmd)
1394 int ibv_cmd_unreg_xrc_rcv_qp(struct ibv_xrc_domain *d, uint32_t xrc_qp_num)
1396 struct ibv_unreg_xrc_rcv_qp cmd;
1401 IBV_INIT_CMD(&cmd, sizeof cmd, UNREG_XRC_RCV_QP);
1402 cmd.xrc_domain_handle = d->handle;
1403 cmd.qp_num = xrc_qp_num;
1405 if (write(d->context->cmd_fd, &cmd, sizeof cmd) != sizeof cmd)