/*
 * Copyright (c) 2010-2014 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#if !defined(RDMA_VERBS_H)
#define RDMA_VERBS_H

#include <assert.h>
#include <errno.h>
#include <stdint.h>

#include <infiniband/verbs.h>
#include <rdma/rdma_cma.h>
/*
 * Convert a 0-or-positive-errno style return code into the libibverbs
 * convention of returning -1 with errno set.
 *
 * @ret: 0 on success, or a positive errno value describing the failure.
 * Returns 0 if @ret was 0; otherwise stores @ret into errno and
 * returns -1.
 */
static inline int rdma_seterrno(int ret)
{
	if (ret) {
		errno = ret;
		ret = -1;
	}
	return ret;
}
/*
 * Shared receive queues.
 */
int rdma_create_srq(struct rdma_cm_id *id, struct ibv_pd *pd,
		    struct ibv_srq_init_attr *attr);
int rdma_create_srq_ex(struct rdma_cm_id *id, struct ibv_srq_init_attr_ex *attr);

void rdma_destroy_srq(struct rdma_cm_id *id);
65 * Memory registration helpers.
67 static inline struct ibv_mr *
68 rdma_reg_msgs(struct rdma_cm_id *id, void *addr, size_t length)
70 return ibv_reg_mr(id->pd, addr, length, IBV_ACCESS_LOCAL_WRITE);
73 static inline struct ibv_mr *
74 rdma_reg_read(struct rdma_cm_id *id, void *addr, size_t length)
76 return ibv_reg_mr(id->pd, addr, length, IBV_ACCESS_LOCAL_WRITE |
77 IBV_ACCESS_REMOTE_READ);
80 static inline struct ibv_mr *
81 rdma_reg_write(struct rdma_cm_id *id, void *addr, size_t length)
83 return ibv_reg_mr(id->pd, addr, length, IBV_ACCESS_LOCAL_WRITE |
84 IBV_ACCESS_REMOTE_WRITE);
/*
 * Deregister a memory region registered through one of the rdma_reg_*
 * helpers.  Returns 0 on success, -1 with errno set on failure.
 */
static inline int
rdma_dereg_mr(struct ibv_mr *mr)
{
	return rdma_seterrno(ibv_dereg_mr(mr));
}
95 * Vectored send, receive, and RDMA operations.
96 * Support multiple scatter-gather entries.
99 rdma_post_recvv(struct rdma_cm_id *id, void *context, struct ibv_sge *sgl,
102 struct ibv_recv_wr wr, *bad;
104 wr.wr_id = (uintptr_t) context;
110 return rdma_seterrno(ibv_post_srq_recv(id->srq, &wr, &bad));
112 return rdma_seterrno(ibv_post_recv(id->qp, &wr, &bad));
116 rdma_post_sendv(struct rdma_cm_id *id, void *context, struct ibv_sge *sgl,
119 struct ibv_send_wr wr, *bad;
121 wr.wr_id = (uintptr_t) context;
125 wr.opcode = IBV_WR_SEND;
126 wr.send_flags = flags;
128 return rdma_seterrno(ibv_post_send(id->qp, &wr, &bad));
132 rdma_post_readv(struct rdma_cm_id *id, void *context, struct ibv_sge *sgl,
133 int nsge, int flags, uint64_t remote_addr, uint32_t rkey)
135 struct ibv_send_wr wr, *bad;
137 wr.wr_id = (uintptr_t) context;
141 wr.opcode = IBV_WR_RDMA_READ;
142 wr.send_flags = flags;
143 wr.wr.rdma.remote_addr = remote_addr;
144 wr.wr.rdma.rkey = rkey;
146 return rdma_seterrno(ibv_post_send(id->qp, &wr, &bad));
150 rdma_post_writev(struct rdma_cm_id *id, void *context, struct ibv_sge *sgl,
151 int nsge, int flags, uint64_t remote_addr, uint32_t rkey)
153 struct ibv_send_wr wr, *bad;
155 wr.wr_id = (uintptr_t) context;
159 wr.opcode = IBV_WR_RDMA_WRITE;
160 wr.send_flags = flags;
161 wr.wr.rdma.remote_addr = remote_addr;
162 wr.wr.rdma.rkey = rkey;
164 return rdma_seterrno(ibv_post_send(id->qp, &wr, &bad));
168 * Simple send, receive, and RDMA calls.
171 rdma_post_recv(struct rdma_cm_id *id, void *context, void *addr,
172 size_t length, struct ibv_mr *mr)
176 assert((addr >= mr->addr) &&
177 (((uint8_t *) addr + length) <= ((uint8_t *) mr->addr + mr->length)));
178 sge.addr = (uint64_t) (uintptr_t) addr;
179 sge.length = (uint32_t) length;
182 return rdma_post_recvv(id, context, &sge, 1);
186 rdma_post_send(struct rdma_cm_id *id, void *context, void *addr,
187 size_t length, struct ibv_mr *mr, int flags)
191 sge.addr = (uint64_t) (uintptr_t) addr;
192 sge.length = (uint32_t) length;
193 sge.lkey = mr ? mr->lkey : 0;
195 return rdma_post_sendv(id, context, &sge, 1, flags);
199 rdma_post_read(struct rdma_cm_id *id, void *context, void *addr,
200 size_t length, struct ibv_mr *mr, int flags,
201 uint64_t remote_addr, uint32_t rkey)
205 sge.addr = (uint64_t) (uintptr_t) addr;
206 sge.length = (uint32_t) length;
209 return rdma_post_readv(id, context, &sge, 1, flags, remote_addr, rkey);
213 rdma_post_write(struct rdma_cm_id *id, void *context, void *addr,
214 size_t length, struct ibv_mr *mr, int flags,
215 uint64_t remote_addr, uint32_t rkey)
219 sge.addr = (uint64_t) (uintptr_t) addr;
220 sge.length = (uint32_t) length;
221 sge.lkey = mr ? mr->lkey : 0;
223 return rdma_post_writev(id, context, &sge, 1, flags, remote_addr, rkey);
227 rdma_post_ud_send(struct rdma_cm_id *id, void *context, void *addr,
228 size_t length, struct ibv_mr *mr, int flags,
229 struct ibv_ah *ah, uint32_t remote_qpn)
231 struct ibv_send_wr wr, *bad;
234 sge.addr = (uint64_t) (uintptr_t) addr;
235 sge.length = (uint32_t) length;
236 sge.lkey = mr ? mr->lkey : 0;
238 wr.wr_id = (uintptr_t) context;
242 wr.opcode = IBV_WR_SEND;
243 wr.send_flags = flags;
245 wr.wr.ud.remote_qpn = remote_qpn;
246 wr.wr.ud.remote_qkey = RDMA_UDP_QKEY;
248 return rdma_seterrno(ibv_post_send(id->qp, &wr, &bad));
252 rdma_get_send_comp(struct rdma_cm_id *id, struct ibv_wc *wc)
259 ret = ibv_poll_cq(id->send_cq, 1, wc);
263 ret = ibv_req_notify_cq(id->send_cq, 0);
265 return rdma_seterrno(ret);
267 ret = ibv_poll_cq(id->send_cq, 1, wc);
271 ret = ibv_get_cq_event(id->send_cq_channel, &cq, &context);
275 assert(cq == id->send_cq && context == id);
276 ibv_ack_cq_events(id->send_cq, 1);
279 return (ret < 0) ? rdma_seterrno(ret) : ret;
283 rdma_get_recv_comp(struct rdma_cm_id *id, struct ibv_wc *wc)
290 ret = ibv_poll_cq(id->recv_cq, 1, wc);
294 ret = ibv_req_notify_cq(id->recv_cq, 0);
296 return rdma_seterrno(ret);
298 ret = ibv_poll_cq(id->recv_cq, 1, wc);
302 ret = ibv_get_cq_event(id->recv_cq_channel, &cq, &context);
306 assert(cq == id->recv_cq && context == id);
307 ibv_ack_cq_events(id->recv_cq, 1);
310 return (ret < 0) ? rdma_seterrno(ret) : ret;
#endif /* RDMA_VERBS_H */