2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
36 #include <infiniband/endian.h>
38 #include <sys/types.h>
48 /* Hack to avoid GCC's -Wmissing-prototypes and the similar error from sparse
49 with these prototypes. Symbol versionining requires the goofy names, the
50 prototype must match the version in verbs.h.
52 struct ibv_device **__ibv_get_device_list(int *num_devices);
53 void __ibv_free_device_list(struct ibv_device **list);
54 const char *__ibv_get_device_name(struct ibv_device *device);
55 __be64 __ibv_get_device_guid(struct ibv_device *device);
56 struct ibv_context *__ibv_open_device(struct ibv_device *device);
57 int __ibv_close_device(struct ibv_context *context);
58 int __ibv_get_async_event(struct ibv_context *context,
59 struct ibv_async_event *event);
60 void __ibv_ack_async_event(struct ibv_async_event *event);
/* Process-wide device list, filled in exactly once by count_devices().
 * num_devices is negative when enumeration failed (see the check in
 * __ibv_get_device_list()). */
static pthread_once_t device_list_once = PTHREAD_ONCE_INIT;
static int num_devices;
static struct ibv_device **device_list;

/* pthread_once() callback: enumerate the devices a single time for the
 * life of the process.  ibverbs_init() fills device_list and returns a
 * count, or a negative value on failure. */
static void count_devices(void)
{
	num_devices = ibverbs_init(&device_list);
}
71 struct ibv_device **__ibv_get_device_list(int *num)
73 struct ibv_device **l;
79 pthread_once(&device_list_once, count_devices);
81 if (num_devices < 0) {
86 l = calloc(num_devices + 1, sizeof (struct ibv_device *));
92 for (i = 0; i < num_devices; ++i)
93 l[i] = device_list[i];
99 default_symver(__ibv_get_device_list, ibv_get_device_list);
/* Release an array returned by ibv_get_device_list().  Only the array
 * itself is freed; the ibv_device objects stay owned by the library. */
void __ibv_free_device_list(struct ibv_device **list)
{
	free(list);
}
default_symver(__ibv_free_device_list, ibv_free_device_list);
107 const char *__ibv_get_device_name(struct ibv_device *device)
111 default_symver(__ibv_get_device_name, ibv_get_device_name);
113 __be64 __ibv_get_device_guid(struct ibv_device *device)
120 if (ibv_read_sysfs_file(device->ibdev_path, "node_guid",
121 attr, sizeof attr) < 0)
124 if (sscanf(attr, "%hx:%hx:%hx:%hx",
125 parts, parts + 1, parts + 2, parts + 3) != 4)
128 for (i = 0; i < 4; ++i)
129 guid = (guid << 16) | parts[i];
131 return htobe64(guid);
133 default_symver(__ibv_get_device_guid, ibv_get_device_guid);
135 void verbs_init_cq(struct ibv_cq *cq, struct ibv_context *context,
136 struct ibv_comp_channel *channel,
139 cq->context = context;
140 cq->channel = channel;
143 pthread_mutex_lock(&context->mutex);
144 ++cq->channel->refcnt;
145 pthread_mutex_unlock(&context->mutex);
148 cq->cq_context = cq_context;
149 cq->comp_events_completed = 0;
150 cq->async_events_completed = 0;
151 pthread_mutex_init(&cq->mutex, NULL);
152 pthread_cond_init(&cq->cond, NULL);
155 static struct ibv_cq_ex *
156 __lib_ibv_create_cq_ex(struct ibv_context *context,
157 struct ibv_cq_init_attr_ex *cq_attr)
159 struct verbs_context *vctx = verbs_get_ctx(context);
160 struct ibv_cq_ex *cq;
162 if (cq_attr->wc_flags & ~IBV_CREATE_CQ_SUP_WC_FLAGS) {
167 cq = vctx->priv->create_cq_ex(context, cq_attr);
170 verbs_init_cq(ibv_cq_ex_to_cq(cq), context,
171 cq_attr->channel, cq_attr->cq_context);
176 struct ibv_context *__ibv_open_device(struct ibv_device *device)
178 struct verbs_device *verbs_device = verbs_get_device(device);
181 struct ibv_context *context;
182 struct verbs_context *context_ex;
184 if (asprintf(&devpath, "/dev/%s", device->dev_name) < 0)
188 * We'll only be doing writes, but we need O_RDWR in case the
189 * provider needs to mmap() the file.
191 cmd_fd = open(devpath, O_RDWR | O_CLOEXEC);
197 if (!verbs_device->ops->init_context) {
198 context = verbs_device->ops->alloc_context(device, cmd_fd);
202 struct verbs_ex_private *priv;
204 /* Library now allocates the context */
205 context_ex = calloc(1, sizeof(*context_ex) +
206 verbs_device->size_of_context);
212 priv = calloc(1, sizeof(*priv));
219 context_ex->priv = priv;
220 context_ex->context.abi_compat = __VERBS_ABI_IS_EXTENDED;
221 context_ex->sz = sizeof(*context_ex);
223 context = &context_ex->context;
224 ret = verbs_device->ops->init_context(verbs_device, context, cmd_fd);
228 * In order to maintain backward/forward binary compatibility
229 * with apps compiled against libibverbs-1.1.8 that use the
230 * flow steering addition, we need to set the two
231 * ABI_placeholder entries to match the driver set flow
232 * entries. This is because apps compiled against
233 * libibverbs-1.1.8 use an inline ibv_create_flow and
234 * ibv_destroy_flow function that looks in the placeholder
235 * spots for the proper entry points. For apps compiled
236 * against libibverbs-1.1.9 and later, the inline functions
237 * will be looking in the right place.
239 context_ex->ABI_placeholder1 = (void (*)(void)) context_ex->ibv_create_flow;
240 context_ex->ABI_placeholder2 = (void (*)(void)) context_ex->ibv_destroy_flow;
242 if (context_ex->create_cq_ex) {
243 priv->create_cq_ex = context_ex->create_cq_ex;
244 context_ex->create_cq_ex = __lib_ibv_create_cq_ex;
248 context->device = device;
249 context->cmd_fd = cmd_fd;
250 pthread_mutex_init(&context->mutex, NULL);
255 free(context_ex->priv);
261 default_symver(__ibv_open_device, ibv_open_device);
263 int __ibv_close_device(struct ibv_context *context)
265 int async_fd = context->async_fd;
266 int cmd_fd = context->cmd_fd;
268 struct verbs_context *context_ex;
269 struct verbs_device *verbs_device = verbs_get_device(context->device);
271 context_ex = verbs_get_ctx(context);
273 verbs_device->ops->uninit_context(verbs_device, context);
274 free(context_ex->priv);
277 verbs_device->ops->free_context(context);
287 default_symver(__ibv_close_device, ibv_close_device);
289 int __ibv_get_async_event(struct ibv_context *context,
290 struct ibv_async_event *event)
292 struct ibv_kern_async_event ev;
294 if (read(context->async_fd, &ev, sizeof ev) != sizeof ev)
297 event->event_type = ev.event_type;
299 switch (event->event_type) {
300 case IBV_EVENT_CQ_ERR:
301 event->element.cq = (void *) (uintptr_t) ev.element;
304 case IBV_EVENT_QP_FATAL:
305 case IBV_EVENT_QP_REQ_ERR:
306 case IBV_EVENT_QP_ACCESS_ERR:
307 case IBV_EVENT_COMM_EST:
308 case IBV_EVENT_SQ_DRAINED:
309 case IBV_EVENT_PATH_MIG:
310 case IBV_EVENT_PATH_MIG_ERR:
311 case IBV_EVENT_QP_LAST_WQE_REACHED:
312 event->element.qp = (void *) (uintptr_t) ev.element;
315 case IBV_EVENT_SRQ_ERR:
316 case IBV_EVENT_SRQ_LIMIT_REACHED:
317 event->element.srq = (void *) (uintptr_t) ev.element;
320 case IBV_EVENT_WQ_FATAL:
321 event->element.wq = (void *) (uintptr_t) ev.element;
324 event->element.port_num = ev.element;
328 if (context->ops.async_event)
329 context->ops.async_event(event);
333 default_symver(__ibv_get_async_event, ibv_get_async_event);
335 void __ibv_ack_async_event(struct ibv_async_event *event)
337 switch (event->event_type) {
338 case IBV_EVENT_CQ_ERR:
340 struct ibv_cq *cq = event->element.cq;
342 pthread_mutex_lock(&cq->mutex);
343 ++cq->async_events_completed;
344 pthread_cond_signal(&cq->cond);
345 pthread_mutex_unlock(&cq->mutex);
350 case IBV_EVENT_QP_FATAL:
351 case IBV_EVENT_QP_REQ_ERR:
352 case IBV_EVENT_QP_ACCESS_ERR:
353 case IBV_EVENT_COMM_EST:
354 case IBV_EVENT_SQ_DRAINED:
355 case IBV_EVENT_PATH_MIG:
356 case IBV_EVENT_PATH_MIG_ERR:
357 case IBV_EVENT_QP_LAST_WQE_REACHED:
359 struct ibv_qp *qp = event->element.qp;
361 pthread_mutex_lock(&qp->mutex);
362 ++qp->events_completed;
363 pthread_cond_signal(&qp->cond);
364 pthread_mutex_unlock(&qp->mutex);
369 case IBV_EVENT_SRQ_ERR:
370 case IBV_EVENT_SRQ_LIMIT_REACHED:
372 struct ibv_srq *srq = event->element.srq;
374 pthread_mutex_lock(&srq->mutex);
375 ++srq->events_completed;
376 pthread_cond_signal(&srq->cond);
377 pthread_mutex_unlock(&srq->mutex);
382 case IBV_EVENT_WQ_FATAL:
384 struct ibv_wq *wq = event->element.wq;
386 pthread_mutex_lock(&wq->mutex);
387 ++wq->events_completed;
388 pthread_cond_signal(&wq->cond);
389 pthread_mutex_unlock(&wq->mutex);
398 default_symver(__ibv_ack_async_event, ibv_ack_async_event);