2 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
36 #endif /* HAVE_CONFIG_H */
39 #include <netinet/in.h>
40 #include <sys/types.h>
48 #include <infiniband/arch.h>
/* Serializes lazy, one-time initialization of the device cache below. */
52 static pthread_mutex_t device_list_lock = PTHREAD_MUTEX_INITIALIZER;
/* Number of entries in device_list; filled in by ibverbs_init(). */
53 static int num_devices;
/* Library-wide cache of discovered devices, shared by all callers. */
54 static struct ibv_device **device_list;
/*
 * ibv_get_device_list() - return a caller-owned, NULL-terminated array of
 * pointers into the library's cached global device list.
 * NOTE(review): this excerpt is missing several lines (opening brace, error
 * paths, the *num assignment, return); comments cover only visible code.
 */
56 struct ibv_device **__ibv_get_device_list(int *num)
58 struct ibv_device **l = 0;
/* Serialize against concurrent first-time device discovery. */
64 pthread_mutex_lock(&device_list_lock);
/* Populate the global cache; negative result indicates failure. */
67 num_devices = ibverbs_init(&device_list);
69 if (num_devices < 0) {
/* +1 slot so the returned array is NULL-terminated. */
74 l = calloc(num_devices + 1, sizeof (struct ibv_device *));
/* Shallow-copy the cached pointers into the caller's array. */
80 for (i = 0; i < num_devices; ++i)
81 l[i] = device_list[i];
86 pthread_mutex_unlock(&device_list_lock);
89 default_symver(__ibv_get_device_list, ibv_get_device_list);
/*
 * ibv_free_device_list() - release a list obtained from
 * ibv_get_device_list().  Body is not visible in this excerpt; presumably
 * it frees only the array, not the cached devices -- confirm in full source.
 */
91 void __ibv_free_device_list(struct ibv_device **list)
95 default_symver(__ibv_free_device_list, ibv_free_device_list);
/*
 * ibv_get_device_name() - return the kernel device name for @device.
 * NOTE(review): the body is not visible in this excerpt; which field of
 * struct ibv_device it returns must be confirmed in the full source.
 */
97 const char *__ibv_get_device_name(struct ibv_device *device)
101 default_symver(__ibv_get_device_name, ibv_get_device_name);
/*
 * ibv_get_device_guid() - read the device's node GUID from sysfs and pack
 * it into a 64-bit value.
 * NOTE(review): declarations of attr/parts/guid/i, the error returns and the
 * final return statement are missing from this excerpt.
 */
103 uint64_t __ibv_get_device_guid(struct ibv_device *device)
/* sysfs exposes the GUID as four colon-separated 16-bit hex words. */
110 if (ibv_read_sysfs_file(device->ibdev_path, "node_guid",
111 attr, sizeof attr) < 0)
/* Reject malformed sysfs content: exactly 4 hex fields are expected. */
114 if (sscanf(attr, "%hx:%hx:%hx:%hx",
115 parts, parts + 1, parts + 2, parts + 3) != 4)
/* Pack high-to-low: parts[0] lands in the most significant 16 bits. */
118 for (i = 0; i < 4; ++i)
119 guid = (guid << 16) | parts[i];
123 default_symver(__ibv_get_device_guid, ibv_get_device_guid);
/*
 * ibv_open_device() - open the device's character device node and ask the
 * provider to create a verbs context bound to that file descriptor.
 * NOTE(review): error-path lines (freeing devpath, closing cmd_fd on
 * failure, return statements) are missing from this excerpt.
 */
125 struct ibv_context *__ibv_open_device(struct ibv_device *device)
129 struct ibv_context *context;
/* Build "/dev/<dev_name>"; asprintf allocates devpath on the heap. */
131 if (asprintf(&devpath, "/dev/%s", device->dev_name) < 0)
135 * We'll only be doing writes, but we need O_RDWR in case the
136 * provider needs to mmap() the file.
138 cmd_fd = open(devpath, O_RDWR);
/* Provider hook allocates and initializes its context structure. */
144 context = device->ops.alloc_context(device, cmd_fd);
/* Wire up the generic fields the provider does not own. */
148 context->device = device;
149 context->cmd_fd = cmd_fd;
150 pthread_mutex_init(&context->mutex, NULL);
159 default_symver(__ibv_open_device, ibv_open_device);
/*
 * ibv_close_device() - tear down a context: release legacy ABI-compat state
 * and let the provider free the context.  The fds captured below are
 * presumably closed in lines missing from this excerpt -- confirm in the
 * full source.
 */
161 int __ibv_close_device(struct ibv_context *context)
/* Capture fds before the provider frees the context memory. */
163 int async_fd = context->async_fd;
164 int cmd_fd = context->cmd_fd;
/* Legacy ABI v2 compatibility shim carries its own channel fd. */
168 struct ibv_abi_compat_v2 *t = context->abi_compat;
169 cq_fd = t->channel.fd;
170 free(context->abi_compat);
/* Provider frees whatever alloc_context() allocated. */
173 context->device->ops.free_context(context);
182 default_symver(__ibv_close_device, ibv_close_device);
/*
 * ibv_get_async_event() - read one kernel async event from
 * context->async_fd and translate the kernel's 64-bit element handle into
 * the matching member of the user-visible ibv_async_event element union.
 * NOTE(review): braces, break statements, the default arm's label and the
 * return statements are missing from this excerpt; comments cover only
 * visible code.
 */
184 int __ibv_get_async_event(struct ibv_context *context,
185 struct ibv_async_event *event)
187 struct ibv_kern_async_event ev;
/* A short read means no complete event was available: fail. */
189 if (read(context->async_fd, &ev, sizeof ev) != sizeof ev)
192 event->event_type = ev.event_type;
/* XRC events encode a QP number, not an object pointer, in ev.element. */
194 if (event->event_type & IBV_XRC_QP_EVENT_FLAG) {
195 event->element.xrc_qp_num = ev.element;
197 switch (event->event_type) {
198 case IBV_EVENT_CQ_ERR:
/* Kernel returns the user-space object pointer as an integer handle. */
199 event->element.cq = (void *) (uintptr_t) ev.element;
/* All QP-scoped events share the same element translation. */
202 case IBV_EVENT_QP_FATAL:
203 case IBV_EVENT_QP_REQ_ERR:
204 case IBV_EVENT_QP_ACCESS_ERR:
205 case IBV_EVENT_COMM_EST:
206 case IBV_EVENT_SQ_DRAINED:
207 case IBV_EVENT_PATH_MIG:
208 case IBV_EVENT_PATH_MIG_ERR:
209 case IBV_EVENT_QP_LAST_WQE_REACHED:
210 event->element.qp = (void *) (uintptr_t) ev.element;
211 case IBV_EVENT_SRQ_ERR:
214 case IBV_EVENT_SRQ_LIMIT_REACHED:
215 event->element.srq = (void *) (uintptr_t) ev.element;
/* Remaining event types carry a physical port number instead. */
218 event->element.port_num = ev.element;
/* Give the provider a chance to update its own state for this event. */
222 if (context->ops.async_event)
223 context->ops.async_event(event);
227 default_symver(__ibv_get_async_event, ibv_get_async_event);
/*
 * ibv_ack_async_event() - acknowledge a previously delivered async event so
 * the object it references can be safely destroyed: bump the object's
 * events-completed counter and wake any destroyer blocked on its condvar.
 * NOTE(review): braces, break statements and the port-event/default arm are
 * missing from this excerpt; comments cover only visible code.
 */
229 void __ibv_ack_async_event(struct ibv_async_event *event)
231 switch (event->event_type) {
232 case IBV_EVENT_CQ_ERR:
234 struct ibv_cq *cq = event->element.cq;
/* Counter and condvar are protected by the CQ's own mutex. */
236 pthread_mutex_lock(&cq->mutex);
237 ++cq->async_events_completed;
238 pthread_cond_signal(&cq->cond);
239 pthread_mutex_unlock(&cq->mutex);
/* QP-scoped events all acknowledge against the QP's counter. */
244 case IBV_EVENT_QP_FATAL:
245 case IBV_EVENT_QP_REQ_ERR:
246 case IBV_EVENT_QP_ACCESS_ERR:
247 case IBV_EVENT_COMM_EST:
248 case IBV_EVENT_SQ_DRAINED:
249 case IBV_EVENT_PATH_MIG:
250 case IBV_EVENT_PATH_MIG_ERR:
251 case IBV_EVENT_QP_LAST_WQE_REACHED:
253 struct ibv_qp *qp = event->element.qp;
255 pthread_mutex_lock(&qp->mutex);
256 ++qp->events_completed;
257 pthread_cond_signal(&qp->cond);
258 pthread_mutex_unlock(&qp->mutex);
/* SRQ-scoped events acknowledge against the SRQ's counter. */
263 case IBV_EVENT_SRQ_ERR:
264 case IBV_EVENT_SRQ_LIMIT_REACHED:
266 struct ibv_srq *srq = event->element.srq;
268 pthread_mutex_lock(&srq->mutex);
269 ++srq->events_completed;
270 pthread_cond_signal(&srq->cond);
271 pthread_mutex_unlock(&srq->mutex);
280 default_symver(__ibv_ack_async_event, ibv_ack_async_event);