]> CyberLeo.Net >> Repos - FreeBSD/releng/10.0.git/blob - contrib/ofed/libibverbs/src/device.c
- Copy stable/10 (r259064) to releng/10.0 as part of the
[FreeBSD/releng/10.0.git] / contrib / ofed / libibverbs / src / device.c
1 /*
2  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33
34 #if HAVE_CONFIG_H
35 #  include <config.h>
36 #endif /* HAVE_CONFIG_H */
37
38 #include <stdio.h>
39 #include <netinet/in.h>
40 #include <sys/types.h>
41 #include <sys/stat.h>
42 #include <fcntl.h>
43 #include <unistd.h>
44 #include <stdlib.h>
45 #include <alloca.h>
46 #include <errno.h>
47
48 #include <infiniband/arch.h>
49
50 #include "ibverbs.h"
51
52 static pthread_mutex_t device_list_lock = PTHREAD_MUTEX_INITIALIZER;
53 static int num_devices;
54 static struct ibv_device **device_list;
55
56 struct ibv_device **__ibv_get_device_list(int *num)
57 {
58         struct ibv_device **l = 0;
59         int i;
60
61         if (num)
62                 *num = 0;
63
64         pthread_mutex_lock(&device_list_lock);
65
66         if (!num_devices)
67                 num_devices = ibverbs_init(&device_list);
68
69         if (num_devices < 0) {
70                 errno = -num_devices;
71                 goto out;
72         }
73
74         l = calloc(num_devices + 1, sizeof (struct ibv_device *));
75         if (!l) {
76                 errno = ENOMEM;
77                 goto out;
78         }
79
80         for (i = 0; i < num_devices; ++i)
81                 l[i] = device_list[i];
82         if (num)
83                 *num = num_devices;
84
85 out:
86         pthread_mutex_unlock(&device_list_lock);
87         return l;
88 }
89 default_symver(__ibv_get_device_list, ibv_get_device_list);
90
/*
 * Release an array returned by ibv_get_device_list().  Only the array
 * itself is freed; the ibv_device structs it points at remain owned by
 * the library.
 */
void __ibv_free_device_list(struct ibv_device **list)
{
        free(list);
}
default_symver(__ibv_free_device_list, ibv_free_device_list);
96
/*
 * Return the device's name string.  The storage belongs to the
 * ibv_device; callers must not free it.
 */
const char *__ibv_get_device_name(struct ibv_device *device)
{
        return device->name;
}
default_symver(__ibv_get_device_name, ibv_get_device_name);
102
103 uint64_t __ibv_get_device_guid(struct ibv_device *device)
104 {
105         char attr[24];
106         uint64_t guid = 0;
107         uint16_t parts[4];
108         int i;
109
110         if (ibv_read_sysfs_file(device->ibdev_path, "node_guid",
111                                 attr, sizeof attr) < 0)
112                 return 0;
113
114         if (sscanf(attr, "%hx:%hx:%hx:%hx",
115                    parts, parts + 1, parts + 2, parts + 3) != 4)
116                 return 0;
117
118         for (i = 0; i < 4; ++i)
119                 guid = (guid << 16) | parts[i];
120
121         return htonll(guid);
122 }
123 default_symver(__ibv_get_device_guid, ibv_get_device_guid);
124
125 struct ibv_context *__ibv_open_device(struct ibv_device *device)
126 {
127         char *devpath;
128         int cmd_fd;
129         struct ibv_context *context;
130
131         if (asprintf(&devpath, "/dev/%s", device->dev_name) < 0)
132                 return NULL;
133
134         /*
135          * We'll only be doing writes, but we need O_RDWR in case the
136          * provider needs to mmap() the file.
137          */
138         cmd_fd = open(devpath, O_RDWR);
139         free(devpath);
140
141         if (cmd_fd < 0)
142                 return NULL;
143
144         context = device->ops.alloc_context(device, cmd_fd);
145         if (!context)
146                 goto err;
147
148         context->device = device;
149         context->cmd_fd = cmd_fd;
150         pthread_mutex_init(&context->mutex, NULL);
151
152         return context;
153
154 err:
155         close(cmd_fd);
156
157         return NULL;
158 }
159 default_symver(__ibv_open_device, ibv_open_device);
160
/*
 * Destroy a context created by ibv_open_device().  The fds are copied
 * out first because free_context() releases the memory they live in;
 * for ABI version 2 and older the compat struct's CQ channel fd is
 * also recovered and closed.  Always returns 0.
 */
int __ibv_close_device(struct ibv_context *context)
{
        /* Save fds before free_context() frees the context they live in. */
        int async_fd = context->async_fd;
        int cmd_fd   = context->cmd_fd;
        int cq_fd    = -1;

        if (abi_ver <= 2) {
                /* Old ABI keeps the CQ completion channel fd in a side struct. */
                struct ibv_abi_compat_v2 *t = context->abi_compat;
                cq_fd = t->channel.fd;
                free(context->abi_compat);
        }

        context->device->ops.free_context(context);

        close(async_fd);
        close(cmd_fd);
        if (abi_ver <= 2)
                close(cq_fd);

        return 0;
}
default_symver(__ibv_close_device, ibv_close_device);
183
184 int __ibv_get_async_event(struct ibv_context *context,
185                           struct ibv_async_event *event)
186 {
187         struct ibv_kern_async_event ev;
188
189         if (read(context->async_fd, &ev, sizeof ev) != sizeof ev)
190                 return -1;
191
192         event->event_type = ev.event_type;
193
194         if (event->event_type & IBV_XRC_QP_EVENT_FLAG) {
195                 event->element.xrc_qp_num = ev.element;
196         } else
197                 switch (event->event_type) {
198                 case IBV_EVENT_CQ_ERR:
199                         event->element.cq = (void *) (uintptr_t) ev.element;
200                         break;
201
202                 case IBV_EVENT_QP_FATAL:
203                 case IBV_EVENT_QP_REQ_ERR:
204                 case IBV_EVENT_QP_ACCESS_ERR:
205                 case IBV_EVENT_COMM_EST:
206                 case IBV_EVENT_SQ_DRAINED:
207                 case IBV_EVENT_PATH_MIG:
208                 case IBV_EVENT_PATH_MIG_ERR:
209                 case IBV_EVENT_QP_LAST_WQE_REACHED:
210                         event->element.qp = (void *) (uintptr_t) ev.element;
211                         break;
212
213                 case IBV_EVENT_SRQ_ERR:
214                 case IBV_EVENT_SRQ_LIMIT_REACHED:
215                         event->element.srq = (void *) (uintptr_t) ev.element;
216                         break;
217                 default:
218                         event->element.port_num = ev.element;
219                         break;
220                 }
221
222         if (context->ops.async_event)
223                 context->ops.async_event(event);
224
225         return 0;
226 }
227 default_symver(__ibv_get_async_event, ibv_get_async_event);
228
/*
 * Acknowledge an event obtained from ibv_get_async_event().  For events
 * tied to a CQ, QP, or SRQ, bump that object's completed-event counter
 * under its mutex and signal its condition variable so a thread waiting
 * to destroy the object can proceed.  Events with no attached object
 * (port events etc.) require no bookkeeping.
 */
void __ibv_ack_async_event(struct ibv_async_event *event)
{
        switch (event->event_type) {
        case IBV_EVENT_CQ_ERR:
        {
                struct ibv_cq *cq = event->element.cq;

                /* Wake anyone waiting for all CQ events to be acked. */
                pthread_mutex_lock(&cq->mutex);
                ++cq->async_events_completed;
                pthread_cond_signal(&cq->cond);
                pthread_mutex_unlock(&cq->mutex);

                return;
        }

        case IBV_EVENT_QP_FATAL:
        case IBV_EVENT_QP_REQ_ERR:
        case IBV_EVENT_QP_ACCESS_ERR:
        case IBV_EVENT_COMM_EST:
        case IBV_EVENT_SQ_DRAINED:
        case IBV_EVENT_PATH_MIG:
        case IBV_EVENT_PATH_MIG_ERR:
        case IBV_EVENT_QP_LAST_WQE_REACHED:
        {
                struct ibv_qp *qp = event->element.qp;

                /* Wake anyone waiting for all QP events to be acked. */
                pthread_mutex_lock(&qp->mutex);
                ++qp->events_completed;
                pthread_cond_signal(&qp->cond);
                pthread_mutex_unlock(&qp->mutex);

                return;
        }

        case IBV_EVENT_SRQ_ERR:
        case IBV_EVENT_SRQ_LIMIT_REACHED:
        {
                struct ibv_srq *srq = event->element.srq;

                /* Wake anyone waiting for all SRQ events to be acked. */
                pthread_mutex_lock(&srq->mutex);
                ++srq->events_completed;
                pthread_cond_signal(&srq->cond);
                pthread_mutex_unlock(&srq->mutex);

                return;
        }

        default:
                /* No per-object accounting for port/device-scoped events. */
                return;
        }
}
default_symver(__ibv_ack_async_event, ibv_ack_async_event);