2 * Copyright (c) 2004-2009 Voltaire, Inc. All rights reserved.
3 * Copyright (c) 2002-2005 Mellanox Technologies LTD. All rights reserved.
4 * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
5 * Copyright (c) 2009 HNR Consulting. All rights reserved.
6 * Copyright (c) 2009 Sun Microsystems, Inc. All rights reserved.
8 * This software is available to you under a choice of one of two
9 * licenses. You may choose to be licensed under the terms of the GNU
10 * General Public License (GPL) Version 2, available from the file
11 * COPYING in the main directory of this source tree, or the
12 * OpenIB.org BSD license below:
14 * Redistribution and use in source and binary forms, with or
15 * without modification, are permitted provided that the following
18 * - Redistributions of source code must retain the above
19 * copyright notice, this list of conditions and the following
22 * - Redistributions in binary form must reproduce the above
23 * copyright notice, this list of conditions and the following
24 * disclaimer in the documentation and/or other materials
25 * provided with the distribution.
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
28 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
29 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
30 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
31 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
32 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
33 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
40 * Implementation of osm_vendor_t (for umad).
41 * This object represents the OpenIB vendor layer.
42 * This object is part of the opensm family of objects.
51 #endif /* HAVE_CONFIG_H */
53 #ifdef OSM_VENDOR_INTF_OPENIB
60 #include <iba/ib_types.h>
61 #include <complib/cl_qlist.h>
62 #include <complib/cl_math.h>
63 #include <complib/cl_debug.h>
64 #include <opensm/osm_file_ids.h>
65 #define FILE_ID OSM_FILE_VENDOR_IBUMAD_C
66 #include <opensm/osm_madw.h>
67 #include <opensm/osm_log.h>
68 #include <opensm/osm_mad_pool.h>
69 #include <opensm/osm_helper.h>
70 #include <vendor/osm_vendor_api.h>
/*
 * osm_umad_bind_info_t: per-bind state created by osm_vendor_bind().
 * Holds the MAD pool and the receive / send-error callbacks invoked by
 * the receiver thread, plus umad agent id(s) for the registered class.
 * NOTE(review): this extract is fragmentary -- original file line
 * numbers are fused into the text and several members are elided.
 */
72 /****s* OpenSM: Vendor UMAD/osm_umad_bind_info_t
74 * osm_umad_bind_info_t
77 * Structure containing bind information.
81 typedef struct _osm_umad_bind_info {
84 osm_mad_pool_t *p_mad_pool;
85 osm_vend_mad_recv_callback_t mad_recv_callback;
86 osm_vend_mad_send_err_callback_t send_err_callback;
90 int agent_id1; /* SMI requires two agents */
93 } osm_umad_bind_info_t;
/*
 * umad_receiver_t: state for the receiver thread (members elided in
 * this extract; p_vend/p_log/tid are referenced later in the file).
 */
95 typedef struct _umad_receiver {
/* Forward declaration: used from osm_vendor_delete() before its definition. */
101 static void osm_vendor_close_port(osm_vendor_t * const p_vend);
/*
 * log_send_error: log a failed/canceled MAD send at ERROR level.
 * Non directed-route classes get a generic message with class/method/
 * attr/TID/LID (ERR 5410); directed-route SMPs get a dedicated message
 * (ERR 5411) followed by a dump of the DR path.  p_madw->status holds
 * the error being reported.  (Fragment: some lines elided.)
 */
103 static void log_send_error(osm_vendor_t * const p_vend, osm_madw_t *p_madw)
105 if (p_madw->p_mad->mgmt_class != IB_MCLASS_SUBN_DIR) {
107 OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5410: "
108 "Send completed with error (%s) -- dropping\n"
109 "\t\t\tClass 0x%x, Method 0x%X, Attr 0x%X, "
110 "TID 0x%" PRIx64 ", LID %u\n",
111 ib_get_err_str(p_madw->status),
112 p_madw->p_mad->mgmt_class, p_madw->p_mad->method,
113 cl_ntoh16(p_madw->p_mad->attr_id),
114 cl_ntoh64(p_madw->p_mad->trans_id),
115 cl_ntoh16(p_madw->mad_addr.dest_lid));
119 /* Direct routed SMP */
120 p_smp = osm_madw_get_smp_ptr(p_madw);
121 OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5411: "
122 "DR SMP Send completed with error (%s) -- dropping\n"
123 "\t\t\tMethod 0x%X, Attr 0x%X, TID 0x%" PRIx64 "\n",
124 ib_get_err_str(p_madw->status),
125 p_madw->p_mad->method,
126 cl_ntoh16(p_madw->p_mad->attr_id),
127 cl_ntoh64(p_madw->p_mad->trans_id));
128 osm_dump_smp_dr_path(p_vend->p_log, p_smp, OSM_LOG_ERROR);
/*
 * clear_madw: drain the transaction match table at shutdown.
 * Walks mtbl under match_tbl_mutex, and for each occupied entry returns
 * the saved request madw to its MAD pool and logs the eviction
 * (ERR 5401).  Note the mutex is released around the log call and
 * re-taken for the next scan iteration.  (Fragment: the loop's entry
 * test and tid bookkeeping lines are elided in this extract.)
 */
132 static void clear_madw(osm_vendor_t * p_vend)
134 umad_match_t *m, *e, *old_m;
136 uint8_t old_mgmt_class;
138 OSM_LOG_ENTER(p_vend->p_log);
139 pthread_mutex_lock(&p_vend->match_tbl_mutex);
140 for (m = p_vend->mtbl.tbl, e = m + p_vend->mtbl.max; m < e; m++) {
144 old_mgmt_class = m->mgmt_class;
/* m->v stores the request osm_madw_t; return it via its bind's pool. */
146 osm_mad_pool_put(((osm_umad_bind_info_t
147 *) ((osm_madw_t *) m->v)->h_bind)->
149 pthread_mutex_unlock(&p_vend->match_tbl_mutex);
150 OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5401: "
151 "evicting entry %p (tid was 0x%" PRIx64
152 " mgmt class 0x%x)\n",
153 old_m, cl_ntoh64(old_tid), old_mgmt_class);
157 pthread_mutex_unlock(&p_vend->match_tbl_mutex);
160 OSM_LOG_EXIT(p_vend->p_log);
/*
 * get_madw: look up (and, per callers, remove) the pending request
 * matching a response's TID and management class.
 * Only the low 32 bits of the TID are matched (mtid mask below); a
 * zero TID/class is the empty-slot key and is rejected up front.
 * Table access is serialized by match_tbl_mutex.  Returns the request
 * madw or NULL.  (Fragment: hit/miss return paths elided.)
 */
163 static osm_madw_t *get_madw(osm_vendor_t * p_vend, ib_net64_t * tid,
/* Keep only the low 32 bits -- the high bits are owned by the umad layer. */
167 ib_net64_t mtid = (*tid & CL_HTON64(0x00000000ffffffffULL));
171 * Since mtid == 0 is the empty key, we should not
172 * waste time looking for it
174 if (mtid == 0 || mgmt_class == 0)
177 pthread_mutex_lock(&p_vend->match_tbl_mutex);
178 for (m = p_vend->mtbl.tbl, e = m + p_vend->mtbl.max; m < e; m++) {
179 if (m->tid == mtid && m->mgmt_class == mgmt_class) {
184 pthread_mutex_unlock(&p_vend->match_tbl_mutex);
189 pthread_mutex_unlock(&p_vend->match_tbl_mutex);
194 * If match table full, evict LRU (least recently used) transaction.
195 * Maintain 2 LRUs: one for SMPs, and one for others (GS).
196 * Evict LRU GS transaction if one is available and only evict LRU SMP
197 * transaction if no other choice.
/*
 * put_madw: record an outstanding request in the match table so the
 * receiver thread can pair the eventual response (see get_madw).
 * An empty slot (tid==0 && class==0) is used if available; otherwise
 * the LRU entry is evicted per the policy above, its owner notified
 * via send_err_callback with status IB_CANCELED, and the eviction
 * logged (ERR 5402).  m->version acts as the LRU timestamp, stamped
 * from the atomically incremented mtbl.last_version counter.
 * (Fragment: several assignment/return lines elided.)
 */
200 put_madw(osm_vendor_t * p_vend, osm_madw_t * p_madw, ib_net64_t tid,
203 umad_match_t *m, *e, *old_lru, *lru = 0, *lru_smp = 0;
204 osm_madw_t *p_req_madw;
205 osm_umad_bind_info_t *p_bind;
207 uint32_t oldest = ~0, oldest_smp = ~0;
208 uint8_t old_mgmt_class;
210 pthread_mutex_lock(&p_vend->match_tbl_mutex);
211 for (m = p_vend->mtbl.tbl, e = m + p_vend->mtbl.max; m < e; m++) {
/* Fast path: claim a free slot and stamp it with a fresh version. */
212 if (m->tid == 0 && m->mgmt_class == 0) {
214 m->mgmt_class = mgmt_class;
217 cl_atomic_inc((atomic32_t *) & p_vend->mtbl.
219 pthread_mutex_unlock(&p_vend->match_tbl_mutex);
/* Track two LRU candidates: one for SMP classes, one for GS. */
222 if (m->mgmt_class == IB_MCLASS_SUBN_DIR ||
223 m->mgmt_class == IB_MCLASS_SUBN_LID) {
224 if (oldest_smp >= m->version) {
225 oldest_smp = m->version;
229 if (oldest >= m->version) {
239 old_mgmt_class = lru->mgmt_class;
241 CL_ASSERT(oldest_smp != ~0);
243 old_tid = lru_smp->tid;
244 old_mgmt_class = lru_smp->mgmt_class;
/* Cancel the evicted transaction and notify its owner under cb_mutex. */
246 p_req_madw = old_lru->v;
247 p_bind = p_req_madw->h_bind;
248 p_req_madw->status = IB_CANCELED;
249 log_send_error(p_vend, p_req_madw);
250 pthread_mutex_lock(&p_vend->cb_mutex);
251 (*p_bind->send_err_callback) (p_bind->client_context, p_req_madw);
252 pthread_mutex_unlock(&p_vend->cb_mutex);
/* Reuse whichever LRU slot matches the new entry's class family. */
253 if (mgmt_class == IB_MCLASS_SUBN_DIR ||
254 mgmt_class == IB_MCLASS_SUBN_LID) {
256 lru_smp->mgmt_class = mgmt_class;
259 cl_atomic_inc((atomic32_t *) & p_vend->mtbl.last_version);
262 lru->mgmt_class = mgmt_class;
265 cl_atomic_inc((atomic32_t *) & p_vend->mtbl.last_version);
267 pthread_mutex_unlock(&p_vend->match_tbl_mutex);
268 OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5402: "
269 "evicting entry %p (tid was 0x%" PRIx64
270 " mgmt class 0x%x)\n", old_lru,
271 cl_ntoh64(old_tid), old_mgmt_class);
/*
 * ib_mad_addr_conv: translate a received umad's address into an
 * osm_mad_addr_t.  The elided third parameter selects SMI vs GSI
 * population (presumably a bool -- TODO confirm against full source):
 * SMI fills source_lid/port_num, GSI fills QP/QKey/pkey index/SL and,
 * when a GRH is present, hop limit, ver/class/flow and the 16-byte
 * destination GID.  (Fragment: the if/else framing is elided.)
 */
275 ib_mad_addr_conv(ib_user_mad_t * umad, osm_mad_addr_t * osm_mad_addr,
278 ib_mad_addr_t *ib_mad_addr = umad_get_mad_addr(umad);
280 memset(osm_mad_addr, 0, sizeof(osm_mad_addr_t));
281 osm_mad_addr->dest_lid = ib_mad_addr->lid;
282 osm_mad_addr->path_bits = ib_mad_addr->path_bits;
285 osm_mad_addr->addr_type.smi.source_lid = osm_mad_addr->dest_lid;
286 osm_mad_addr->addr_type.smi.port_num = 255; /* not used */
290 osm_mad_addr->addr_type.gsi.remote_qp = ib_mad_addr->qpn;
291 osm_mad_addr->addr_type.gsi.remote_qkey = ib_mad_addr->qkey;
292 osm_mad_addr->addr_type.gsi.pkey_ix = umad_get_pkey(umad);
293 osm_mad_addr->addr_type.gsi.service_level = ib_mad_addr->sl;
294 if (ib_mad_addr->grh_present) {
295 osm_mad_addr->addr_type.gsi.global_route = 1;
296 osm_mad_addr->addr_type.gsi.grh_info.hop_limit = ib_mad_addr->hop_limit;
297 osm_mad_addr->addr_type.gsi.grh_info.ver_class_flow =
298 ib_grh_set_ver_class_flow(6, /* GRH version */
299 ib_mad_addr->traffic_class,
300 ib_mad_addr->flow_label);
301 memcpy(&osm_mad_addr->addr_type.gsi.grh_info.dest_gid,
302 &ib_mad_addr->gid, 16);
/*
 * swap_mad_bufs: hand ownership of the freshly received umad buffer to
 * the madw (vend_wrap.umad) and point p_mad at its MAD payload.
 * Returns the madw's previous umad buffer so the receive loop can
 * reuse it -- avoids copying MAD data.  (Fragment: return line elided.)
 */
306 static void *swap_mad_bufs(osm_madw_t * p_madw, void *umad)
310 old = p_madw->vend_wrap.umad;
311 p_madw->vend_wrap.umad = umad;
312 p_madw->p_mad = umad_get_mad(umad);
/*
 * unlock_mutex: pthread_cleanup_push-compatible wrapper so cb_mutex is
 * released even if the receiver thread is cancelled inside a callback.
 */
317 static void unlock_mutex(void *arg)
319 pthread_mutex_unlock(arg);
/*
 * umad_receiver: body of the receiver thread started by
 * umad_receiver_start().  Loop (framing elided in this extract):
 *   1. umad_recv() into a MAD-block-sized buffer; on ENOSPC-style
 *      short-buffer failure, reallocate at the reported RMPP length
 *      and receive again.
 *   2. Map mad_agent to its bind (drop if unknown agent, ERR 5407).
 *   3. Convert the umad address, take a madw from the pool sized
 *      MAX(length, MAD_BLOCK_SIZE), and swap buffers (no copy).
 *   4. Non-zero umad status means a local send timeout: pair it with
 *      the pending request via get_madw and fire send_err_callback
 *      with IB_TIMEOUT (callback frees the request madw).
 *   5. For responses, validate against the matched request (method,
 *      attr id/modifier, TID; with the o15-0.2-1.11 MultiPathRecord
 *      exception) and drop mismatches.
 *   6. Without VENDOR_RMPP_SUPPORT, drop GS MADs with the RMPP ACTIVE
 *      flag (ERR 5414).
 *   7. Deliver via mad_recv_callback under cb_mutex, protected by a
 *      cancellation cleanup handler.
 */
322 static void *umad_receiver(void *p_ptr)
324 umad_receiver_t *const p_ur = (umad_receiver_t *) p_ptr;
325 osm_vendor_t *p_vend = p_ur->p_vend;
326 osm_umad_bind_info_t *p_bind;
327 osm_mad_addr_t osm_addr;
328 osm_madw_t *p_madw, *p_req_madw;
329 ib_mad_t *p_mad, *p_req_mad;
331 int mad_agent, length;
333 OSM_LOG_ENTER(p_ur->p_log);
337 !(umad = umad_alloc(1, umad_size() + MAD_BLOCK_SIZE))) {
338 OSM_LOG(p_ur->p_log, OSM_LOG_ERROR, "ERR 5403: "
339 "can't alloc MAD sized umad\n");
343 length = MAD_BLOCK_SIZE;
344 if ((mad_agent = umad_recv(p_vend->umad_port_id, umad,
/* length <= block size means a genuine recv error, not a short buffer. */
346 if (length <= MAD_BLOCK_SIZE) {
347 OSM_LOG(p_ur->p_log, OSM_LOG_ERROR, "ERR 5404: "
348 "recv error on MAD sized umad (%m)\n");
352 /* Need a larger buffer for RMPP */
353 umad = umad_alloc(1, umad_size() + length);
355 OSM_LOG(p_ur->p_log, OSM_LOG_ERROR,
357 "can't alloc umad length %d\n",
362 if ((mad_agent = umad_recv(p_vend->umad_port_id,
365 OSM_LOG(p_ur->p_log, OSM_LOG_ERROR,
367 "recv error on umad length %d (%m)\n",
374 if (mad_agent >= OSM_UMAD_MAX_AGENTS ||
375 !(p_bind = p_vend->agents[mad_agent])) {
376 OSM_LOG(p_ur->p_log, OSM_LOG_ERROR, "ERR 5407: "
377 "invalid mad agent %d - dropping\n", mad_agent);
381 p_mad = (ib_mad_t *) umad_get_mad(umad);
383 ib_mad_addr_conv(umad, &osm_addr,
384 p_mad->mgmt_class == IB_MCLASS_SUBN_LID ||
385 p_mad->mgmt_class == IB_MCLASS_SUBN_DIR);
387 if (!(p_madw = osm_mad_pool_get(p_bind->p_mad_pool,
388 (osm_bind_handle_t) p_bind,
389 MAX(length, MAD_BLOCK_SIZE),
391 OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5408: "
392 "request for a new madw failed -- dropping packet\n");
396 /* Need to fix up MAD size if short RMPP packet */
397 if (length < MAD_BLOCK_SIZE)
398 p_madw->mad_size = length;
401 * Avoid copying by swapping mad buf pointers.
402 * Do not use umad after this line of code.
404 umad = swap_mad_bufs(p_madw, umad);
406 /* if status != 0 then we are handling recv timeout on send */
407 if (umad_status(p_madw->vend_wrap.umad)) {
408 if (!(p_req_madw = get_madw(p_vend, &p_mad->trans_id,
409 p_mad->mgmt_class))) {
410 OSM_LOG(p_vend->p_log, OSM_LOG_ERROR,
412 "Failed to obtain request madw for timed out MAD"
413 " (class=0x%X method=0x%X attr=0x%X tid=0x%"PRIx64") -- dropping\n",
414 p_mad->mgmt_class, p_mad->method,
415 cl_ntoh16(p_mad->attr_id),
416 cl_ntoh64(p_mad->trans_id));
418 p_req_madw->status = IB_TIMEOUT;
419 log_send_error(p_vend, p_req_madw);
420 /* cb frees req_madw */
421 pthread_mutex_lock(&p_vend->cb_mutex);
422 pthread_cleanup_push(unlock_mutex,
424 (*p_bind->send_err_callback) (p_bind->
427 pthread_cleanup_pop(1);
430 osm_mad_pool_put(p_bind->p_mad_pool, p_madw);
435 if (ib_mad_is_response(p_mad)) {
436 p_req_madw = get_madw(p_vend, &p_mad->trans_id,
/* PF() marks the unlikely (error) branch -- TODO confirm macro semantics. */
438 if (PF(!p_req_madw)) {
439 OSM_LOG(p_vend->p_log, OSM_LOG_ERROR,
440 "ERR 5413: Failed to obtain request "
441 "madw for received MAD "
442 "(class=0x%X method=0x%X attr=0x%X "
443 "tid=0x%"PRIx64") -- dropping\n",
444 p_mad->mgmt_class, p_mad->method,
445 cl_ntoh16(p_mad->attr_id),
446 cl_ntoh64(p_mad->trans_id));
447 osm_mad_pool_put(p_bind->p_mad_pool, p_madw);
452 * Check that request MAD was really a request,
453 * and make sure that attribute ID, attribute
454 * modifier and transaction ID are the same in
455 * request and response.
457 * Exception for o15-0.2-1.11:
458 * SA response to a SubnAdmGetMulti() containing a
459 * MultiPathRecord shall have PathRecord attribute ID.
461 p_req_mad = osm_madw_get_mad_ptr(p_req_madw);
462 if (PF(ib_mad_is_response(p_req_mad) ||
463 (p_mad->attr_id != p_req_mad->attr_id &&
464 !(p_mad->mgmt_class == IB_MCLASS_SUBN_ADM &&
465 p_req_mad->attr_id ==
466 IB_MAD_ATTR_MULTIPATH_RECORD &&
467 p_mad->attr_id == IB_MAD_ATTR_PATH_RECORD)) ||
468 p_mad->attr_mod != p_req_mad->attr_mod ||
469 p_mad->trans_id != p_req_mad->trans_id)) {
470 OSM_LOG(p_vend->p_log, OSM_LOG_ERROR,
472 "Response MAD validation failed "
473 "(request attr=0x%X modif=0x%X "
475 "response attr=0x%X modif=0x%X "
476 "tid=0x%"PRIx64") -- dropping\n",
477 cl_ntoh16(p_req_mad->attr_id),
478 cl_ntoh32(p_req_mad->attr_mod),
479 cl_ntoh64(p_req_mad->trans_id),
480 cl_ntoh16(p_mad->attr_id),
481 cl_ntoh32(p_mad->attr_mod),
482 cl_ntoh64(p_mad->trans_id));
483 osm_mad_pool_put(p_bind->p_mad_pool, p_madw);
488 #ifndef VENDOR_RMPP_SUPPORT
489 if ((p_mad->mgmt_class != IB_MCLASS_SUBN_DIR) &&
490 (p_mad->mgmt_class != IB_MCLASS_SUBN_LID) &&
491 (ib_rmpp_is_flag_set((ib_rmpp_mad_t *) p_mad,
492 IB_RMPP_FLAG_ACTIVE))) {
493 OSM_LOG(p_ur->p_log, OSM_LOG_ERROR, "ERR 5414: "
494 "class 0x%x method 0x%x RMPP version %d type "
495 "%d flags 0x%x received -- dropping\n",
496 p_mad->mgmt_class, p_mad->method,
497 ((ib_rmpp_mad_t *) p_mad)->rmpp_version,
498 ((ib_rmpp_mad_t *) p_mad)->rmpp_type,
499 ((ib_rmpp_mad_t *) p_mad)->rmpp_flags);
500 osm_mad_pool_put(p_bind->p_mad_pool, p_madw);
506 pthread_mutex_lock(&p_vend->cb_mutex);
507 pthread_cleanup_push(unlock_mutex, &p_vend->cb_mutex);
508 (*p_bind->mad_recv_callback) (p_madw, p_bind->client_context,
510 pthread_cleanup_pop(1);
513 OSM_LOG_EXIT(p_ur->p_log);
/*
 * umad_receiver_start: initialize the preallocated receiver struct and
 * spawn the umad_receiver thread.  Returns non-zero on pthread_create
 * failure (error-path lines elided in this extract).
 */
517 static int umad_receiver_start(osm_vendor_t * p_vend)
519 umad_receiver_t *p_ur = p_vend->receiver;
521 p_ur->p_vend = p_vend;
522 p_ur->p_log = p_vend->p_log;
524 if (pthread_create(&p_ur->tid, NULL, umad_receiver, p_ur) != 0)
/*
 * umad_receiver_stop: cancel the receiver thread and wait for it to
 * terminate (cleanup handlers in umad_receiver release cb_mutex).
 */
530 static void umad_receiver_stop(umad_receiver_t * p_ur)
532 pthread_cancel(p_ur->tid);
533 pthread_join(p_ur->tid, NULL);
/*
 * osm_vendor_init: initialize an osm_vendor_t in place.
 * Sets timeout/retry defaults, initializes the callback and match-table
 * mutexes, opens the umad library, enumerates local CAs, and allocates
 * the transaction match table.  Table size defaults to
 * DEFAULT_OSM_UMAD_MAX_PENDING and can be overridden by the
 * OSM_UMAD_MAX_PENDING environment variable (invalid values logged).
 * Returns an ib_api_status_t (return statements elided in extract).
 */
540 osm_vendor_init(IN osm_vendor_t * const p_vend,
541 IN osm_log_t * const p_log, IN const uint32_t timeout)
546 OSM_LOG_ENTER(p_log);
548 p_vend->p_log = p_log;
549 p_vend->timeout = timeout;
550 p_vend->max_retries = OSM_DEFAULT_RETRY_COUNT;
551 pthread_mutex_init(&p_vend->cb_mutex, NULL);
552 pthread_mutex_init(&p_vend->match_tbl_mutex, NULL);
/* -1 marks "no port opened yet" for osm_vendor_open_port/close_port. */
553 p_vend->umad_port_id = -1;
557 * Open our instance of UMAD.
559 if ((r = umad_init()) < 0) {
560 OSM_LOG(p_vend->p_log, OSM_LOG_ERROR,
561 "ERR 5415: Error opening UMAD\n");
564 if ((n_cas = umad_get_cas_names(p_vend->ca_names,
565 OSM_UMAD_MAX_CAS)) < 0) {
566 OSM_LOG(p_vend->p_log, OSM_LOG_ERROR,
567 "ERR 5416: umad_get_cas_names failed\n");
572 p_vend->ca_count = n_cas;
573 p_vend->mtbl.max = DEFAULT_OSM_UMAD_MAX_PENDING;
575 if ((max = getenv("OSM_UMAD_MAX_PENDING")) != NULL) {
576 int tmp = strtol(max, NULL, 0);
578 p_vend->mtbl.max = tmp;
580 OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "Error:"
581 "OSM_UMAD_MAX_PENDING=%d is invalid\n",
585 OSM_LOG(p_vend->p_log, OSM_LOG_INFO, "%d pending umads specified\n",
588 p_vend->mtbl.tbl = calloc(p_vend->mtbl.max, sizeof(*(p_vend->mtbl.tbl)));
589 if (!p_vend->mtbl.tbl) {
590 OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "Error:"
591 "failed to allocate vendor match table\n");
592 r = IB_INSUFFICIENT_MEMORY;
/*
 * osm_vendor_new: allocate and initialize a vendor object.
 * Rejects a zero transaction timeout (ERR 5433), zeroes the freshly
 * malloc'd object, then delegates to osm_vendor_init().  Returns NULL
 * on any failure (cleanup/return lines elided in this extract).
 */
601 osm_vendor_t *osm_vendor_new(IN osm_log_t * const p_log,
602 IN const uint32_t timeout)
604 osm_vendor_t *p_vend = NULL;
606 OSM_LOG_ENTER(p_log);
609 OSM_LOG(p_log, OSM_LOG_ERROR, "ERR 5433: "
610 "transaction timeout cannot be 0\n");
614 p_vend = malloc(sizeof(*p_vend));
615 if (p_vend == NULL) {
616 OSM_LOG(p_log, OSM_LOG_ERROR, "ERR 5417: "
617 "Unable to allocate vendor object\n");
621 memset(p_vend, 0, sizeof(*p_vend));
623 if (osm_vendor_init(p_vend, p_log, timeout) != IB_SUCCESS) {
/*
 * osm_vendor_delete: tear down a vendor object created by
 * osm_vendor_new.  Closes the port (stopping the receiver thread),
 * drains the match table, destroys both mutexes and frees the table;
 * freeing *pp_vend itself is elided in this extract.
 */
633 void osm_vendor_delete(IN osm_vendor_t ** const pp_vend)
635 osm_vendor_close_port(*pp_vend);
637 clear_madw(*pp_vend);
638 /* make sure all ports are closed */
641 pthread_mutex_destroy(&(*pp_vend)->cb_mutex);
642 pthread_mutex_destroy(&(*pp_vend)->match_tbl_mutex);
643 free((*pp_vend)->mtbl.tbl);
/*
 * osm_vendor_get_all_port_attr: fill p_attr_array with the attributes
 * of every port on every local IB CA (node types 1-3; others skipped).
 * For each port: GUID, LID, port number, SM LID/SL, link state, and
 * optionally the caller-provided pkey and gid tables.  On return,
 * *p_num_ports holds the number of entries written.  Returns an
 * ib_api_status_t; IB_INVALID_PARAMETER if *p_num_ports is 0 and
 * IB_INSUFFICIENT_MEMORY on overflow (guard conditions partly elided).
 */
649 osm_vendor_get_all_port_attr(IN osm_vendor_t * const p_vend,
650 IN ib_port_attr_t * const p_attr_array,
651 IN uint32_t * const p_num_ports)
654 ib_port_attr_t *attr = p_attr_array;
658 OSM_LOG_ENTER(p_vend->p_log);
660 CL_ASSERT(p_vend && p_num_ports);
663 r = IB_INVALID_PARAMETER;
664 OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5418: "
665 "Ports in should be > 0\n");
670 r = IB_INSUFFICIENT_MEMORY;
675 for (i = 0; i < p_vend->ca_count && !done; i++) {
676 /* For each CA, retrieve the port attributes */
677 if (umad_get_ca(p_vend->ca_names[i], &ca) == 0) {
/* Skip non-IB node types (e.g. iWARP RNICs report other values). */
678 if (ca.node_type < 1 || ca.node_type > 3)
680 for (j = 0; j <= ca.numports; j++) {
683 attr->port_guid = ca.ports[j]->port_guid;
684 attr->lid = ca.ports[j]->base_lid;
685 attr->port_num = ca.ports[j]->portnum;
686 attr->sm_lid = ca.ports[j]->sm_lid;
687 attr->sm_sl = ca.ports[j]->sm_sl;
688 attr->link_state = ca.ports[j]->state;
689 if (attr->num_pkeys && attr->p_pkey_table) {
/* Clamp to what the kernel exposes for this port. */
690 if (attr->num_pkeys > ca.ports[j]->pkeys_size)
691 attr->num_pkeys = ca.ports[j]->pkeys_size;
692 for (k = 0; k < attr->num_pkeys; k++)
693 attr->p_pkey_table[k] =
694 cl_hton16(ca.ports[j]->pkeys[k]);
696 attr->num_pkeys = ca.ports[j]->pkeys_size;
697 if (attr->num_gids && attr->p_gid_table) {
698 attr->p_gid_table[0].unicast.prefix = cl_hton64(ca.ports[j]->gid_prefix);
699 attr->p_gid_table[0].unicast.interface_id = cl_hton64(ca.ports[j]->port_guid);
703 if (attr - p_attr_array > *p_num_ports) {
708 umad_release_ca(&ca);
712 *p_num_ports = attr - p_attr_array;
715 OSM_LOG_EXIT(p_vend->p_log);
/*
 * osm_vendor_open_port: open the umad port owning port_guid and start
 * the receiver thread.  Idempotent: if a port is already open its id
 * is returned.  Steps: scan every CA's port GUIDs for a match
 * (ERR 5422 if none), verify the node is an IB type (1-3, ERR 542D),
 * umad_get_port/umad_open_port it, allocate the receiver struct and
 * spawn the thread.  All failure paths roll back port state to -1.
 * Returns the umad port id or a negative value (some returns elided).
 */
720 osm_vendor_open_port(IN osm_vendor_t * const p_vend,
721 IN const ib_net64_t port_guid)
723 ib_net64_t portguids[OSM_UMAD_MAX_PORTS_PER_CA + 1];
725 int i = 0, umad_port_id = -1;
731 OSM_LOG_ENTER(p_vend->p_log);
/* Already open -- reuse the existing port id. */
733 if (p_vend->umad_port_id >= 0) {
734 umad_port_id = p_vend->umad_port_id;
744 for (ca = 0; ca < p_vend->ca_count; ca++) {
745 if ((r = umad_get_ca_portguids(p_vend->ca_names[ca], portguids,
746 OSM_UMAD_MAX_PORTS_PER_CA + 1)) < 0) {
748 OSM_LOG(p_vend->p_log, OSM_LOG_VERBOSE,
750 OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5421: "
752 "Unable to get CA %s port guids (%s)\n",
753 p_vend->ca_names[ca], strerror(r));
756 for (i = 0; i < r; i++)
757 if (port_guid == portguids[i]) {
758 name = p_vend->ca_names[ca];
764 * No local CA owns this guid!
766 OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5422: "
767 "Unable to find requested CA guid 0x%" PRIx64 "\n",
768 cl_ntoh64(port_guid));
772 /* Validate that node is an IB node type (not iWARP) */
773 if (umad_get_ca(name, &umad_ca) < 0) {
774 OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 542A: "
775 "umad_get_ca() failed\n");
779 if (umad_ca.node_type < 1 || umad_ca.node_type > 3) {
780 OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 542D: "
781 "Type %d of node \'%s\' is not an IB node type\n",
782 umad_ca.node_type, umad_ca.ca_name);
784 "Type %d of node \'%s\' is not an IB node type\n",
785 umad_ca.node_type, umad_ca.ca_name);
786 umad_release_ca(&umad_ca);
789 umad_release_ca(&umad_ca);
791 /* Port found, try to open it */
792 if (umad_get_port(name, i, &p_vend->umad_port) < 0) {
793 OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 542B: "
794 "umad_get_port() failed\n");
798 if ((umad_port_id = umad_open_port(p_vend->umad_port.ca_name,
799 p_vend->umad_port.portnum)) < 0) {
800 OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 542C: "
801 "umad_open_port() failed\n");
805 p_vend->umad_port_id = umad_port_id;
807 /* start receiver thread */
808 if (!(p_vend->receiver = calloc(1, sizeof(umad_receiver_t)))) {
809 OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5423: "
810 "Unable to alloc receiver struct\n");
/* Roll back: close/release the port and mark it unopened. */
811 umad_close_port(umad_port_id);
812 umad_release_port(&p_vend->umad_port);
813 p_vend->umad_port.port_guid = 0;
814 p_vend->umad_port_id = umad_port_id = -1;
817 if (umad_receiver_start(p_vend) != 0) {
818 OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5420: "
819 "umad_receiver_init failed\n");
820 umad_close_port(umad_port_id);
821 umad_release_port(&p_vend->umad_port);
822 p_vend->umad_port.port_guid = 0;
823 p_vend->umad_port_id = umad_port_id = -1;
827 OSM_LOG_EXIT(p_vend->p_log);
/*
 * osm_vendor_close_port: undo osm_vendor_open_port.  Stops and frees
 * the receiver thread (stop/free lines partly elided), unregisters
 * every registered agent, then closes and releases the umad port and
 * resets umad_port_id to -1.
 */
831 static void osm_vendor_close_port(osm_vendor_t * const p_vend)
833 umad_receiver_t *p_ur;
836 p_ur = p_vend->receiver;
837 p_vend->receiver = NULL;
839 umad_receiver_stop(p_ur);
843 if (p_vend->umad_port_id >= 0) {
844 for (i = 0; i < OSM_UMAD_MAX_AGENTS; i++)
845 if (p_vend->agents[i])
846 umad_unregister(p_vend->umad_port_id, i);
847 umad_close_port(p_vend->umad_port_id);
848 umad_release_port(&p_vend->umad_port);
849 p_vend->umad_port.port_guid = 0;
850 p_vend->umad_port_id = -1;
/*
 * set_bit: set bit `nr` in the long-array bitmask `method_mask` (used
 * to build the umad_register method mask).  Returns the bit's previous
 * value.  (The store of the new bit is elided in this extract.)
 */
854 static int set_bit(int nr, void *method_mask)
856 long mask, *addr = method_mask;
859 addr += nr / (8 * sizeof(long));
860 mask = 1L << (nr % (8 * sizeof(long)));
861 retval = (mask & *addr) != 0;
/*
 * osm_vendor_bind: register a management class on a port and return an
 * opaque bind handle.  Steps: open the port for p_user_bind->port_guid
 * (ERR 5424), resolve the issm sysfs path (ERR 542E), allocate and
 * fill an osm_umad_bind_info_t (callbacks, pool, timeout/retries from
 * user bind or vendor defaults), build the responder method mask, set
 * rmpp_version for the SA class, umad_register the class (ERR 5426)
 * and record the agent in p_vend->agents[].  For IB_MCLASS_SUBN_DIR a
 * second agent is registered for the LID-routed SMI class (class 1,
 * ERR 5428/5429).  Returns the bind handle; error returns elided.
 */
867 osm_vendor_bind(IN osm_vendor_t * const p_vend,
868 IN osm_bind_info_t * const p_user_bind,
869 IN osm_mad_pool_t * const p_mad_pool,
870 IN osm_vend_mad_recv_callback_t mad_recv_callback,
871 IN osm_vend_mad_send_err_callback_t send_err_callback,
874 ib_net64_t port_guid;
875 osm_umad_bind_info_t *p_bind = 0;
876 long method_mask[16 / sizeof(long)];
878 uint8_t rmpp_version;
880 OSM_LOG_ENTER(p_vend->p_log);
882 CL_ASSERT(p_user_bind);
883 CL_ASSERT(p_mad_pool);
884 CL_ASSERT(mad_recv_callback);
885 CL_ASSERT(send_err_callback);
887 port_guid = p_user_bind->port_guid;
889 OSM_LOG(p_vend->p_log, OSM_LOG_INFO,
890 "Mgmt class 0x%02x binding to port GUID 0x%" PRIx64 "\n",
891 p_user_bind->mad_class, cl_ntoh64(port_guid));
893 if ((umad_port_id = osm_vendor_open_port(p_vend, port_guid)) < 0) {
894 OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5424: "
895 "Unable to open port 0x%" PRIx64 "\n",
896 cl_ntoh64(port_guid));
/* issm_path is opened later by osm_vendor_set_sm() to raise IS_SM. */
900 if (umad_get_issm_path(p_vend->umad_port.ca_name,
901 p_vend->umad_port.portnum,
903 sizeof(p_vend->issm_path)) < 0) {
904 OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 542E: "
905 "Cannot resolve issm path for port %s:%u\n",
906 p_vend->umad_port.ca_name, p_vend->umad_port.portnum);
910 if (!(p_bind = malloc(sizeof(*p_bind)))) {
911 OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5425: "
912 "Unable to allocate internal bind object\n");
916 memset(p_bind, 0, sizeof(*p_bind));
917 p_bind->p_vend = p_vend;
918 p_bind->port_id = umad_port_id;
919 p_bind->client_context = context;
920 p_bind->mad_recv_callback = mad_recv_callback;
921 p_bind->send_err_callback = send_err_callback;
922 p_bind->p_mad_pool = p_mad_pool;
923 p_bind->port_guid = port_guid;
924 p_bind->timeout = p_user_bind->timeout ? p_user_bind->timeout :
926 p_bind->max_retries = p_user_bind->retries ? p_user_bind->retries :
/* Build the method mask advertised to the kernel for this agent. */
929 memset(method_mask, 0, sizeof method_mask);
930 if (p_user_bind->is_responder) {
931 set_bit(IB_MAD_METHOD_GET, &method_mask);
932 set_bit(IB_MAD_METHOD_SET, &method_mask);
933 if (p_user_bind->mad_class == IB_MCLASS_SUBN_ADM) {
934 set_bit(IB_MAD_METHOD_GETTABLE, &method_mask);
935 set_bit(IB_MAD_METHOD_DELETE, &method_mask);
936 #ifdef DUAL_SIDED_RMPP
937 set_bit(IB_MAD_METHOD_GETMULTI, &method_mask);
939 /* Add in IB_MAD_METHOD_GETTRACETABLE */
940 /* when supported by OpenSM */
943 if (p_user_bind->is_report_processor)
944 set_bit(IB_MAD_METHOD_REPORT, &method_mask);
945 if (p_user_bind->is_trap_processor) {
946 set_bit(IB_MAD_METHOD_TRAP, &method_mask);
947 set_bit(IB_MAD_METHOD_TRAP_REPRESS, &method_mask);
949 #ifndef VENDOR_RMPP_SUPPORT
952 /* If SA class, set rmpp_version */
953 if (p_user_bind->mad_class == IB_MCLASS_SUBN_ADM)
959 if ((p_bind->agent_id = umad_register(p_vend->umad_port_id,
960 p_user_bind->mad_class,
961 p_user_bind->class_version,
962 rmpp_version, method_mask)) < 0) {
963 OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5426: "
964 "Unable to register class %u version %u\n",
965 p_user_bind->mad_class, p_user_bind->class_version);
971 if (p_bind->agent_id >= OSM_UMAD_MAX_AGENTS ||
972 p_vend->agents[p_bind->agent_id]) {
973 OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5427: "
974 "bad agent id %u or duplicate agent for class %u vers %u\n",
975 p_bind->agent_id, p_user_bind->mad_class,
976 p_user_bind->class_version);
982 p_vend->agents[p_bind->agent_id] = p_bind;
984 /* If Subn Directed Route class, register Subn LID routed class */
985 if (p_user_bind->mad_class == IB_MCLASS_SUBN_DIR) {
986 if ((p_bind->agent_id1 = umad_register(p_vend->umad_port_id,
991 OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5428: "
992 "Unable to register class 1 version %u\n",
993 p_user_bind->class_version);
999 if (p_bind->agent_id1 >= OSM_UMAD_MAX_AGENTS ||
1000 p_vend->agents[p_bind->agent_id1]) {
1001 OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5429: "
1002 "bad agent id %u or duplicate agent for class 1 vers %u\n",
1003 p_bind->agent_id1, p_user_bind->class_version);
1009 p_vend->agents[p_bind->agent_id1] = p_bind;
1013 OSM_LOG_EXIT(p_vend->p_log);
1014 return ((osm_bind_handle_t) p_bind);
/*
 * __osm_vendor_recv_dummy_cb: no-op receive callback installed by
 * osm_vendor_unbind() so MADs arriving after unbind are logged and
 * ignored instead of dispatched to a stale client.
 */
1018 __osm_vendor_recv_dummy_cb(IN osm_madw_t * p_madw,
1019 IN void *bind_context, IN osm_madw_t * p_req_madw)
1023 "__osm_vendor_recv_dummy_cb: Ignoring received MAD after osm_vendor_unbind\n");
/*
 * __osm_vendor_send_err_dummy_cb: no-op send-error callback installed
 * by osm_vendor_unbind(); logs and ignores late send errors.
 */
1028 __osm_vendor_send_err_dummy_cb(IN void *bind_context,
1029 IN osm_madw_t * p_req_madw)
1033 "__osm_vendor_send_err_dummy_cb: Ignoring send error after osm_vendor_unbind\n");
/*
 * osm_vendor_unbind: detach the client from a bind handle.
 * Swaps both callbacks for the dummy no-ops under cb_mutex so the
 * receiver thread can never invoke a callback into freed client state.
 * The bind object itself is intentionally not freed here.
 */
1037 void osm_vendor_unbind(IN osm_bind_handle_t h_bind)
1039 osm_umad_bind_info_t *p_bind = (osm_umad_bind_info_t *) h_bind;
1040 osm_vendor_t *p_vend = p_bind->p_vend;
1042 OSM_LOG_ENTER(p_vend->p_log);
1044 pthread_mutex_lock(&p_vend->cb_mutex);
1045 p_bind->mad_recv_callback = __osm_vendor_recv_dummy_cb;
1046 p_bind->send_err_callback = __osm_vendor_send_err_dummy_cb;
1047 pthread_mutex_unlock(&p_vend->cb_mutex);
1049 OSM_LOG_EXIT(p_vend->p_log);
/*
 * osm_vendor_get: allocate a umad buffer large enough for mad_size
 * bytes of MAD payload plus the umad header, stash it in the vendor
 * wrap, and return a pointer to the MAD payload area (NULL if the
 * allocation failed).  Counterpart of osm_vendor_put().
 */
1052 ib_mad_t *osm_vendor_get(IN osm_bind_handle_t h_bind,
1053 IN const uint32_t mad_size,
1054 IN osm_vend_wrap_t * const p_vw)
1056 osm_umad_bind_info_t *p_bind = (osm_umad_bind_info_t *) h_bind;
1057 osm_vendor_t *p_vend = p_bind->p_vend;
1059 OSM_LOG_ENTER(p_vend->p_log);
1061 OSM_LOG(p_vend->p_log, OSM_LOG_DEBUG,
1062 "Acquiring UMAD for p_madw = %p, size = %u\n", p_vw, mad_size);
1064 p_vw->size = mad_size;
1065 p_vw->umad = umad_alloc(1, mad_size + umad_size());
1068 p_vw->h_bind = h_bind;
1070 OSM_LOG(p_vend->p_log, OSM_LOG_DEBUG,
1071 "Acquired UMAD %p, size = %u\n", p_vw->umad, p_vw->size);
1073 OSM_LOG_EXIT(p_vend->p_log);
1074 return (p_vw->umad ? umad_get_mad(p_vw->umad) : NULL);
/*
 * osm_vendor_put: release the umad buffer acquired by osm_vendor_get.
 * Frees only the umad; the wrap is embedded in the madw, whose p_mad
 * is NULLed to mark the buffer gone.  (Per the retained comment,
 * match-table removal now happens at response arrival, not here.)
 */
1078 osm_vendor_put(IN osm_bind_handle_t h_bind, IN osm_vend_wrap_t * const p_vw)
1080 osm_umad_bind_info_t *p_bind = (osm_umad_bind_info_t *) h_bind;
1081 osm_vendor_t *p_vend = p_bind->p_vend;
1084 OSM_LOG_ENTER(p_vend->p_log);
1088 OSM_LOG(p_vend->p_log, OSM_LOG_DEBUG, "Retiring UMAD %p\n", p_vw->umad);
1091 * We moved the removal of the transaction to immediately after
1095 /* free the mad but the wrapper is part of the madw object */
1096 umad_free(p_vw->umad);
1098 p_madw = PARENT_STRUCT(p_vw, osm_madw_t, vend_wrap);
1099 p_madw->p_mad = NULL;
1101 OSM_LOG_EXIT(p_vend->p_log);
/*
 * osm_vendor_send: transmit a MAD wrapped in p_madw via umad_send.
 * Addressing: DR SMPs go to the permissive LID 0xffff, LR SMPs to
 * dest_lid, GS MADs to dest_lid/remote QP with the well-known QP1 QKey
 * and optional GRH.  For RMPP-capable GS classes the RMPP header is
 * either cleared (inactive) or, without VENDOR_RMPP_SUPPORT, filled in
 * by this layer (version 1, seg 1, RRespTime, paylen).  If a response
 * is expected the transaction is recorded via put_madw before sending;
 * on umad_send failure (ERR 5430) it is removed again and the owner's
 * send_err_callback fires under cb_mutex (callback frees the madw);
 * unsolicited sends return the madw to the pool.  (Fragment: several
 * branch/return lines elided.)
 */
1105 osm_vendor_send(IN osm_bind_handle_t h_bind,
1106 IN osm_madw_t * const p_madw, IN boolean_t const resp_expected)
1108 osm_umad_bind_info_t *const p_bind = h_bind;
1109 osm_vendor_t *const p_vend = p_bind->p_vend;
1110 osm_vend_wrap_t *const p_vw = osm_madw_get_vend_ptr(p_madw);
1111 osm_mad_addr_t *const p_mad_addr = osm_madw_get_mad_addr_ptr(p_madw);
1112 ib_mad_t *const p_mad = osm_madw_get_mad_ptr(p_madw);
1113 ib_sa_mad_t *const p_sa = (ib_sa_mad_t *) p_mad;
1114 ib_mad_addr_t mad_addr;
1116 int __attribute__((__unused__)) is_rmpp = 0;
1117 uint32_t sent_mad_size;
1119 #ifndef VENDOR_RMPP_SUPPORT
1120 uint32_t paylen = 0;
1123 OSM_LOG_ENTER(p_vend->p_log);
1125 CL_ASSERT(p_vw->h_bind == h_bind);
1126 CL_ASSERT(p_mad == umad_get_mad(p_vw->umad));
/* DR SMP: permissive LID, no GRH. */
1128 if (p_mad->mgmt_class == IB_MCLASS_SUBN_DIR) {
1129 umad_set_addr_net(p_vw->umad, 0xffff, 0, 0, 0);
1130 umad_set_grh(p_vw->umad, NULL);
/* LID-routed SMP: destination LID, no GRH. */
1133 if (p_mad->mgmt_class == IB_MCLASS_SUBN_LID) {
1134 umad_set_addr_net(p_vw->umad, p_mad_addr->dest_lid, 0, 0, 0);
1135 umad_set_grh(p_vw->umad, NULL);
/* GS MAD: remote QP/SL with the well-known QP1 QKey. */
1139 umad_set_addr_net(p_vw->umad, p_mad_addr->dest_lid,
1140 p_mad_addr->addr_type.gsi.remote_qp,
1141 p_mad_addr->addr_type.gsi.service_level,
1142 IB_QP1_WELL_KNOWN_Q_KEY);
1143 if (p_mad_addr->addr_type.gsi.global_route) {
1144 mad_addr.grh_present = 1;
1145 mad_addr.gid_index = 0;
1146 mad_addr.hop_limit = p_mad_addr->addr_type.gsi.grh_info.hop_limit;
1147 ib_grh_get_ver_class_flow(p_mad_addr->addr_type.gsi.grh_info.ver_class_flow,
1148 NULL, &mad_addr.traffic_class,
1149 &mad_addr.flow_label);
1150 memcpy(&mad_addr.gid, &p_mad_addr->addr_type.gsi.grh_info.dest_gid, 16);
1151 umad_set_grh(p_vw->umad, &mad_addr);
1153 umad_set_grh(p_vw->umad, NULL);
1154 umad_set_pkey(p_vw->umad, p_mad_addr->addr_type.gsi.pkey_ix);
1155 if (ib_class_is_rmpp(p_mad->mgmt_class)) { /* RMPP GS classes */
1156 if (!ib_rmpp_is_flag_set((ib_rmpp_mad_t *) p_sa,
1157 IB_RMPP_FLAG_ACTIVE)) {
1158 /* Clear RMPP header when RMPP not ACTIVE */
1159 p_sa->rmpp_version = 0;
1160 p_sa->rmpp_type = 0;
1161 p_sa->rmpp_flags = 0;
1162 p_sa->rmpp_status = 0;
1163 #ifdef VENDOR_RMPP_SUPPORT
1166 OSM_LOG(p_vend->p_log, OSM_LOG_DEBUG, "RMPP %d length %d\n",
1167 ib_rmpp_is_flag_set((ib_rmpp_mad_t *) p_sa,
1168 IB_RMPP_FLAG_ACTIVE),
/* No vendor RMPP: this layer fills the RMPP header itself. */
1172 p_sa->rmpp_version = 1;
1173 p_sa->seg_num = cl_ntoh32(1); /* first DATA is seg 1 */
1174 p_sa->rmpp_flags |= (uint8_t) 0x70; /* RRespTime of 14 (high 5 bits) */
1175 p_sa->rmpp_status = 0;
1176 paylen = p_madw->mad_size - IB_SA_MAD_HDR_SIZE;
1177 paylen += (IB_SA_MAD_HDR_SIZE - MAD_RMPP_HDR_SIZE);
1178 p_sa->paylen_newwin = cl_ntoh32(paylen);
/* Record the transaction before sending so a fast response can match. */
1185 put_madw(p_vend, p_madw, p_mad->trans_id, p_mad->mgmt_class);
1187 #ifdef VENDOR_RMPP_SUPPORT
1188 sent_mad_size = p_madw->mad_size;
1190 sent_mad_size = is_rmpp ? p_madw->mad_size - IB_SA_MAD_HDR_SIZE :
1193 tid = cl_ntoh64(p_mad->trans_id);
1194 if ((ret = umad_send(p_bind->port_id, p_bind->agent_id, p_vw->umad,
1196 resp_expected ? p_bind->timeout : 0,
1197 p_bind->max_retries)) < 0) {
1198 OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5430: "
1199 "Send p_madw = %p of size %d, Class 0x%x, Method 0x%X, "
1200 "Attr 0x%X, TID 0x%" PRIx64 " failed %d (%m)\n",
1201 p_madw, sent_mad_size, p_mad->mgmt_class,
1202 p_mad->method, cl_ntoh16(p_mad->attr_id), tid, ret);
1203 if (resp_expected) {
1204 get_madw(p_vend, &p_mad->trans_id,
1205 p_mad->mgmt_class); /* remove from aging table */
1206 p_madw->status = IB_ERROR;
1207 pthread_mutex_lock(&p_vend->cb_mutex);
1208 (*p_bind->send_err_callback) (p_bind->client_context, p_madw); /* cb frees madw */
1209 pthread_mutex_unlock(&p_vend->cb_mutex);
1211 osm_mad_pool_put(p_bind->p_mad_pool, p_madw);
/* Unsolicited send: nothing to wait for; return the madw now. */
1216 osm_mad_pool_put(p_bind->p_mad_pool, p_madw);
1218 OSM_LOG(p_vend->p_log, OSM_LOG_DEBUG, "Completed sending %s TID 0x%" PRIx64 "\n",
1219 resp_expected ? "request" : "response or unsolicited", tid);
1221 OSM_LOG_EXIT(p_vend->p_log);
/*
 * osm_vendor_local_lid_change: notification hook for a local LID
 * change.  The umad layer needs no action, so this only logs
 * enter/exit (the success return is elided in this extract).
 */
1225 ib_api_status_t osm_vendor_local_lid_change(IN osm_bind_handle_t h_bind)
1227 osm_umad_bind_info_t *p_bind = (osm_umad_bind_info_t *) h_bind;
1228 osm_vendor_t *p_vend = p_bind->p_vend;
1230 OSM_LOG_ENTER(p_vend->p_log);
1232 OSM_LOG_EXIT(p_vend->p_log);
/*
 * osm_vendor_set_sm: raise or lower the port's IS_SM capability bit
 * by holding the issm sysfs file open (the kernel sets the bit while
 * the file is open).  TRUE opens issm_path non-blocking (ERR 5431 on
 * failure); FALSE closes the held fd (ERR 5432 if close fails).
 * issmfd == -1 means the bit is not currently held.
 */
1236 void osm_vendor_set_sm(IN osm_bind_handle_t h_bind, IN boolean_t is_sm_val)
1238 osm_umad_bind_info_t *p_bind = (osm_umad_bind_info_t *) h_bind;
1239 osm_vendor_t *p_vend = p_bind->p_vend;
1241 OSM_LOG_ENTER(p_vend->p_log);
1242 if (TRUE == is_sm_val) {
1243 p_vend->issmfd = open(p_vend->issm_path, O_NONBLOCK);
1244 if (p_vend->issmfd < 0) {
1245 OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5431: "
1246 "setting IS_SM capmask: cannot open file "
1248 p_vend->issm_path, strerror(errno));
1249 p_vend->issmfd = -1;
1251 } else if (p_vend->issmfd != -1) {
1252 if (0 != close(p_vend->issmfd))
1253 OSM_LOG(p_vend->p_log, OSM_LOG_ERROR, "ERR 5432: "
1254 "clearing IS_SM capmask: cannot close: %s\n",
1256 p_vend->issmfd = -1;
1258 OSM_LOG_EXIT(p_vend->p_log);
/*
 * osm_vendor_set_debug: vendor debug-level hook; body elided in this
 * extract (presumably a no-op for umad -- TODO confirm).
 */
1261 void osm_vendor_set_debug(IN osm_vendor_t * const p_vend, IN int32_t level)
1266 #endif /* OSM_VENDOR_INTF_OPENIB */