2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
38 #endif /* HAVE_CONFIG_H */
#include <alloca.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <infiniband/cm.h>
#include <infiniband/cm_abi.h>
#include <infiniband/driver.h>
#include <infiniband/marshall.h>
/*
 * Optional valgrind support: when built with INCLUDE_VALGRIND, mark
 * kernel-written response buffers as defined so memcheck does not
 * report false "uninitialised value" errors.  When valgrind is absent
 * the macro compiles away to nothing.
 */
#ifdef INCLUDE_VALGRIND
#  include <valgrind/memcheck.h>
#  ifndef VALGRIND_MAKE_MEM_DEFINED
#    warning "Valgrind requested, but VALGRIND_MAKE_MEM_DEFINED undefined"
#  endif
#endif

#ifndef VALGRIND_MAKE_MEM_DEFINED
#  define VALGRIND_MAKE_MEM_DEFINED(addr, len)
#endif
/* Prefix for all diagnostics emitted by this library. */
#define PFX "libibcm: "

/* Kernel ucm ABI version, filled in by check_abi_version(). */
static int abi_ver;
/* Protects one-time initialization (ucm_init / check_abi_version). */
static pthread_mutex_t mut = PTHREAD_MUTEX_INITIALIZER;

enum {
	IB_UCM_MAX_DEVICES = 32		/* max /dev/infiniband/ucm<N> probed */
};
/*
 * Record an error in errno and return -1, the library-wide failure
 * convention used by every public entry point below.
 */
static inline int ERR(int err)
{
	errno = err;
	return -1;
}
/*
 * Build a kernel command message (header + command) on the caller's
 * stack together with a response buffer, for commands that return data.
 * Expands `return` on allocation failure, so it may only be used inside
 * functions returning int.  `cmd` is zeroed; `resp` is filled by the
 * kernel after the write() to the ucm fd.
 */
#define CM_CREATE_MSG_CMD_RESP(msg, cmd, resp, type, size) \
do {                                        \
	struct cm_abi_cmd_hdr *hdr;         \
                                            \
	size = sizeof(*hdr) + sizeof(*cmd); \
	msg = alloca(size);                 \
	if (!msg)                           \
		return ERR(ENOMEM);         \
	hdr = msg;                          \
	cmd = msg + sizeof(*hdr);           \
	hdr->cmd = type;                    \
	hdr->in  = sizeof(*cmd);            \
	hdr->out = sizeof(*resp);           \
	memset(cmd, 0, sizeof(*cmd));       \
	resp = alloca(sizeof(*resp));       \
	if (!resp)                          \
		return ERR(ENOMEM);         \
	cmd->response = (uintptr_t) resp;   \
} while (0)
/*
 * As CM_CREATE_MSG_CMD_RESP, but for commands that carry no response
 * payload (hdr->out = 0).  Also `return`s on allocation failure.
 */
#define CM_CREATE_MSG_CMD(msg, cmd, type, size) \
do {                                        \
	struct cm_abi_cmd_hdr *hdr;         \
                                            \
	size = sizeof(*hdr) + sizeof(*cmd); \
	msg = alloca(size);                 \
	if (!msg)                           \
		return ERR(ENOMEM);         \
	hdr = msg;                          \
	cmd = msg + sizeof(*hdr);           \
	hdr->cmd = type;                    \
	hdr->in  = sizeof(*cmd);            \
	hdr->out = 0;                       \
	memset(cmd, 0, sizeof(*cmd));       \
} while (0)
117 struct cm_id_private {
119 int events_completed;
/*
 * Recover a pointer to the enclosing structure from a pointer to one of
 * its members.  `ptr` is parenthesized (the original expansion would
 * mis-bind for argument expressions like `p + 1`), and the arithmetic
 * is done on char * instead of void * for standard-C conformance.
 */
#define container_of(ptr, type, field) \
	((type *) ((char *)(ptr) - offsetof(type, field)))
127 static int check_abi_version(void)
131 if (ibv_read_sysfs_file(ibv_get_sysfs_path(),
132 "class/infiniband_cm/abi_version",
133 value, sizeof value) < 0) {
134 fprintf(stderr, PFX "couldn't read ABI version\n");
138 abi_ver = strtol(value, NULL, 10);
139 if (abi_ver < IB_USER_CM_MIN_ABI_VERSION ||
140 abi_ver > IB_USER_CM_MAX_ABI_VERSION) {
141 fprintf(stderr, PFX "kernel ABI version %d "
142 "doesn't match library version %d.\n",
143 abi_ver, IB_USER_CM_MAX_ABI_VERSION);
149 static int ucm_init(void)
153 pthread_mutex_lock(&mut);
155 ret = check_abi_version();
156 pthread_mutex_unlock(&mut);
161 static int ucm_get_dev_index(char *dev_name)
164 char ibdev[IBV_SYSFS_NAME_MAX];
167 for (i = 0; i < IB_UCM_MAX_DEVICES; i++) {
168 ret = asprintf(&dev_path, "/sys/class/infiniband_cm/ucm%d", i);
172 ret = ibv_read_sysfs_file(dev_path, "ibdev", ibdev, sizeof ibdev);
176 if (!strcmp(dev_name, ibdev)) {
186 struct ib_cm_device* ib_cm_open_device(struct ibv_context *device_context)
188 struct ib_cm_device *dev;
195 index = ucm_get_dev_index(device_context->device->name);
199 dev = malloc(sizeof *dev);
203 dev->device_context = device_context;
205 ret = asprintf(&dev_path, "/dev/infiniband/ucm%d", index);
209 dev->fd = open(dev_path, O_RDWR);
223 void ib_cm_close_device(struct ib_cm_device *device)
229 static void ib_cm_free_id(struct cm_id_private *cm_id_priv)
231 pthread_cond_destroy(&cm_id_priv->cond);
232 pthread_mutex_destroy(&cm_id_priv->mut);
236 static struct cm_id_private *ib_cm_alloc_id(struct ib_cm_device *device,
239 struct cm_id_private *cm_id_priv;
241 cm_id_priv = malloc(sizeof *cm_id_priv);
245 memset(cm_id_priv, 0, sizeof *cm_id_priv);
246 cm_id_priv->id.device = device;
247 cm_id_priv->id.context = context;
248 pthread_mutex_init(&cm_id_priv->mut, NULL);
249 if (pthread_cond_init(&cm_id_priv->cond, NULL))
254 err: ib_cm_free_id(cm_id_priv);
258 int ib_cm_create_id(struct ib_cm_device *device,
259 struct ib_cm_id **cm_id, void *context)
261 struct cm_abi_create_id_resp *resp;
262 struct cm_abi_create_id *cmd;
263 struct cm_id_private *cm_id_priv;
268 cm_id_priv = ib_cm_alloc_id(device, context);
272 CM_CREATE_MSG_CMD_RESP(msg, cmd, resp, IB_USER_CM_CMD_CREATE_ID, size);
273 cmd->uid = (uintptr_t) cm_id_priv;
275 result = write(device->fd, msg, size);
279 VALGRIND_MAKE_MEM_DEFINED(resp, sizeof *resp);
281 cm_id_priv->id.handle = resp->id;
282 *cm_id = &cm_id_priv->id;
285 err: ib_cm_free_id(cm_id_priv);
/*
 * Destroy a communication identifier.  After the kernel confirms the
 * destroy it reports how many events it delivered for this id; block
 * until ib_cm_ack_event() has accounted for all of them before freeing
 * the private structure, so no acker touches freed memory.
 */
int ib_cm_destroy_id(struct ib_cm_id *cm_id)
{
	struct cm_abi_destroy_id_resp *resp;
	struct cm_abi_destroy_id *cmd;
	struct cm_id_private *cm_id_priv;
	void *msg;
	int result;
	int size;

	CM_CREATE_MSG_CMD_RESP(msg, cmd, resp, IB_USER_CM_CMD_DESTROY_ID, size);
	cmd->id = cm_id->handle;

	result = write(cm_id->device->fd, msg, size);
	if (result != size)
		return (result >= 0) ? ERR(ECONNREFUSED) : -1;

	VALGRIND_MAKE_MEM_DEFINED(resp, sizeof *resp);

	cm_id_priv = container_of(cm_id, struct cm_id_private, id);

	pthread_mutex_lock(&cm_id_priv->mut);
	while (cm_id_priv->events_completed < resp->events_reported)
		pthread_cond_wait(&cm_id_priv->cond, &cm_id_priv->mut);
	pthread_mutex_unlock(&cm_id_priv->mut);

	ib_cm_free_id(cm_id_priv);
	return 0;
}
318 int ib_cm_attr_id(struct ib_cm_id *cm_id, struct ib_cm_attr_param *param)
320 struct cm_abi_attr_id_resp *resp;
321 struct cm_abi_attr_id *cmd;
329 CM_CREATE_MSG_CMD_RESP(msg, cmd, resp, IB_USER_CM_CMD_ATTR_ID, size);
330 cmd->id = cm_id->handle;
332 result = write(cm_id->device->fd, msg, size);
334 return (result >= 0) ? ERR(ECONNREFUSED) : -1;
336 VALGRIND_MAKE_MEM_DEFINED(resp, sizeof *resp);
338 param->service_id = resp->service_id;
339 param->service_mask = resp->service_mask;
340 param->local_id = resp->local_id;
341 param->remote_id = resp->remote_id;
345 int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
346 struct ibv_qp_attr *qp_attr,
349 struct ibv_kern_qp_attr *resp;
350 struct cm_abi_init_qp_attr *cmd;
355 if (!qp_attr || !qp_attr_mask)
358 CM_CREATE_MSG_CMD_RESP(msg, cmd, resp, IB_USER_CM_CMD_INIT_QP_ATTR, size);
359 cmd->id = cm_id->handle;
360 cmd->qp_state = qp_attr->qp_state;
362 result = write(cm_id->device->fd, msg, size);
364 return (result >= 0) ? ERR(ECONNREFUSED) : result;
366 VALGRIND_MAKE_MEM_DEFINED(resp, sizeof *resp);
368 *qp_attr_mask = resp->qp_attr_mask;
369 ibv_copy_qp_attr_from_kern(qp_attr, resp);
/*
 * Put cm_id into listening state for connection requests whose service
 * id matches (service_id & service_mask).  Returns 0 on success, -1
 * with errno set on failure.
 */
int ib_cm_listen(struct ib_cm_id *cm_id,
		 uint64_t service_id,
		 uint64_t service_mask)
{
	struct cm_abi_listen *cmd;
	void *msg;
	int result;
	int size;

	CM_CREATE_MSG_CMD(msg, cmd, IB_USER_CM_CMD_LISTEN, size);
	cmd->id = cm_id->handle;
	cmd->service_id = service_id;
	cmd->service_mask = service_mask;

	result = write(cm_id->device->fd, msg, size);
	if (result != size)
		return (result >= 0) ? ERR(ECONNREFUSED) : -1;

	return 0;
}
395 int ib_cm_send_req(struct ib_cm_id *cm_id, struct ib_cm_req_param *param)
397 struct ibv_kern_path_rec *p_path;
398 struct ibv_kern_path_rec *a_path;
399 struct cm_abi_req *cmd;
407 CM_CREATE_MSG_CMD(msg, cmd, IB_USER_CM_CMD_SEND_REQ, size);
408 cmd->id = cm_id->handle;
409 cmd->qpn = param->qp_num;
410 cmd->qp_type = param->qp_type;
411 cmd->psn = param->starting_psn;
412 cmd->sid = param->service_id;
413 cmd->peer_to_peer = param->peer_to_peer;
414 cmd->responder_resources = param->responder_resources;
415 cmd->initiator_depth = param->initiator_depth;
416 cmd->remote_cm_response_timeout = param->remote_cm_response_timeout;
417 cmd->flow_control = param->flow_control;
418 cmd->local_cm_response_timeout = param->local_cm_response_timeout;
419 cmd->retry_count = param->retry_count;
420 cmd->rnr_retry_count = param->rnr_retry_count;
421 cmd->max_cm_retries = param->max_cm_retries;
422 cmd->srq = param->srq;
424 if (param->primary_path) {
425 p_path = alloca(sizeof(*p_path));
429 ibv_copy_path_rec_to_kern(p_path, param->primary_path);
430 cmd->primary_path = (uintptr_t) p_path;
433 if (param->alternate_path) {
434 a_path = alloca(sizeof(*a_path));
438 ibv_copy_path_rec_to_kern(a_path, param->alternate_path);
439 cmd->alternate_path = (uintptr_t) a_path;
442 if (param->private_data && param->private_data_len) {
443 cmd->data = (uintptr_t) param->private_data;
444 cmd->len = param->private_data_len;
447 result = write(cm_id->device->fd, msg, size);
449 return (result >= 0) ? ERR(ECONNREFUSED) : -1;
454 int ib_cm_send_rep(struct ib_cm_id *cm_id, struct ib_cm_rep_param *param)
456 struct cm_abi_rep *cmd;
464 CM_CREATE_MSG_CMD(msg, cmd, IB_USER_CM_CMD_SEND_REP, size);
465 cmd->uid = (uintptr_t) container_of(cm_id, struct cm_id_private, id);
466 cmd->id = cm_id->handle;
467 cmd->qpn = param->qp_num;
468 cmd->psn = param->starting_psn;
469 cmd->responder_resources = param->responder_resources;
470 cmd->initiator_depth = param->initiator_depth;
471 cmd->target_ack_delay = param->target_ack_delay;
472 cmd->failover_accepted = param->failover_accepted;
473 cmd->flow_control = param->flow_control;
474 cmd->rnr_retry_count = param->rnr_retry_count;
475 cmd->srq = param->srq;
477 if (param->private_data && param->private_data_len) {
478 cmd->data = (uintptr_t) param->private_data;
479 cmd->len = param->private_data_len;
482 result = write(cm_id->device->fd, msg, size);
484 return (result >= 0) ? ERR(ECONNREFUSED) : -1;
/*
 * Shared helper for the CM messages that carry nothing but optional
 * private data (RTU, DREQ, DREP).  `type` selects the kernel command.
 * Returns 0 on success, -1 with errno set.
 */
static inline int cm_send_private_data(struct ib_cm_id *cm_id,
				       uint32_t type,
				       void *private_data,
				       uint8_t private_data_len)
{
	struct cm_abi_private_data *cmd;
	void *msg;
	int result;
	int size;

	CM_CREATE_MSG_CMD(msg, cmd, type, size);
	cmd->id = cm_id->handle;

	if (private_data && private_data_len) {
		cmd->data = (uintptr_t) private_data;
		cmd->len = private_data_len;
	}

	result = write(cm_id->device->fd, msg, size);
	if (result != size)
		return (result >= 0) ? ERR(ECONNREFUSED) : -1;

	return 0;
}
514 int ib_cm_send_rtu(struct ib_cm_id *cm_id,
516 uint8_t private_data_len)
518 return cm_send_private_data(cm_id, IB_USER_CM_CMD_SEND_RTU,
519 private_data, private_data_len);
522 int ib_cm_send_dreq(struct ib_cm_id *cm_id,
524 uint8_t private_data_len)
526 return cm_send_private_data(cm_id, IB_USER_CM_CMD_SEND_DREQ,
527 private_data, private_data_len);
530 int ib_cm_send_drep(struct ib_cm_id *cm_id,
532 uint8_t private_data_len)
534 return cm_send_private_data(cm_id, IB_USER_CM_CMD_SEND_DREP,
535 private_data, private_data_len);
/*
 * Tell the kernel the connection is established (legacy ESTABLISH
 * command, used by ib_cm_notify() for COMM_EST on older ABIs).
 */
static int cm_establish(struct ib_cm_id *cm_id)
{
	struct cm_abi_establish *cmd;
	void *msg;
	int result;
	int size;

	CM_CREATE_MSG_CMD(msg, cmd, IB_USER_CM_CMD_ESTABLISH, size);
	cmd->id = cm_id->handle;

	result = write(cm_id->device->fd, msg, size);
	if (result != size)
		return (result >= 0) ? ERR(ECONNREFUSED) : -1;

	return 0;
}
555 int ib_cm_notify(struct ib_cm_id *cm_id, enum ibv_event_type event)
557 struct cm_abi_notify *cmd;
563 if (event == IBV_EVENT_COMM_EST)
564 return cm_establish(cm_id);
569 CM_CREATE_MSG_CMD(msg, cmd, IB_USER_CM_CMD_NOTIFY, size);
570 cmd->id = cm_id->handle;
573 result = write(cm_id->device->fd, msg, size);
575 return (result >= 0) ? ERR(ECONNREFUSED) : -1;
/*
 * Shared helper for status-bearing CM messages (REJ, APR): carries a
 * status code plus optional additional info and private data.  `type`
 * selects the kernel command.  Returns 0 on success, -1 with errno set.
 */
static inline int cm_send_status(struct ib_cm_id *cm_id,
				 uint32_t type,
				 int status,
				 void *info,
				 uint8_t info_length,
				 void *private_data,
				 uint8_t private_data_len)
{
	struct cm_abi_info *cmd;
	void *msg;
	int result;
	int size;

	CM_CREATE_MSG_CMD(msg, cmd, type, size);
	cmd->id = cm_id->handle;
	cmd->status = status;

	if (private_data && private_data_len) {
		cmd->data = (uintptr_t) private_data;
		cmd->data_len = private_data_len;
	}

	if (info && info_length) {
		cmd->info = (uintptr_t) info;
		cmd->info_len = info_length;
	}

	result = write(cm_id->device->fd, msg, size);
	if (result != size)
		return (result >= 0) ? ERR(ECONNREFUSED) : -1;

	return 0;
}
614 int ib_cm_send_rej(struct ib_cm_id *cm_id,
615 enum ib_cm_rej_reason reason,
619 uint8_t private_data_len)
621 return cm_send_status(cm_id, IB_USER_CM_CMD_SEND_REJ, reason,
623 private_data, private_data_len);
626 int ib_cm_send_apr(struct ib_cm_id *cm_id,
627 enum ib_cm_apr_status status,
631 uint8_t private_data_len)
633 return cm_send_status(cm_id, IB_USER_CM_CMD_SEND_APR, status,
635 private_data, private_data_len);
/*
 * Send a message-receipt acknowledgement (MRA) asking the peer to wait
 * `service_timeout` before timing out.  Returns 0 on success, -1 with
 * errno set.
 *
 * Consistency fix: the short-write path now returns -1 like the other
 * senders (write()'s own -1/errno is preserved either way).
 */
int ib_cm_send_mra(struct ib_cm_id *cm_id,
		   uint8_t service_timeout,
		   void *private_data,
		   uint8_t private_data_len)
{
	struct cm_abi_mra *cmd;
	void *msg;
	int result;
	int size;

	CM_CREATE_MSG_CMD(msg, cmd, IB_USER_CM_CMD_SEND_MRA, size);
	cmd->id = cm_id->handle;
	cmd->timeout = service_timeout;

	if (private_data && private_data_len) {
		cmd->data = (uintptr_t) private_data;
		cmd->len = private_data_len;
	}

	result = write(cm_id->device->fd, msg, size);
	if (result != size)
		return (result >= 0) ? ERR(ECONNREFUSED) : -1;

	return 0;
}
664 int ib_cm_send_lap(struct ib_cm_id *cm_id,
665 struct ibv_sa_path_rec *alternate_path,
667 uint8_t private_data_len)
669 struct ibv_kern_path_rec *abi_path;
670 struct cm_abi_lap *cmd;
675 CM_CREATE_MSG_CMD(msg, cmd, IB_USER_CM_CMD_SEND_LAP, size);
676 cmd->id = cm_id->handle;
678 if (alternate_path) {
679 abi_path = alloca(sizeof(*abi_path));
683 ibv_copy_path_rec_to_kern(abi_path, alternate_path);
684 cmd->path = (uintptr_t) abi_path;
687 if (private_data && private_data_len) {
688 cmd->data = (uintptr_t) private_data;
689 cmd->len = private_data_len;
692 result = write(cm_id->device->fd, msg, size);
694 return (result >= 0) ? ERR(ECONNREFUSED) : -1;
699 int ib_cm_send_sidr_req(struct ib_cm_id *cm_id,
700 struct ib_cm_sidr_req_param *param)
702 struct ibv_kern_path_rec *abi_path;
703 struct cm_abi_sidr_req *cmd;
711 CM_CREATE_MSG_CMD(msg, cmd, IB_USER_CM_CMD_SEND_SIDR_REQ, size);
712 cmd->id = cm_id->handle;
713 cmd->sid = param->service_id;
714 cmd->timeout = param->timeout_ms;
715 cmd->pkey = param->path->pkey;
716 cmd->max_cm_retries = param->max_cm_retries;
719 abi_path = alloca(sizeof(*abi_path));
723 ibv_copy_path_rec_to_kern(abi_path, param->path);
724 cmd->path = (uintptr_t) abi_path;
727 if (param->private_data && param->private_data_len) {
728 cmd->data = (uintptr_t) param->private_data;
729 cmd->len = param->private_data_len;
732 result = write(cm_id->device->fd, msg, size);
734 return (result >= 0) ? ERR(ECONNREFUSED) : result;
739 int ib_cm_send_sidr_rep(struct ib_cm_id *cm_id,
740 struct ib_cm_sidr_rep_param *param)
742 struct cm_abi_sidr_rep *cmd;
750 CM_CREATE_MSG_CMD(msg, cmd, IB_USER_CM_CMD_SEND_SIDR_REP, size);
751 cmd->id = cm_id->handle;
752 cmd->qpn = param->qp_num;
753 cmd->qkey = param->qkey;
754 cmd->status = param->status;
756 if (param->private_data && param->private_data_len) {
757 cmd->data = (uintptr_t) param->private_data;
758 cmd->data_len = param->private_data_len;
761 if (param->info && param->info_length) {
762 cmd->info = (uintptr_t) param->info;
763 cmd->info_len = param->info_length;
766 result = write(cm_id->device->fd, msg, size);
768 return (result >= 0) ? ERR(ECONNREFUSED) : -1;
773 static void cm_event_req_get(struct ib_cm_req_event_param *ureq,
774 struct cm_abi_req_event_resp *kreq)
776 ureq->remote_ca_guid = kreq->remote_ca_guid;
777 ureq->remote_qkey = kreq->remote_qkey;
778 ureq->remote_qpn = kreq->remote_qpn;
779 ureq->qp_type = kreq->qp_type;
780 ureq->starting_psn = kreq->starting_psn;
781 ureq->responder_resources = kreq->responder_resources;
782 ureq->initiator_depth = kreq->initiator_depth;
783 ureq->local_cm_response_timeout = kreq->local_cm_response_timeout;
784 ureq->flow_control = kreq->flow_control;
785 ureq->remote_cm_response_timeout = kreq->remote_cm_response_timeout;
786 ureq->retry_count = kreq->retry_count;
787 ureq->rnr_retry_count = kreq->rnr_retry_count;
788 ureq->srq = kreq->srq;
789 ureq->port = kreq->port;
791 ibv_copy_path_rec_from_kern(ureq->primary_path, &kreq->primary_path);
792 if (ureq->alternate_path)
793 ibv_copy_path_rec_from_kern(ureq->alternate_path,
794 &kreq->alternate_path);
797 static void cm_event_rep_get(struct ib_cm_rep_event_param *urep,
798 struct cm_abi_rep_event_resp *krep)
800 urep->remote_ca_guid = krep->remote_ca_guid;
801 urep->remote_qkey = krep->remote_qkey;
802 urep->remote_qpn = krep->remote_qpn;
803 urep->starting_psn = krep->starting_psn;
804 urep->responder_resources = krep->responder_resources;
805 urep->initiator_depth = krep->initiator_depth;
806 urep->target_ack_delay = krep->target_ack_delay;
807 urep->failover_accepted = krep->failover_accepted;
808 urep->flow_control = krep->flow_control;
809 urep->rnr_retry_count = krep->rnr_retry_count;
810 urep->srq = krep->srq;
813 static void cm_event_sidr_rep_get(struct ib_cm_sidr_rep_event_param *urep,
814 struct cm_abi_sidr_rep_event_resp *krep)
816 urep->status = krep->status;
817 urep->qkey = krep->qkey;
818 urep->qpn = krep->qpn;
821 int ib_cm_get_event(struct ib_cm_device *device, struct ib_cm_event **event)
823 struct cm_id_private *cm_id_priv;
824 struct cm_abi_cmd_hdr *hdr;
825 struct cm_abi_event_get *cmd;
826 struct cm_abi_event_resp *resp;
827 struct ib_cm_event *evt = NULL;
828 struct ibv_sa_path_rec *path_a = NULL;
829 struct ibv_sa_path_rec *path_b = NULL;
839 size = sizeof(*hdr) + sizeof(*cmd);
845 cmd = msg + sizeof(*hdr);
847 hdr->cmd = IB_USER_CM_CMD_EVENT;
848 hdr->in = sizeof(*cmd);
849 hdr->out = sizeof(*resp);
851 memset(cmd, 0, sizeof(*cmd));
853 resp = alloca(sizeof(*resp));
857 cmd->response = (uintptr_t) resp;
858 cmd->data_len = (uint8_t)(~0U);
859 cmd->info_len = (uint8_t)(~0U);
861 data = malloc(cmd->data_len);
863 result = ERR(ENOMEM);
867 info = malloc(cmd->info_len);
869 result = ERR(ENOMEM);
873 cmd->data = (uintptr_t) data;
874 cmd->info = (uintptr_t) info;
876 result = write(device->fd, msg, size);
877 if (result != size) {
878 result = (result >= 0) ? ERR(ECONNREFUSED) : -1;
882 VALGRIND_MAKE_MEM_DEFINED(resp, sizeof *resp);
887 evt = malloc(sizeof(*evt));
889 result = ERR(ENOMEM);
892 memset(evt, 0, sizeof(*evt));
893 evt->cm_id = (void *) (uintptr_t) resp->uid;
894 evt->event = resp->event;
896 if (resp->present & CM_ABI_PRES_PRIMARY) {
897 path_a = malloc(sizeof(*path_a));
899 result = ERR(ENOMEM);
904 if (resp->present & CM_ABI_PRES_ALTERNATE) {
905 path_b = malloc(sizeof(*path_b));
907 result = ERR(ENOMEM);
912 switch (evt->event) {
913 case IB_CM_REQ_RECEIVED:
914 evt->param.req_rcvd.listen_id = evt->cm_id;
915 cm_id_priv = ib_cm_alloc_id(evt->cm_id->device,
916 evt->cm_id->context);
918 result = ERR(ENOMEM);
921 cm_id_priv->id.handle = resp->id;
922 evt->cm_id = &cm_id_priv->id;
923 evt->param.req_rcvd.primary_path = path_a;
924 evt->param.req_rcvd.alternate_path = path_b;
927 cm_event_req_get(&evt->param.req_rcvd, &resp->u.req_resp);
929 case IB_CM_REP_RECEIVED:
930 cm_event_rep_get(&evt->param.rep_rcvd, &resp->u.rep_resp);
932 case IB_CM_MRA_RECEIVED:
933 evt->param.mra_rcvd.service_timeout = resp->u.mra_resp.timeout;
935 case IB_CM_REJ_RECEIVED:
936 evt->param.rej_rcvd.reason = resp->u.rej_resp.reason;
937 evt->param.rej_rcvd.ari = info;
940 case IB_CM_LAP_RECEIVED:
941 evt->param.lap_rcvd.alternate_path = path_b;
943 ibv_copy_path_rec_from_kern(evt->param.lap_rcvd.alternate_path,
944 &resp->u.lap_resp.path);
946 case IB_CM_APR_RECEIVED:
947 evt->param.apr_rcvd.ap_status = resp->u.apr_resp.status;
948 evt->param.apr_rcvd.apr_info = info;
951 case IB_CM_SIDR_REQ_RECEIVED:
952 evt->param.sidr_req_rcvd.listen_id = evt->cm_id;
953 cm_id_priv = ib_cm_alloc_id(evt->cm_id->device,
954 evt->cm_id->context);
956 result = ERR(ENOMEM);
959 cm_id_priv->id.handle = resp->id;
960 evt->cm_id = &cm_id_priv->id;
961 evt->param.sidr_req_rcvd.pkey = resp->u.sidr_req_resp.pkey;
962 evt->param.sidr_req_rcvd.port = resp->u.sidr_req_resp.port;
964 case IB_CM_SIDR_REP_RECEIVED:
965 cm_event_sidr_rep_get(&evt->param.sidr_rep_rcvd,
966 &resp->u.sidr_rep_resp);
967 evt->param.sidr_rep_rcvd.info = info;
971 evt->param.send_status = resp->u.send_status;
975 if (resp->present & CM_ABI_PRES_DATA) {
976 evt->private_data = data;
998 int ib_cm_ack_event(struct ib_cm_event *event)
/*
 * Release an event obtained from ib_cm_get_event(): free the per-event
 * allocations the getter transferred to the caller, then credit the
 * owning cm_id's events_completed counter so a concurrent
 * ib_cm_destroy_id() can stop waiting.
 *
 * NOTE(review): this listing is truncated -- declaration/brace lines,
 * `break` statements, and the trailing free(event)/return appear to be
 * missing; compare against the complete source before relying on it.
 */
1000 struct cm_id_private *cm_id_priv;
1005 if (event->private_data)
1006 free(event->private_data);
/* Default owner: the cm_id the event was delivered on. */
1008 cm_id_priv = container_of(event->cm_id, struct cm_id_private, id);
1010 switch (event->event) {
1011 case IB_CM_REQ_RECEIVED:
/*
 * REQ created a brand-new cm_id; the event is accounted against the
 * listening id, and the path records allocated by the getter are freed.
 */
1012 cm_id_priv = container_of(event->param.req_rcvd.listen_id,
1013 struct cm_id_private, id);
1014 free(event->param.req_rcvd.primary_path);
1015 if (event->param.req_rcvd.alternate_path)
1016 free(event->param.req_rcvd.alternate_path);
1018 case IB_CM_REJ_RECEIVED:
/* ARI buffer ownership came from the getter's `info` allocation. */
1019 if (event->param.rej_rcvd.ari)
1020 free(event->param.rej_rcvd.ari);
1022 case IB_CM_LAP_RECEIVED:
1023 free(event->param.lap_rcvd.alternate_path);
1025 case IB_CM_APR_RECEIVED:
1026 if (event->param.apr_rcvd.apr_info)
1027 free(event->param.apr_rcvd.apr_info);
1029 case IB_CM_SIDR_REQ_RECEIVED:
/* Like REQ: account against the listening id, not the new id. */
1030 cm_id_priv = container_of(event->param.sidr_req_rcvd.listen_id,
1031 struct cm_id_private, id);
1033 case IB_CM_SIDR_REP_RECEIVED:
1034 if (event->param.sidr_rep_rcvd.info)
1035 free(event->param.sidr_rep_rcvd.info);
/*
 * Signal the destroyer: one more delivered event has been consumed.
 * Pairs with the events_completed/events_reported wait in
 * ib_cm_destroy_id().
 */
1040 pthread_mutex_lock(&cm_id_priv->mut);
1041 cm_id_priv->events_completed++;
1042 pthread_cond_signal(&cm_id_priv->cond);
1043 pthread_mutex_unlock(&cm_id_priv->mut);