2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005-2006 Intel Corporation. All rights reserved.
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the
9 * OpenIB.org BSD license below:
11 * Redistribution and use in source and binary forms, with or
12 * without modification, are permitted provided that the following
15 * - Redistributions of source code must retain the above
16 * copyright notice, this list of conditions and the following
19 * - Redistributions in binary form must reproduce the above
20 * copyright notice, this list of conditions and the following
21 * disclaimer in the documentation and/or other materials
22 * provided with the distribution.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
47 #include <infiniband/cm.h>
48 #include <rdma/ib_user_cm.h>
49 #include <infiniband/driver.h>
50 #include <infiniband/marshall.h>
/* Prefix for diagnostic messages printed by this library. */
52 #define PFX "libibcm: "
/* Range of kernel ucm ABI versions this library can interoperate with
 * (verified against sysfs at init time by check_abi_version()). */
54 #define IB_USER_CM_MIN_ABI_VERSION 4
55 #define IB_USER_CM_MAX_ABI_VERSION 5
/* Serializes one-time library initialization (see ucm_init). */
58 static pthread_mutex_t mut = PTHREAD_MUTEX_INITIALIZER;
/* NOTE(review): enclosing enum braces are elided in this excerpt.
 * Upper bound on /sys/class/infiniband_cm/ucm<N> entries probed. */
61 IB_UCM_MAX_DEVICES = 32
/* Error helper — body elided in this excerpt. Judging by call sites
 * ("return ERR(ENODATA)") it presumably sets errno = err and returns -1;
 * TODO confirm against the full source. */
64 static inline int ERR(int err)
/*
 * Build a kernel request on the stack: an ib_ucm_cmd_hdr immediately
 * followed by the zeroed command payload, plus an alloca()'d response
 * buffer whose address is passed to the kernel via cmd->response.
 * NOTE(review): several continuation lines (msg allocation, hdr->cmd
 * assignment, alloca failure checks) are elided in this excerpt.
 */
71 #define CM_CREATE_MSG_CMD_RESP(msg, cmd, resp, type, size) \
73 struct ib_ucm_cmd_hdr *hdr; \
75 size = sizeof(*hdr) + sizeof(*cmd); \
80 cmd = msg + sizeof(*hdr); \
82 hdr->in = sizeof(*cmd); \
83 hdr->out = sizeof(*resp); \
84 memset(cmd, 0, sizeof(*cmd)); \
85 resp = alloca(sizeof(*resp)); \
88 cmd->response = (uintptr_t)resp;\
/*
 * Same as CM_CREATE_MSG_CMD_RESP but for commands that carry no response
 * payload: header + zeroed command only.  NOTE(review): continuation lines
 * (msg allocation, hdr->cmd assignment) are elided in this excerpt.
 */
91 #define CM_CREATE_MSG_CMD(msg, cmd, type, size) \
93 struct ib_ucm_cmd_hdr *hdr; \
95 size = sizeof(*hdr) + sizeof(*cmd); \
100 cmd = msg + sizeof(*hdr); \
102 hdr->in = sizeof(*cmd); \
104 memset(cmd, 0, sizeof(*cmd)); \
/*
 * Private wrapper around the public ib_cm_id.  events_completed counts
 * events acked via ib_cm_ack_event(); ib_cm_destroy_id() waits for it to
 * reach the kernel's events_reported count before freeing the id.
 * NOTE(review): remaining members (id, mut, cond) are elided in this excerpt.
 */
107 struct cm_id_private {
109 int events_completed;
/*
 * Read the ucm kernel module's ABI version from sysfs and verify it lies
 * within [IB_USER_CM_MIN_ABI_VERSION, IB_USER_CM_MAX_ABI_VERSION].
 * Returns 0 on success; the error-return lines are elided in this excerpt.
 */
114 static int check_abi_version(void)
118 if (ibv_read_sysfs_file(ibv_get_sysfs_path(),
119 "class/infiniband_cm/abi_version",
120 value, sizeof value) < 0) {
121 fprintf(stderr, PFX "couldn't read ABI version\n");
/* Parse the numeric version and range-check it against our supported window. */
125 abi_ver = strtol(value, NULL, 10);
126 if (abi_ver < IB_USER_CM_MIN_ABI_VERSION ||
127 abi_ver > IB_USER_CM_MAX_ABI_VERSION) {
128 fprintf(stderr, PFX "kernel ABI version %d "
129 "doesn't match library version %d.\n",
130 abi_ver, IB_USER_CM_MAX_ABI_VERSION);
/*
 * One-time library initialization: check the kernel ABI version under the
 * global mutex.  Lines guarding against repeated init are elided in this
 * excerpt.
 */
136 static int ucm_init(void)
140 pthread_mutex_lock(&mut);
142 ret = check_abi_version();
143 pthread_mutex_unlock(&mut);
/*
 * Map an IB device name to its ucm<N> minor index by scanning
 * /sys/class/infiniband_cm/ucm%d/ibdev for a matching name.
 * Return value on match and the not-found/error paths are elided in this
 * excerpt (presumably returns i on match, negative otherwise — confirm).
 */
148 static int ucm_get_dev_index(char *dev_name)
151 char ibdev[IBV_SYSFS_NAME_MAX];
154 for (i = 0; i < IB_UCM_MAX_DEVICES; i++) {
155 ret = asprintf(&dev_path, "/sys/class/infiniband_cm/ucm%d", i);
159 ret = ibv_read_sysfs_file(dev_path, "ibdev", ibdev, sizeof ibdev);
163 if (!strcmp(dev_name, ibdev)) {
/*
 * Open the ucm character device backing a verbs device context: look up the
 * matching ucm<N> index, allocate an ib_cm_device, and open /dev/ucm<N>
 * read-write.  Error handling, dev_path cleanup, and the return statement
 * are elided in this excerpt.
 */
173 struct ib_cm_device* ib_cm_open_device(struct ibv_context *device_context)
175 struct ib_cm_device *dev;
182 index = ucm_get_dev_index(device_context->device->name);
186 dev = malloc(sizeof *dev);
190 dev->device_context = device_context;
192 ret = asprintf(&dev_path, "/dev/ucm%d", index);
196 dev->fd = open(dev_path, O_RDWR);
/* Tear down a device opened by ib_cm_open_device — body (close(fd)/free)
 * elided in this excerpt. */
210 void ib_cm_close_device(struct ib_cm_device *device)
/*
 * Destroy a cm_id's synchronization objects; the trailing free() of the
 * struct itself is elided in this excerpt.  Callers must ensure no thread
 * still waits on the cond/mutex (see ib_cm_destroy_id).
 */
216 static void ib_cm_free_id(struct cm_id_private *cm_id_priv)
218 pthread_cond_destroy(&cm_id_priv->cond);
219 pthread_mutex_destroy(&cm_id_priv->mut);
/*
 * Allocate and zero a cm_id_private, recording the owning device and user
 * context and initializing its mutex/condvar.  On condvar-init failure it
 * falls through to err: and releases the partially built id.  The success
 * return and NULL-on-malloc-failure paths are elided in this excerpt.
 */
223 static struct cm_id_private *ib_cm_alloc_id(struct ib_cm_device *device,
226 struct cm_id_private *cm_id_priv;
228 cm_id_priv = malloc(sizeof *cm_id_priv);
232 memset(cm_id_priv, 0, sizeof *cm_id_priv);
233 cm_id_priv->id.device = device;
234 cm_id_priv->id.context = context;
235 pthread_mutex_init(&cm_id_priv->mut, NULL);
236 if (pthread_cond_init(&cm_id_priv->cond, NULL))
241 err: ib_cm_free_id(cm_id_priv);
/*
 * Create a connection identifier: allocate the userspace wrapper, then issue
 * IB_USER_CM_CMD_CREATE_ID; the kernel handle comes back in resp->id.  The
 * uid field round-trips the cm_id_priv pointer so events can later be
 * matched to this id (see ib_cm_get_event).  Write-failure handling between
 * the visible lines is elided in this excerpt.
 */
245 int ib_cm_create_id(struct ib_cm_device *device,
246 struct ib_cm_id **cm_id, void *context)
248 struct ib_ucm_create_id_resp *resp;
249 struct ib_ucm_create_id *cmd;
250 struct cm_id_private *cm_id_priv;
255 cm_id_priv = ib_cm_alloc_id(device, context);
259 CM_CREATE_MSG_CMD_RESP(msg, cmd, resp, IB_USER_CM_CMD_CREATE_ID, size);
260 cmd->uid = (uintptr_t) cm_id_priv;
262 result = write(device->fd, msg, size);
266 VALGRIND_MAKE_MEM_DEFINED(resp, sizeof *resp);
268 cm_id_priv->id.handle = resp->id;
269 *cm_id = &cm_id_priv->id;
/* Failure path: release the wrapper allocated above. */
272 err: ib_cm_free_id(cm_id_priv);
/*
 * Destroy a cm_id: ask the kernel to destroy its handle, then block until
 * every event the kernel reported for this id has been acked by the user
 * (events_completed catches up with resp->events_reported) before freeing
 * the userspace wrapper — prevents use-after-free by in-flight events.
 */
276 int ib_cm_destroy_id(struct ib_cm_id *cm_id)
278 struct ib_ucm_destroy_id_resp *resp;
279 struct ib_ucm_destroy_id *cmd;
280 struct cm_id_private *cm_id_priv;
285 CM_CREATE_MSG_CMD_RESP(msg, cmd, resp, IB_USER_CM_CMD_DESTROY_ID, size);
286 cmd->id = cm_id->handle;
288 result = write(cm_id->device->fd, msg, size);
/* Short write: ENODATA; write error: -1 (errno from write). */
290 return (result >= 0) ? ERR(ENODATA) : -1;
292 VALGRIND_MAKE_MEM_DEFINED(resp, sizeof *resp);
294 cm_id_priv = container_of(cm_id, struct cm_id_private, id);
296 pthread_mutex_lock(&cm_id_priv->mut);
297 while (cm_id_priv->events_completed < resp->events_reported)
298 pthread_cond_wait(&cm_id_priv->cond, &cm_id_priv->mut);
299 pthread_mutex_unlock(&cm_id_priv->mut);
301 ib_cm_free_id(cm_id_priv);
/*
 * Query the kernel for a cm_id's identifying attributes (service id/mask,
 * local and remote comm ids) and copy them into the caller's param struct.
 */
305 int ib_cm_attr_id(struct ib_cm_id *cm_id, struct ib_cm_attr_param *param)
307 struct ib_ucm_attr_id_resp *resp;
308 struct ib_ucm_attr_id *cmd;
316 CM_CREATE_MSG_CMD_RESP(msg, cmd, resp, IB_USER_CM_CMD_ATTR_ID, size);
317 cmd->id = cm_id->handle;
319 result = write(cm_id->device->fd, msg, size);
321 return (result >= 0) ? ERR(ENODATA) : -1;
323 VALGRIND_MAKE_MEM_DEFINED(resp, sizeof *resp);
325 param->service_id = resp->service_id;
326 param->service_mask = resp->service_mask;
327 param->local_id = resp->local_id;
328 param->remote_id = resp->remote_id;
/*
 * Ask the kernel CM which QP attributes (and attribute mask) the caller
 * should apply to transition its QP to qp_attr->qp_state for this
 * connection; results are unmarshalled into the caller's ibv_qp_attr.
 * NOTE(review): this path returns `result` on write error, while sibling
 * functions return -1 — inconsistency in the original; left as-is.
 */
332 int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
333 struct ibv_qp_attr *qp_attr,
336 struct ibv_kern_qp_attr *resp;
337 struct ib_ucm_init_qp_attr *cmd;
342 if (!qp_attr || !qp_attr_mask)
345 CM_CREATE_MSG_CMD_RESP(msg, cmd, resp, IB_USER_CM_CMD_INIT_QP_ATTR, size);
346 cmd->id = cm_id->handle;
347 cmd->qp_state = qp_attr->qp_state;
349 result = write(cm_id->device->fd, msg, size);
351 return (result >= 0) ? ERR(ENODATA) : result;
353 VALGRIND_MAKE_MEM_DEFINED(resp, sizeof *resp);
355 *qp_attr_mask = resp->qp_attr_mask;
356 ibv_copy_qp_attr_from_kern(qp_attr, resp);
/*
 * Put a cm_id into listen state for the given service id; service_mask
 * selects which bits of incoming service ids must match.
 */
361 int ib_cm_listen(struct ib_cm_id *cm_id,
365 struct ib_ucm_listen *cmd;
370 CM_CREATE_MSG_CMD(msg, cmd, IB_USER_CM_CMD_LISTEN, size);
371 cmd->id = cm_id->handle;
372 cmd->service_id = service_id;
373 cmd->service_mask = service_mask;
375 result = write(cm_id->device->fd, msg, size);
377 return (result >= 0) ? ERR(ENODATA) : -1;
/*
 * Send a connection request (REQ).  Marshals the caller's primary path
 * record into kernel wire format on the stack (p_path) and, if present, the
 * alternate path into an alloca()'d buffer; both are passed by address to
 * the kernel.  A primary path is mandatory.  Optional private data is
 * passed by reference and must stay valid until the write returns.
 */
382 int ib_cm_send_req(struct ib_cm_id *cm_id, struct ib_cm_req_param *param)
384 struct ib_user_path_rec p_path;
385 struct ib_user_path_rec *a_path;
386 struct ib_ucm_req *cmd;
391 if (!param || !param->primary_path)
394 CM_CREATE_MSG_CMD(msg, cmd, IB_USER_CM_CMD_SEND_REQ, size);
395 cmd->id = cm_id->handle;
396 cmd->qpn = param->qp_num;
397 cmd->qp_type = param->qp_type;
398 cmd->psn = param->starting_psn;
399 cmd->sid = param->service_id;
400 cmd->peer_to_peer = param->peer_to_peer;
401 cmd->responder_resources = param->responder_resources;
402 cmd->initiator_depth = param->initiator_depth;
403 cmd->remote_cm_response_timeout = param->remote_cm_response_timeout;
404 cmd->flow_control = param->flow_control;
405 cmd->local_cm_response_timeout = param->local_cm_response_timeout;
406 cmd->retry_count = param->retry_count;
407 cmd->rnr_retry_count = param->rnr_retry_count;
408 cmd->max_cm_retries = param->max_cm_retries;
409 cmd->srq = param->srq;
/* Translate path records to the kernel ABI representation. */
411 ibv_copy_path_rec_to_kern(&p_path, param->primary_path);
412 cmd->primary_path = (uintptr_t) &p_path;
414 if (param->alternate_path) {
415 a_path = alloca(sizeof(*a_path));
419 ibv_copy_path_rec_to_kern(a_path, param->alternate_path);
420 cmd->alternate_path = (uintptr_t) a_path;
423 if (param->private_data && param->private_data_len) {
424 cmd->data = (uintptr_t) param->private_data;
425 cmd->len = param->private_data_len;
428 result = write(cm_id->device->fd, msg, size);
430 return (result >= 0) ? ERR(ENODATA) : -1;
/*
 * Send a connection reply (REP) on the passive side.  The uid carries the
 * cm_id_priv pointer so subsequent kernel events can be matched back to
 * this id.  Optional private data is passed by reference.
 */
435 int ib_cm_send_rep(struct ib_cm_id *cm_id, struct ib_cm_rep_param *param)
437 struct ib_ucm_rep *cmd;
445 CM_CREATE_MSG_CMD(msg, cmd, IB_USER_CM_CMD_SEND_REP, size);
446 cmd->uid = (uintptr_t) container_of(cm_id, struct cm_id_private, id);
447 cmd->id = cm_id->handle;
448 cmd->qpn = param->qp_num;
449 cmd->psn = param->starting_psn;
450 cmd->responder_resources = param->responder_resources;
451 cmd->initiator_depth = param->initiator_depth;
452 cmd->target_ack_delay = param->target_ack_delay;
453 cmd->failover_accepted = param->failover_accepted;
454 cmd->flow_control = param->flow_control;
455 cmd->rnr_retry_count = param->rnr_retry_count;
456 cmd->srq = param->srq;
458 if (param->private_data && param->private_data_len) {
459 cmd->data = (uintptr_t) param->private_data;
460 cmd->len = param->private_data_len;
463 result = write(cm_id->device->fd, msg, size);
465 return (result >= 0) ? ERR(ENODATA) : -1;
/*
 * Shared helper for the simple "command + optional private data" messages
 * (RTU, DREQ, DREP): builds the command, attaches private data by
 * reference when supplied, and writes it to the ucm device.
 */
470 static inline int cm_send_private_data(struct ib_cm_id *cm_id,
473 uint8_t private_data_len)
475 struct ib_ucm_private_data *cmd;
480 CM_CREATE_MSG_CMD(msg, cmd, type, size);
481 cmd->id = cm_id->handle;
483 if (private_data && private_data_len) {
484 cmd->data = (uintptr_t) private_data;
485 cmd->len = private_data_len;
488 result = write(cm_id->device->fd, msg, size);
490 return (result >= 0) ? ERR(ENODATA) : -1;
/* Send Ready-To-Use (RTU) to complete connection establishment. */
495 int ib_cm_send_rtu(struct ib_cm_id *cm_id,
497 uint8_t private_data_len)
499 return cm_send_private_data(cm_id, IB_USER_CM_CMD_SEND_RTU,
500 private_data, private_data_len);
/* Send a disconnect request (DREQ) to begin connection teardown. */
503 int ib_cm_send_dreq(struct ib_cm_id *cm_id,
505 uint8_t private_data_len)
507 return cm_send_private_data(cm_id, IB_USER_CM_CMD_SEND_DREQ,
508 private_data, private_data_len);
/* Send a disconnect reply (DREP) in response to a received DREQ. */
511 int ib_cm_send_drep(struct ib_cm_id *cm_id,
513 uint8_t private_data_len)
515 return cm_send_private_data(cm_id, IB_USER_CM_CMD_SEND_DREP,
516 private_data, private_data_len);
/*
 * ABI-4 compatibility path: send the old ESTABLISH command (same opcode as
 * the newer NOTIFY, minus the extra event field) to tell the kernel the
 * connection is established.  Used by ib_cm_notify() for COMM_EST events.
 */
519 static int cm_establish(struct ib_cm_id *cm_id)
521 /* In kernel ABI 4 ESTABLISH was repurposed as NOTIFY and gained an
522 extra field. For some reason the compat definitions were deleted
523 from the uapi headers :( */
524 #define IB_USER_CM_CMD_ESTABLISH IB_USER_CM_CMD_NOTIFY
525 struct cm_abi_establish { /* ABI 4 support */
529 struct cm_abi_establish *cmd;
534 CM_CREATE_MSG_CMD(msg, cmd, IB_USER_CM_CMD_ESTABLISH, size);
535 cmd->id = cm_id->handle;
537 result = write(cm_id->device->fd, msg, size);
539 return (result >= 0) ? ERR(ENODATA) : -1;
/*
 * Notify the kernel CM of a verbs event on the connection's QP.  COMM_EST
 * is routed through the ABI-4 compatible cm_establish() path; other events
 * use the full NOTIFY command (the cmd->event assignment is elided in this
 * excerpt).
 */
544 int ib_cm_notify(struct ib_cm_id *cm_id, enum ibv_event_type event)
546 struct ib_ucm_notify *cmd;
552 if (event == IBV_EVENT_COMM_EST)
553 return cm_establish(cm_id);
558 CM_CREATE_MSG_CMD(msg, cmd, IB_USER_CM_CMD_NOTIFY, size);
559 cmd->id = cm_id->handle;
562 result = write(cm_id->device->fd, msg, size);
564 return (result >= 0) ? ERR(ENODATA) : -1;
/*
 * Shared helper for status-carrying messages (REJ, APR): builds the
 * command with a status code plus optional additional info and optional
 * private data, both passed by reference, and writes it to the ucm device.
 */
569 static inline int cm_send_status(struct ib_cm_id *cm_id,
575 uint8_t private_data_len)
577 struct ib_ucm_info *cmd;
582 CM_CREATE_MSG_CMD(msg, cmd, type, size);
583 cmd->id = cm_id->handle;
584 cmd->status = status;
586 if (private_data && private_data_len) {
587 cmd->data = (uintptr_t) private_data;
588 cmd->data_len = private_data_len;
591 if (info && info_length) {
592 cmd->info = (uintptr_t) info;
593 cmd->info_len = info_length;
596 result = write(cm_id->device->fd, msg, size);
598 return (result >= 0) ? ERR(ENODATA) : -1;
/* Send a connection rejection (REJ) with the given reason code and
 * optional ARI/private data (forwarded via cm_send_status). */
603 int ib_cm_send_rej(struct ib_cm_id *cm_id,
604 enum ib_cm_rej_reason reason,
608 uint8_t private_data_len)
610 return cm_send_status(cm_id, IB_USER_CM_CMD_SEND_REJ, reason,
612 private_data, private_data_len);
/* Send an alternate-path response (APR) with the given status and
 * optional info/private data (forwarded via cm_send_status). */
615 int ib_cm_send_apr(struct ib_cm_id *cm_id,
616 enum ib_cm_apr_status status,
620 uint8_t private_data_len)
622 return cm_send_status(cm_id, IB_USER_CM_CMD_SEND_APR, status,
624 private_data, private_data_len);
/*
 * Send a Message-Receipt-Acknowledgement (MRA), telling the peer to extend
 * its timeout by service_timeout.  NOTE(review): returns `result` on write
 * error rather than -1 like most siblings — inconsistency in the original;
 * left as-is.
 */
627 int ib_cm_send_mra(struct ib_cm_id *cm_id,
628 uint8_t service_timeout,
630 uint8_t private_data_len)
632 struct ib_ucm_mra *cmd;
637 CM_CREATE_MSG_CMD(msg, cmd, IB_USER_CM_CMD_SEND_MRA, size);
638 cmd->id = cm_id->handle;
639 cmd->timeout = service_timeout;
641 if (private_data && private_data_len) {
642 cmd->data = (uintptr_t) private_data;
643 cmd->len = private_data_len;
646 result = write(cm_id->device->fd, msg, size);
648 return (result >= 0) ? ERR(ENODATA) : result;
/*
 * Send a Load-Alternate-Path (LAP) request proposing a new alternate path.
 * The path record is marshalled to kernel wire format on the stack and
 * passed by address; it must stay valid until the write returns.
 */
653 int ib_cm_send_lap(struct ib_cm_id *cm_id,
654 struct ibv_sa_path_rec *alternate_path,
656 uint8_t private_data_len)
658 struct ib_user_path_rec abi_path;
659 struct ib_ucm_lap *cmd;
664 CM_CREATE_MSG_CMD(msg, cmd, IB_USER_CM_CMD_SEND_LAP, size);
665 cmd->id = cm_id->handle;
667 ibv_copy_path_rec_to_kern(&abi_path, alternate_path);
668 cmd->path = (uintptr_t) &abi_path;
670 if (private_data && private_data_len) {
671 cmd->data = (uintptr_t) private_data;
672 cmd->len = private_data_len;
675 result = write(cm_id->device->fd, msg, size);
677 return (result >= 0) ? ERR(ENODATA) : -1;
/*
 * Send a Service-ID-Resolution request (SIDR REQ).  A path record is
 * mandatory and is marshalled to kernel format on the stack.
 * NOTE(review): returns `result` on write error rather than -1 like most
 * siblings — inconsistency in the original; left as-is.
 */
682 int ib_cm_send_sidr_req(struct ib_cm_id *cm_id,
683 struct ib_cm_sidr_req_param *param)
685 struct ib_user_path_rec abi_path;
686 struct ib_ucm_sidr_req *cmd;
691 if (!param || !param->path)
694 CM_CREATE_MSG_CMD(msg, cmd, IB_USER_CM_CMD_SEND_SIDR_REQ, size);
695 cmd->id = cm_id->handle;
696 cmd->sid = param->service_id;
697 cmd->timeout = param->timeout_ms;
698 cmd->max_cm_retries = param->max_cm_retries;
700 ibv_copy_path_rec_to_kern(&abi_path, param->path);
701 cmd->path = (uintptr_t) &abi_path;
703 if (param->private_data && param->private_data_len) {
704 cmd->data = (uintptr_t) param->private_data;
705 cmd->len = param->private_data_len;
708 result = write(cm_id->device->fd, msg, size);
710 return (result >= 0) ? ERR(ENODATA) : result;
/*
 * Send a Service-ID-Resolution reply (SIDR REP) carrying the resolved QPN,
 * QKey, and status, plus optional info and private data passed by
 * reference.
 */
715 int ib_cm_send_sidr_rep(struct ib_cm_id *cm_id,
716 struct ib_cm_sidr_rep_param *param)
718 struct ib_ucm_sidr_rep *cmd;
726 CM_CREATE_MSG_CMD(msg, cmd, IB_USER_CM_CMD_SEND_SIDR_REP, size);
727 cmd->id = cm_id->handle;
728 cmd->qpn = param->qp_num;
729 cmd->qkey = param->qkey;
730 cmd->status = param->status;
732 if (param->private_data && param->private_data_len) {
733 cmd->data = (uintptr_t) param->private_data;
734 cmd->data_len = param->private_data_len;
737 if (param->info && param->info_length) {
738 cmd->info = (uintptr_t) param->info;
739 cmd->info_len = param->info_length;
742 result = write(cm_id->device->fd, msg, size);
744 return (result >= 0) ? ERR(ENODATA) : -1;
/*
 * Translate a kernel REQ event into the userspace ib_cm_req_event_param,
 * including unmarshalling the primary (and, if the caller attached a
 * buffer, alternate) path records.  ureq->primary_path/alternate_path must
 * already point at caller-allocated records (see ib_cm_get_event).
 */
749 static void cm_event_req_get(struct ib_cm_req_event_param *ureq,
750 struct ib_ucm_req_event_resp *kreq)
752 ureq->remote_ca_guid = kreq->remote_ca_guid;
753 ureq->remote_qkey = kreq->remote_qkey;
754 ureq->remote_qpn = kreq->remote_qpn;
755 ureq->qp_type = kreq->qp_type;
756 ureq->starting_psn = kreq->starting_psn;
757 ureq->responder_resources = kreq->responder_resources;
758 ureq->initiator_depth = kreq->initiator_depth;
759 ureq->local_cm_response_timeout = kreq->local_cm_response_timeout;
760 ureq->flow_control = kreq->flow_control;
761 ureq->remote_cm_response_timeout = kreq->remote_cm_response_timeout;
762 ureq->retry_count = kreq->retry_count;
763 ureq->rnr_retry_count = kreq->rnr_retry_count;
764 ureq->srq = kreq->srq;
765 ureq->port = kreq->port;
767 ibv_copy_path_rec_from_kern(ureq->primary_path, &kreq->primary_path);
768 if (ureq->alternate_path)
769 ibv_copy_path_rec_from_kern(ureq->alternate_path,
770 &kreq->alternate_path);
/* Translate a kernel REP event into the userspace ib_cm_rep_event_param
 * (straight field-by-field copy). */
773 static void cm_event_rep_get(struct ib_cm_rep_event_param *urep,
774 struct ib_ucm_rep_event_resp *krep)
776 urep->remote_ca_guid = krep->remote_ca_guid;
777 urep->remote_qkey = krep->remote_qkey;
778 urep->remote_qpn = krep->remote_qpn;
779 urep->starting_psn = krep->starting_psn;
780 urep->responder_resources = krep->responder_resources;
781 urep->initiator_depth = krep->initiator_depth;
782 urep->target_ack_delay = krep->target_ack_delay;
783 urep->failover_accepted = krep->failover_accepted;
784 urep->flow_control = krep->flow_control;
785 urep->rnr_retry_count = krep->rnr_retry_count;
786 urep->srq = krep->srq;
/* Translate a kernel SIDR REP event into the userspace param struct. */
789 static void cm_event_sidr_rep_get(struct ib_cm_sidr_rep_event_param *urep,
790 struct ib_ucm_sidr_rep_event_resp *krep)
792 urep->status = krep->status;
793 urep->qkey = krep->qkey;
794 urep->qpn = krep->qpn;
/*
 * Retrieve the next CM event from the kernel (blocking write on the ucm
 * fd).  Allocates maximum-size private-data and info buffers up front
 * (data_len/info_len of (uint8_t)~0U == 255 tell the kernel the capacity),
 * plus path-record buffers when the kernel indicates they are present,
 * then translates the kernel response into a freshly malloc'd ib_cm_event.
 * All of these allocations are released later by ib_cm_ack_event().
 * NOTE(review): many error-cleanup and break statements between the
 * visible lines are elided in this excerpt.
 */
797 int ib_cm_get_event(struct ib_cm_device *device, struct ib_cm_event **event)
799 struct cm_id_private *cm_id_priv;
800 struct ib_ucm_cmd_hdr *hdr;
801 struct ib_ucm_event_get *cmd;
802 struct ib_ucm_event_resp *resp;
803 struct ib_cm_event *evt = NULL;
804 struct ibv_sa_path_rec *path_a = NULL;
805 struct ibv_sa_path_rec *path_b = NULL;
/* Build the EVENT command by hand (needs extra data/info buffers beyond
 * what the CM_CREATE_MSG_CMD_RESP macro provides). */
815 size = sizeof(*hdr) + sizeof(*cmd);
821 cmd = msg + sizeof(*hdr);
823 hdr->cmd = IB_USER_CM_CMD_EVENT;
824 hdr->in = sizeof(*cmd);
825 hdr->out = sizeof(*resp);
827 memset(cmd, 0, sizeof(*cmd));
829 resp = alloca(sizeof(*resp));
833 cmd->response = (uintptr_t) resp;
834 cmd->data_len = (uint8_t)(~0U);
835 cmd->info_len = (uint8_t)(~0U);
837 data = malloc(cmd->data_len);
839 result = ERR(ENOMEM);
843 info = malloc(cmd->info_len);
845 result = ERR(ENOMEM);
849 cmd->data = (uintptr_t) data;
850 cmd->info = (uintptr_t) info;
/* Blocks in the kernel until an event is available. */
852 result = write(device->fd, msg, size);
853 if (result != size) {
854 result = (result >= 0) ? ERR(ENODATA) : -1;
858 VALGRIND_MAKE_MEM_DEFINED(resp, sizeof *resp);
863 evt = malloc(sizeof(*evt));
865 result = ERR(ENOMEM);
868 memset(evt, 0, sizeof(*evt));
/* resp->uid round-trips the cm_id_priv pointer stored at create/rep time. */
869 evt->cm_id = (void *) (uintptr_t) resp->uid;
870 evt->event = resp->event;
872 if (resp->present & IB_UCM_PRES_PRIMARY) {
873 path_a = malloc(sizeof(*path_a));
875 result = ERR(ENOMEM);
880 if (resp->present & IB_UCM_PRES_ALTERNATE) {
881 path_b = malloc(sizeof(*path_b));
883 result = ERR(ENOMEM);
/* Per-event translation; REQ/SIDR_REQ also allocate a new id for the
 * incoming connection, with the listener recorded in listen_id. */
888 switch (evt->event) {
889 case IB_CM_REQ_RECEIVED:
890 evt->param.req_rcvd.listen_id = evt->cm_id;
891 cm_id_priv = ib_cm_alloc_id(evt->cm_id->device,
892 evt->cm_id->context);
894 result = ERR(ENOMEM);
897 cm_id_priv->id.handle = resp->id;
898 evt->cm_id = &cm_id_priv->id;
899 evt->param.req_rcvd.primary_path = path_a;
900 evt->param.req_rcvd.alternate_path = path_b;
903 cm_event_req_get(&evt->param.req_rcvd, &resp->u.req_resp);
905 case IB_CM_REP_RECEIVED:
906 cm_event_rep_get(&evt->param.rep_rcvd, &resp->u.rep_resp);
908 case IB_CM_MRA_RECEIVED:
909 evt->param.mra_rcvd.service_timeout = resp->u.mra_resp.timeout;
911 case IB_CM_REJ_RECEIVED:
912 evt->param.rej_rcvd.reason = resp->u.rej_resp.reason;
913 evt->param.rej_rcvd.ari = info;
916 case IB_CM_LAP_RECEIVED:
917 evt->param.lap_rcvd.alternate_path = path_b;
919 ibv_copy_path_rec_from_kern(evt->param.lap_rcvd.alternate_path,
920 &resp->u.lap_resp.path);
922 case IB_CM_APR_RECEIVED:
923 evt->param.apr_rcvd.ap_status = resp->u.apr_resp.status;
924 evt->param.apr_rcvd.apr_info = info;
927 case IB_CM_SIDR_REQ_RECEIVED:
928 evt->param.sidr_req_rcvd.listen_id = evt->cm_id;
929 cm_id_priv = ib_cm_alloc_id(evt->cm_id->device,
930 evt->cm_id->context);
932 result = ERR(ENOMEM);
935 cm_id_priv->id.handle = resp->id;
936 evt->cm_id = &cm_id_priv->id;
937 evt->param.sidr_req_rcvd.pkey = resp->u.sidr_req_resp.pkey;
938 evt->param.sidr_req_rcvd.port = resp->u.sidr_req_resp.port;
940 case IB_CM_SIDR_REP_RECEIVED:
941 cm_event_sidr_rep_get(&evt->param.sidr_rep_rcvd,
942 &resp->u.sidr_rep_resp);
943 evt->param.sidr_rep_rcvd.info = info;
947 evt->param.send_status = resp->u.send_status;
/* Hand the private-data buffer to the event when the kernel filled it. */
951 if (resp->present & IB_UCM_PRES_DATA) {
952 evt->private_data = data;
/*
 * Acknowledge an event obtained from ib_cm_get_event: free the per-event
 * allocations (private data, path records, ari/info buffers), then bump
 * events_completed on the id that owns the event's counter and signal any
 * waiter in ib_cm_destroy_id.  For REQ/SIDR_REQ-received events the
 * counter belongs to the *listening* id, not the newly created one, hence
 * the container_of redirection in those cases.
 * NOTE(review): break statements and the free(event)/return tail are
 * elided in this excerpt.
 */
974 int ib_cm_ack_event(struct ib_cm_event *event)
976 struct cm_id_private *cm_id_priv;
981 if (event->private_data)
982 free(event->private_data);
/* Default owner: the id the event was delivered on. */
984 cm_id_priv = container_of(event->cm_id, struct cm_id_private, id);
986 switch (event->event) {
987 case IB_CM_REQ_RECEIVED:
988 cm_id_priv = container_of(event->param.req_rcvd.listen_id,
989 struct cm_id_private, id);
990 free(event->param.req_rcvd.primary_path);
991 if (event->param.req_rcvd.alternate_path)
992 free(event->param.req_rcvd.alternate_path);
994 case IB_CM_REJ_RECEIVED:
995 if (event->param.rej_rcvd.ari)
996 free(event->param.rej_rcvd.ari);
998 case IB_CM_LAP_RECEIVED:
999 free(event->param.lap_rcvd.alternate_path);
1001 case IB_CM_APR_RECEIVED:
1002 if (event->param.apr_rcvd.apr_info)
1003 free(event->param.apr_rcvd.apr_info);
1005 case IB_CM_SIDR_REQ_RECEIVED:
1006 cm_id_priv = container_of(event->param.sidr_req_rcvd.listen_id,
1007 struct cm_id_private, id);
1009 case IB_CM_SIDR_REP_RECEIVED:
1010 if (event->param.sidr_rep_rcvd.info)
1011 free(event->param.sidr_rep_rcvd.info);
/* Wake ib_cm_destroy_id once all reported events have been acked. */
1016 pthread_mutex_lock(&cm_id_priv->mut);
1017 cm_id_priv->events_completed++;
1018 pthread_cond_signal(&cm_id_priv->cond);
1019 pthread_mutex_unlock(&cm_id_priv->mut);