/*
 * Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
 * Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/libkern.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/priority.h>
#include <sys/syslog.h>
#include <sys/malloc.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>

#include <contrib/rdma/iw_cm.h>

enum iw_cm_state {
	IW_CM_STATE_IDLE,		/* unbound, inactive */
	IW_CM_STATE_LISTEN,		/* listen waiting for connect */
	IW_CM_STATE_CONN_RECV,		/* inbound waiting for user accept */
	IW_CM_STATE_CONN_SENT,		/* outbound waiting for peer accept */
	IW_CM_STATE_ESTABLISHED,	/* established */
	IW_CM_STATE_CLOSING,		/* disconnect */
	IW_CM_STATE_DESTROYING		/* object being deleted */
};
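
/*
 * Summary of the transitions implemented below (downcalls on the left,
 * provider events on the right):
 *
 *	IDLE        -> LISTEN        iw_cm_listen()
 *	IDLE        -> CONN_SENT     iw_cm_connect()
 *	new id      -> CONN_RECV     CONNECT_REQUEST upcall on a LISTEN id
 *	CONN_RECV   -> ESTABLISHED   iw_cm_accept() + ESTABLISHED event
 *	CONN_SENT   -> ESTABLISHED   CONNECT_REPLY with accepted status
 *	ESTABLISHED -> CLOSING       iw_cm_disconnect() or DISCONNECT event
 *	CLOSING     -> IDLE          CLOSE event
 *	any state   -> DESTROYING    destroy_cm_id()
 */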

struct iwcm_id_private {
	struct iw_cm_id id;		/* public handle returned to the ULP */
	enum iw_cm_state state;
	u_int32_t flags;		/* IWCM_F_* bits, see below */
	struct ib_qp *qp;		/* 'connected' QP reference */
	void *destroy_comp;		/* sleep channel for iw_destroy_cm_id */
	void *connect_wait;		/* sleep channel for connect/accept */
	TAILQ_HEAD(, iwcm_work) work_list;
	struct mtx lock;
	volatile int refcount;
	TAILQ_HEAD(, iwcm_work) work_free_list;
};

#define IWCM_F_CALLBACK_DESTROY 1
#define IWCM_F_CONNECT_WAIT 2
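
/*
 * Note: these values are bit numbers, not masks; they are passed as the
 * bit-index argument of the setbit()/clrbit()/isset() macros that operate
 * on cm_id_priv->flags throughout this file.
 */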

static struct taskqueue *iwcm_wq;

struct iwcm_work {
	struct iwcm_id_private *cm_id;
	TAILQ_ENTRY(iwcm_work) list;
	struct iw_cm_event event;
	TAILQ_ENTRY(iwcm_work) free_list;
	struct task task;	/* enqueued on iwcm_wq, runs cm_work_handler() */
};

/*
 * The following services provide a mechanism for pre-allocating iwcm_work
 * elements. The design pre-allocates them based on the cm_id type:
 *	LISTENING IDS:	Get enough elements preallocated to handle the
 *			listen backlog.
 *	ACTIVE IDS:	4: CONNECT_REPLY, ESTABLISHED, DISCONNECT, CLOSE
 *	PASSIVE IDS:	3: ESTABLISHED, DISCONNECT, CLOSE
 *
 * Allocating them in connect and listen avoids having to deal
 * with allocation failures on the event upcall from the provider (which
 * is called in the interrupt context).
 *
 * One exception is when creating the cm_id for incoming connection requests.
 * There are two cases:
 * 1) in the event upcall, cm_event_handler(), for a listening cm_id. If
 *    the backlog is exceeded, then no more connection request events will
 *    be processed. cm_event_handler() returns ENOMEM in this case. It's up
 *    to the provider to reject the connection request.
 * 2) in the connection request workqueue handler, cm_conn_req_handler().
 *    If work elements cannot be allocated for the new connect request cm_id,
 *    then IWCM will call the provider reject method. This is ok since
 *    cm_conn_req_handler() runs in the workqueue thread context.
 */
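
/*
 * For reference, the pre-allocation counts above correspond to the calls
 * made elsewhere in this file:
 *
 *	iw_cm_listen():        alloc_work_entries(cm_id_priv, backlog);
 *	iw_cm_connect():       alloc_work_entries(cm_id_priv, 4);
 *	cm_conn_req_handler(): alloc_work_entries(cm_id_priv, 3);
 */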

static struct iwcm_work *get_work(struct iwcm_id_private *cm_id_priv)
{
	struct iwcm_work *work;

	if (TAILQ_EMPTY(&cm_id_priv->work_free_list))
		return NULL;
	work = TAILQ_FIRST(&cm_id_priv->work_free_list);
	TAILQ_REMOVE(&cm_id_priv->work_free_list, work, free_list);
	return work;
}

static void put_work(struct iwcm_work *work)
{
	TAILQ_INSERT_HEAD(&work->cm_id->work_free_list, work, free_list);
}

static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
{
	struct iwcm_work *e, *tmp;

	TAILQ_FOREACH_SAFE(e, &cm_id_priv->work_free_list, free_list, tmp)
		free(e, M_DEVBUF);
}

static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
{
	struct iwcm_work *work;
	int i;

	PANIC_IF(!TAILQ_EMPTY(&cm_id_priv->work_free_list));
	for (i = 0; i < count; i++) {
		work = malloc(sizeof(struct iwcm_work), M_DEVBUF, M_NOWAIT);
		if (!work) {
			dealloc_work_entries(cm_id_priv);
			return ENOMEM;
		}
		work->cm_id = cm_id_priv;
		put_work(work);
	}
	return 0;
}

/*
 * Save private data from incoming connection requests to
 * iw_cm_event, so the low level driver doesn't have to. Adjust
 * the event ptr to point to the local copy.
 */
static int copy_private_data(struct iw_cm_event *event)
{
	void *p;

	p = malloc(event->private_data_len, M_DEVBUF, M_NOWAIT);
	if (!p)
		return ENOMEM;
	bcopy(event->private_data, p, event->private_data_len);
	event->private_data = p;
	return 0;
}

static void free_cm_id(struct iwcm_id_private *cm_id_priv)
{
	dealloc_work_entries(cm_id_priv);
	free(cm_id_priv, M_DEVBUF);
}

/*
 * Release a reference on cm_id. If the last reference is being
 * released, enable the waiting thread (in iw_destroy_cm_id) to
 * get woken up, and return 1 if a thread is already waiting.
 */
static int iwcm_deref_id(struct iwcm_id_private *cm_id_priv)
{
	mtx_lock(&cm_id_priv->lock);
	PANIC_IF(atomic_load_acq_int(&cm_id_priv->refcount) == 0);
	if (atomic_fetchadd_int(&cm_id_priv->refcount, -1) == 1) {
		PANIC_IF(!TAILQ_EMPTY(&cm_id_priv->work_list));
		wakeup(&cm_id_priv->destroy_comp);
		mtx_unlock(&cm_id_priv->lock);
		return 1;
	}
	mtx_unlock(&cm_id_priv->lock);
	return 0;
}

static void add_ref(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;
	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	mtx_lock(&cm_id_priv->lock);
	atomic_add_int(&cm_id_priv->refcount, 1);
	mtx_unlock(&cm_id_priv->lock);
}

static void rem_ref(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;
	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	if (iwcm_deref_id(cm_id_priv) &&
	    isset(&cm_id_priv->flags, IWCM_F_CALLBACK_DESTROY)) {
		PANIC_IF(!TAILQ_EMPTY(&cm_id_priv->work_list));
		free_cm_id(cm_id_priv);
	}
}

static int cm_event_handler(struct iw_cm_id *cm_id, struct iw_cm_event *event);

struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
				 struct socket *so,
				 iw_cm_handler cm_handler,
				 void *context)
{
	struct iwcm_id_private *cm_id_priv;

	KASSERT(so, ("iw_create_cm_id called with NULL socket!"));
	cm_id_priv = malloc(sizeof(*cm_id_priv), M_DEVBUF, M_NOWAIT);
	if (!cm_id_priv)
		return ERR_PTR(ENOMEM);
	bzero(cm_id_priv, sizeof *cm_id_priv);

	cm_id_priv->state = IW_CM_STATE_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.event_handler = cm_event_handler;
	cm_id_priv->id.add_ref = add_ref;
	cm_id_priv->id.rem_ref = rem_ref;
	cm_id_priv->id.so = so;
	mtx_init(&cm_id_priv->lock, "cm_id_priv", NULL, MTX_DUPOK|MTX_DEF);
	atomic_store_rel_int(&cm_id_priv->refcount, 1);
	TAILQ_INIT(&cm_id_priv->work_list);
	TAILQ_INIT(&cm_id_priv->work_free_list);

	return &cm_id_priv->id;
}
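
/*
 * Example (hypothetical ULP, not part of this file): an active-side
 * consumer would typically pair iw_create_cm_id() with iw_cm_connect():
 *
 *	cm_id = iw_create_cm_id(device, so, my_cm_handler, my_ctx);
 *	if (IS_ERR(cm_id))
 *		return ENOMEM;
 *	ret = iw_cm_connect(cm_id, &iw_param);
 *
 * my_cm_handler, my_ctx and iw_param are caller-supplied names.
 */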

static int iwcm_modify_qp_err(struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	if (!qp)
		return EINVAL;

	qp_attr.qp_state = IB_QPS_ERR;
	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

/*
 * This is really the RDMAC CLOSING state. It is most similar to the
 * IB SQD QP state.
 */
static int iwcm_modify_qp_sqd(struct ib_qp *qp)
{
	struct ib_qp_attr qp_attr;

	PANIC_IF(qp == NULL);
	qp_attr.qp_state = IB_QPS_SQD;
	return ib_modify_qp(qp, &qp_attr, IB_QP_STATE);
}

/*
 * CM_ID <-- CLOSING
 *
 * Block if a passive or active connection is currently being processed. Then
 * process the event as follows:
 * - If we are ESTABLISHED, move to CLOSING and modify the QP state
 *   based on the abrupt flag
 * - If the connection is already in the CLOSING or IDLE state, the peer is
 *   disconnecting concurrently with us and we've already seen the
 *   DISCONNECT event -- ignore the request and return 0
 * - Disconnect on a listening endpoint returns EINVAL
 */
int iw_cm_disconnect(struct iw_cm_id *cm_id, int abrupt)
{
	struct iwcm_id_private *cm_id_priv;
	int ret = 0;
	struct ib_qp *qp = NULL;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	/* Wait if we're currently in a connect or accept downcall */
	mtx_lock(&cm_id_priv->lock);
	if (isset(&cm_id_priv->flags, IWCM_F_CONNECT_WAIT))
		msleep(&cm_id_priv->connect_wait, &cm_id_priv->lock, 0, "iwcm connect1", 0);

	switch (cm_id_priv->state) {
	case IW_CM_STATE_ESTABLISHED:
		cm_id_priv->state = IW_CM_STATE_CLOSING;

		/* QP could be NULL for user-mode client */
		if (cm_id_priv->qp)
			qp = cm_id_priv->qp;
		else
			ret = EINVAL;
		break;
	case IW_CM_STATE_LISTEN:
		ret = EINVAL;
		break;
	case IW_CM_STATE_CLOSING:
		/* remote peer closed first */
	case IW_CM_STATE_IDLE:
		/* accept or connect returned !0 */
		break;
	case IW_CM_STATE_CONN_RECV:
		/*
		 * App called disconnect before/without calling accept after
		 * connect_request event delivered.
		 */
		break;
	case IW_CM_STATE_CONN_SENT:
		/* Can only get here if wait above fails */
	default:
		PANIC_IF(1);
	}
	mtx_unlock(&cm_id_priv->lock);

	if (qp) {
		if (abrupt)
			ret = iwcm_modify_qp_err(qp);
		else
			ret = iwcm_modify_qp_sqd(qp);

		/*
		 * If both sides are disconnecting the QP could
		 * already be in ERR or SQD states
		 */
		ret = 0;
	}

	return ret;
}
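
/*
 * Example (hypothetical caller): the abrupt flag selects the QP
 * transition used above:
 *
 *	ret = iw_cm_disconnect(cm_id, 0);	graceful close, QP -> SQD
 *	ret = iw_cm_disconnect(cm_id, 1);	abrupt close, QP -> ERR
 */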

/*
 * CM_ID <-- DESTROYING
 *
 * Clean up all resources associated with the connection and release
 * the initial reference taken by iw_create_cm_id.
 */
static void destroy_cm_id(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	/*
	 * Wait if we're currently in a connect or accept downcall. A
	 * listening endpoint should never block here.
	 */
	mtx_lock(&cm_id_priv->lock);
	if (isset(&cm_id_priv->flags, IWCM_F_CONNECT_WAIT))
		msleep(&cm_id_priv->connect_wait, &cm_id_priv->lock, 0, "iwcm connect2", 0);

	switch (cm_id_priv->state) {
	case IW_CM_STATE_LISTEN:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		mtx_unlock(&cm_id_priv->lock);
		/* destroy the listening endpoint */
		ret = cm_id->device->iwcm->destroy_listen(cm_id);
		mtx_lock(&cm_id_priv->lock);
		break;
	case IW_CM_STATE_ESTABLISHED:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		mtx_unlock(&cm_id_priv->lock);
		/* Abrupt close of the connection */
		(void)iwcm_modify_qp_err(cm_id_priv->qp);
		mtx_lock(&cm_id_priv->lock);
		break;
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CLOSING:
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		break;
	case IW_CM_STATE_CONN_RECV:
		/*
		 * App called destroy before/without calling accept after
		 * receiving connection request event notification or
		 * returned non zero from the event callback function.
		 * In either case, must tell the provider to reject.
		 */
		cm_id_priv->state = IW_CM_STATE_DESTROYING;
		break;
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_DESTROYING:
	default:
		PANIC_IF(1);
		break;
	}
	if (cm_id_priv->qp) {
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
	}
	mtx_unlock(&cm_id_priv->lock);

	(void)iwcm_deref_id(cm_id_priv);
}

/*
 * This function is only called by the application thread and cannot
 * be called by the event thread. The function will wait for all
 * references to be released on the cm_id and then free the cm_id
 * object.
 */
void iw_destroy_cm_id(struct iw_cm_id *cm_id)
{
	struct iwcm_id_private *cm_id_priv;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	PANIC_IF(isset(&cm_id_priv->flags, IWCM_F_CALLBACK_DESTROY));

	destroy_cm_id(cm_id);

	mtx_lock(&cm_id_priv->lock);
	if (atomic_load_acq_int(&cm_id_priv->refcount))
		msleep(&cm_id_priv->destroy_comp, &cm_id_priv->lock, 0, "iwcm destroy", 0);
	mtx_unlock(&cm_id_priv->lock);

	free_cm_id(cm_id_priv);
}

/*
 * CM_ID <-- LISTEN
 *
 * Start listening for connect requests. Generates one CONNECT_REQUEST
 * event for each inbound connect request.
 */
int iw_cm_listen(struct iw_cm_id *cm_id, int backlog)
{
	struct iwcm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	ret = alloc_work_entries(cm_id_priv, backlog);
	if (ret)
		return ret;

	mtx_lock(&cm_id_priv->lock);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
		cm_id_priv->state = IW_CM_STATE_LISTEN;
		mtx_unlock(&cm_id_priv->lock);
		ret = cm_id->device->iwcm->create_listen(cm_id, backlog);
		if (ret)
			cm_id_priv->state = IW_CM_STATE_IDLE;
		mtx_lock(&cm_id_priv->lock);
		break;
	default:
		ret = EINVAL;
	}
	mtx_unlock(&cm_id_priv->lock);

	return ret;
}
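
/*
 * Example (hypothetical passive-side ULP): create a cm_id bound to a
 * listening socket, then start the listen; CONNECT_REQUEST events are
 * delivered to the supplied handler:
 *
 *	cm_id = iw_create_cm_id(device, listen_so, my_cm_handler, my_ctx);
 *	ret = iw_cm_listen(cm_id, 10);
 *
 * The backlog (10 here) also sizes the pre-allocated work element pool.
 */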

/*
 * CM_ID <-- IDLE
 *
 * Rejects an inbound connection request. No events are generated.
 */
int iw_cm_reject(struct iw_cm_id *cm_id,
		 const void *private_data,
		 u8 private_data_len)
{
	struct iwcm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	setbit(&cm_id_priv->flags, IWCM_F_CONNECT_WAIT);

	mtx_lock(&cm_id_priv->lock);
	if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
		clrbit(&cm_id_priv->flags, IWCM_F_CONNECT_WAIT);
		wakeup(&cm_id_priv->connect_wait);
		mtx_unlock(&cm_id_priv->lock);
		return EINVAL;
	}
	cm_id_priv->state = IW_CM_STATE_IDLE;
	mtx_unlock(&cm_id_priv->lock);

	ret = cm_id->device->iwcm->reject(cm_id, private_data,
					  private_data_len);

	mtx_lock(&cm_id_priv->lock);
	clrbit(&cm_id_priv->flags, IWCM_F_CONNECT_WAIT);
	wakeup(&cm_id_priv->connect_wait);
	mtx_unlock(&cm_id_priv->lock);

	return ret;
}

/*
 * CM_ID <-- ESTABLISHED
 *
 * Accepts an inbound connection request and generates an ESTABLISHED
 * event. Callers of iw_cm_disconnect and iw_destroy_cm_id will block
 * until the ESTABLISHED event is received from the provider.
 */
int iw_cm_accept(struct iw_cm_id *cm_id,
		 struct iw_cm_conn_param *iw_param)
{
	struct iwcm_id_private *cm_id_priv;
	struct ib_qp *qp;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	setbit(&cm_id_priv->flags, IWCM_F_CONNECT_WAIT);

	mtx_lock(&cm_id_priv->lock);
	if (cm_id_priv->state != IW_CM_STATE_CONN_RECV) {
		clrbit(&cm_id_priv->flags, IWCM_F_CONNECT_WAIT);
		wakeup(&cm_id_priv->connect_wait);
		mtx_unlock(&cm_id_priv->lock);
		return EINVAL;
	}
	/* Get the ib_qp given the QPN */
	qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
	if (!qp) {
		mtx_unlock(&cm_id_priv->lock);
		return EINVAL;
	}
	cm_id->device->iwcm->add_ref(qp);
	cm_id_priv->qp = qp;
	mtx_unlock(&cm_id_priv->lock);

	ret = cm_id->device->iwcm->accept(cm_id, iw_param);
	if (ret) {
		/* An error on accept precludes provider events */
		PANIC_IF(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
		cm_id_priv->state = IW_CM_STATE_IDLE;
		mtx_lock(&cm_id_priv->lock);
		if (cm_id_priv->qp) {
			cm_id->device->iwcm->rem_ref(qp);
			cm_id_priv->qp = NULL;
		}
		clrbit(&cm_id_priv->flags, IWCM_F_CONNECT_WAIT);
		wakeup(&cm_id_priv->connect_wait);
		mtx_unlock(&cm_id_priv->lock);
	}

	return ret;
}
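
/*
 * Example (hypothetical cm_handler fragment): a passive-side ULP accepts
 * from its IW_CM_EVENT_CONNECT_REQUEST callback by filling in an
 * iw_cm_conn_param; field names follow iw_cm.h, the values are made up:
 *
 *	struct iw_cm_conn_param param;
 *
 *	param.qpn = my_qp->qp_num;
 *	param.ord = 1;
 *	param.ird = 1;
 *	param.private_data = NULL;
 *	param.private_data_len = 0;
 *	ret = iw_cm_accept(cm_id, &param);
 */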

/*
 * Active Side: CM_ID <-- CONN_SENT
 *
 * If successful, results in the generation of a CONNECT_REPLY
 * event. iw_cm_disconnect and iw_cm_destroy will block until the
 * CONNECT_REPLY event is received from the provider.
 */
int iw_cm_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *iw_param)
{
	struct iwcm_id_private *cm_id_priv;
	int ret;
	struct ib_qp *qp;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	ret = alloc_work_entries(cm_id_priv, 4);
	if (ret)
		return ret;

	setbit(&cm_id_priv->flags, IWCM_F_CONNECT_WAIT);
	mtx_lock(&cm_id_priv->lock);

	if (cm_id_priv->state != IW_CM_STATE_IDLE) {
		clrbit(&cm_id_priv->flags, IWCM_F_CONNECT_WAIT);
		wakeup(&cm_id_priv->connect_wait);
		mtx_unlock(&cm_id_priv->lock);
		return EINVAL;
	}

	/* Get the ib_qp given the QPN */
	qp = cm_id->device->iwcm->get_qp(cm_id->device, iw_param->qpn);
	if (!qp) {
		mtx_unlock(&cm_id_priv->lock);
		return EINVAL;
	}
	cm_id->device->iwcm->add_ref(qp);
	cm_id_priv->qp = qp;
	cm_id_priv->state = IW_CM_STATE_CONN_SENT;
	mtx_unlock(&cm_id_priv->lock);

	ret = cm_id->device->iwcm->connect(cm_id, iw_param);
	if (ret) {
		mtx_lock(&cm_id_priv->lock);
		if (cm_id_priv->qp) {
			cm_id->device->iwcm->rem_ref(qp);
			cm_id_priv->qp = NULL;
		}
		PANIC_IF(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
		cm_id_priv->state = IW_CM_STATE_IDLE;
		clrbit(&cm_id_priv->flags, IWCM_F_CONNECT_WAIT);
		wakeup(&cm_id_priv->connect_wait);
		mtx_unlock(&cm_id_priv->lock);
	}

	return ret;
}

/*
 * Passive Side: new CM_ID <-- CONN_RECV
 *
 * Handles an inbound connect request. The function creates a new
 * iw_cm_id to represent the new connection and inherits the client
 * callback function and other attributes from the listening parent.
 *
 * The work item contains a pointer to the listen_cm_id and the event. The
 * listen_cm_id contains the client cm_handler, context and
 * device. These are copied when the device is cloned. The event
 * contains the new four tuple.
 *
 * An error on the child should not affect the parent, so this
 * function does not return a value.
 */
static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
				struct iw_cm_event *iw_event)
{
	struct iw_cm_id *cm_id;
	struct iwcm_id_private *cm_id_priv;
	int ret;

	/*
	 * The provider should never generate a connection request
	 * event with a bad status.
	 */
	PANIC_IF(iw_event->status);

	/*
	 * We could be destroying the listening id. If so, ignore this
	 * upcall.
	 */
	mtx_lock(&listen_id_priv->lock);
	if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
		mtx_unlock(&listen_id_priv->lock);
		goto out;
	}
	mtx_unlock(&listen_id_priv->lock);

	cm_id = iw_create_cm_id(listen_id_priv->id.device,
				iw_event->so,
				listen_id_priv->id.cm_handler,
				listen_id_priv->id.context);
	/* If the cm_id could not be created, ignore the request */
	if (IS_ERR(cm_id))
		goto out;

	cm_id->provider_data = iw_event->provider_data;
	cm_id->local_addr = iw_event->local_addr;
	cm_id->remote_addr = iw_event->remote_addr;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	cm_id_priv->state = IW_CM_STATE_CONN_RECV;

	ret = alloc_work_entries(cm_id_priv, 3);
	if (ret) {
		iw_cm_reject(cm_id, NULL, 0);
		iw_destroy_cm_id(cm_id);
		goto out;
	}

	/* Call the client CM handler */
	ret = cm_id->cm_handler(cm_id, iw_event);
	if (ret) {
		iw_cm_reject(cm_id, NULL, 0);
		setbit(&cm_id_priv->flags, IWCM_F_CALLBACK_DESTROY);
		destroy_cm_id(cm_id);
		if (atomic_load_acq_int(&cm_id_priv->refcount) == 0)
			free_cm_id(cm_id_priv);
	}

out:
	if (iw_event->private_data_len)
		free(iw_event->private_data, M_DEVBUF);
}

/*
 * Passive Side: CM_ID <-- ESTABLISHED
 *
 * The provider generated an ESTABLISHED event which means that
 * the MPA negotiation has completed successfully and we are now in MPA
 * frame mode.
 *
 * This event can only be received in the CONN_RECV state. If the
 * remote peer closed, the ESTABLISHED event would be received followed
 * by the CLOSE event. If the app closes, it will block until we wake
 * it up after processing this event.
 */
static int cm_conn_est_handler(struct iwcm_id_private *cm_id_priv,
			       struct iw_cm_event *iw_event)
{
	int ret;

	mtx_lock(&cm_id_priv->lock);

	/*
	 * We clear the CONNECT_WAIT bit here to allow the callback
	 * function to call iw_cm_disconnect. Calling iw_destroy_cm_id
	 * from a callback handler is not allowed.
	 */
	clrbit(&cm_id_priv->flags, IWCM_F_CONNECT_WAIT);
	PANIC_IF(cm_id_priv->state != IW_CM_STATE_CONN_RECV);
	cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
	wakeup(&cm_id_priv->connect_wait);
	mtx_unlock(&cm_id_priv->lock);

	return ret;
}

/*
 * Active Side: CM_ID <-- ESTABLISHED
 *
 * The app has called connect and is waiting for the established event to
 * post its requests to the server. This event will wake up anyone
 * blocked in iw_cm_disconnect or iw_destroy_id.
 */
static int cm_conn_rep_handler(struct iwcm_id_private *cm_id_priv,
			       struct iw_cm_event *iw_event)
{
	int ret;

	mtx_lock(&cm_id_priv->lock);
	/*
	 * Clear the connect wait bit so a callback function calling
	 * iw_cm_disconnect will not wait and deadlock this thread
	 */
	clrbit(&cm_id_priv->flags, IWCM_F_CONNECT_WAIT);
	PANIC_IF(cm_id_priv->state != IW_CM_STATE_CONN_SENT);
	if (iw_event->status == IW_CM_EVENT_STATUS_ACCEPTED) {
		cm_id_priv->id.local_addr = iw_event->local_addr;
		cm_id_priv->id.remote_addr = iw_event->remote_addr;
		cm_id_priv->state = IW_CM_STATE_ESTABLISHED;
	} else {
		/* REJECTED or RESET */
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
		cm_id_priv->state = IW_CM_STATE_IDLE;
	}
	mtx_unlock(&cm_id_priv->lock);
	ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);

	mtx_lock(&cm_id_priv->lock);
	if (iw_event->private_data_len)
		free(iw_event->private_data, M_DEVBUF);

	/* Wake up waiters on connect complete */
	wakeup(&cm_id_priv->connect_wait);
	mtx_unlock(&cm_id_priv->lock);

	return ret;
}

/*
 * CM_ID <-- CLOSING
 *
 * If in the ESTABLISHED state, move to CLOSING.
 */
static void cm_disconnect_handler(struct iwcm_id_private *cm_id_priv,
				  struct iw_cm_event *iw_event)
{
	mtx_lock(&cm_id_priv->lock);
	if (cm_id_priv->state == IW_CM_STATE_ESTABLISHED)
		cm_id_priv->state = IW_CM_STATE_CLOSING;
	mtx_unlock(&cm_id_priv->lock);
}

/*
 * CM_ID <-- IDLE
 *
 * If in the ESTABLISHED or CLOSING states, the QP will have been
 * moved by the provider to the ERR state. Disassociate the CM_ID from
 * the QP, move to IDLE, and remove the 'connected' reference.
 *
 * If in some other state, the cm_id was destroyed asynchronously.
 * This is the last reference that will result in waking up
 * the app thread blocked in iw_destroy_cm_id.
 */
static int cm_close_handler(struct iwcm_id_private *cm_id_priv,
			    struct iw_cm_event *iw_event)
{
	int ret = 0;

	mtx_lock(&cm_id_priv->lock);

	if (cm_id_priv->qp) {
		cm_id_priv->id.device->iwcm->rem_ref(cm_id_priv->qp);
		cm_id_priv->qp = NULL;
	}
	switch (cm_id_priv->state) {
	case IW_CM_STATE_ESTABLISHED:
	case IW_CM_STATE_CLOSING:
		cm_id_priv->state = IW_CM_STATE_IDLE;
		mtx_unlock(&cm_id_priv->lock);
		ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, iw_event);
		mtx_lock(&cm_id_priv->lock);
		break;
	case IW_CM_STATE_DESTROYING:
		break;
	default:
		PANIC_IF(1);
	}
	mtx_unlock(&cm_id_priv->lock);

	return ret;
}

static int process_event(struct iwcm_id_private *cm_id_priv,
			 struct iw_cm_event *iw_event)
{
	int ret = 0;

	switch (iw_event->event) {
	case IW_CM_EVENT_CONNECT_REQUEST:
		cm_conn_req_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_CONNECT_REPLY:
		ret = cm_conn_rep_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_ESTABLISHED:
		ret = cm_conn_est_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_DISCONNECT:
		cm_disconnect_handler(cm_id_priv, iw_event);
		break;
	case IW_CM_EVENT_CLOSE:
		ret = cm_close_handler(cm_id_priv, iw_event);
		break;
	default:
		PANIC_IF(1);
	}

	return ret;
}

/*
 * Process events on the work_list for the cm_id. If the callback
 * function requests that the cm_id be deleted, a flag is set in the
 * cm_id flags to indicate that when the last reference is
 * removed, the cm_id is to be destroyed. This is necessary to
 * distinguish between an object that will be destroyed by the app
 * thread asleep on the destroy_comp list vs. an object destroyed
 * here synchronously when the last reference is removed.
 */
static void cm_work_handler(void *context, int pending)
{
	struct iwcm_work *work = context;
	struct iw_cm_event levent;
	struct iwcm_id_private *cm_id_priv = work->cm_id;
	int empty;
	int ret = 0;

	mtx_lock(&cm_id_priv->lock);
	empty = TAILQ_EMPTY(&cm_id_priv->work_list);
	while (!empty) {
		work = TAILQ_FIRST(&cm_id_priv->work_list);
		TAILQ_REMOVE(&cm_id_priv->work_list, work, list);
		empty = TAILQ_EMPTY(&cm_id_priv->work_list);
		levent = work->event;
		put_work(work);
		mtx_unlock(&cm_id_priv->lock);

		ret = process_event(cm_id_priv, &levent);
		if (ret) {
			setbit(&cm_id_priv->flags, IWCM_F_CALLBACK_DESTROY);
			destroy_cm_id(&cm_id_priv->id);
		}
		PANIC_IF(atomic_load_acq_int(&cm_id_priv->refcount) == 0);
		if (iwcm_deref_id(cm_id_priv)) {
			if (isset(&cm_id_priv->flags,
			    IWCM_F_CALLBACK_DESTROY)) {
				PANIC_IF(!TAILQ_EMPTY(&cm_id_priv->work_list));
				free_cm_id(cm_id_priv);
			}
			return;
		}
		mtx_lock(&cm_id_priv->lock);
	}
	mtx_unlock(&cm_id_priv->lock);
}

/*
 * This function is called in interrupt context. Schedule events on
 * the iwcm_wq thread to allow callback functions to downcall into
 * the CM and/or block. Events are queued to a per-CM_ID
 * work_list. If this is the first event on the work_list, the work
 * element is also queued on the iwcm_wq thread.
 *
 * Each event holds a reference on the cm_id. Until the last posted
 * event has been delivered and processed, the cm_id cannot be
 * freed.
 *
 * Returns:
 *	     0	- the event was handled.
 *	ENOMEM	- the event was not handled due to lack of resources.
 */
static int cm_event_handler(struct iw_cm_id *cm_id,
			    struct iw_cm_event *iw_event)
{
	struct iwcm_work *work;
	struct iwcm_id_private *cm_id_priv;
	int ret = 0;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);

	mtx_lock(&cm_id_priv->lock);
	work = get_work(cm_id_priv);
	if (!work) {
		ret = ENOMEM;
		goto out;
	}

	TASK_INIT(&work->task, 0, cm_work_handler, work);
	work->cm_id = cm_id_priv;
	work->event = *iw_event;

	if ((work->event.event == IW_CM_EVENT_CONNECT_REQUEST ||
	     work->event.event == IW_CM_EVENT_CONNECT_REPLY) &&
	    work->event.private_data_len) {
		ret = copy_private_data(&work->event);
		if (ret) {
			put_work(work);
			goto out;
		}
	}

	atomic_add_acq_int(&cm_id_priv->refcount, 1);
	if (TAILQ_EMPTY(&cm_id_priv->work_list)) {
		TAILQ_INSERT_TAIL(&cm_id_priv->work_list, work, list);
		taskqueue_enqueue(iwcm_wq, &work->task);
	} else
		TAILQ_INSERT_TAIL(&cm_id_priv->work_list, work, list);
out:
	mtx_unlock(&cm_id_priv->lock);
	return ret;
}
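
/*
 * Example (hypothetical provider upcall): an RNIC driver delivers events
 * through the event_handler pointer installed by iw_create_cm_id(),
 * typically from its interrupt handler:
 *
 *	struct iw_cm_event event;
 *
 *	bzero(&event, sizeof event);
 *	event.event = IW_CM_EVENT_ESTABLISHED;
 *	ret = cm_id->event_handler(cm_id, &event);
 *
 * A return of ENOMEM means no work element was available and the event
 * was dropped; per the comment above, the provider must recover.
 */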

static int iwcm_init_qp_init_attr(struct iwcm_id_private *cm_id_priv,
				  struct ib_qp_attr *qp_attr,
				  int *qp_attr_mask)
{
	int ret;

	mtx_lock(&cm_id_priv->lock);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_CONN_RECV:
	case IW_CM_STATE_ESTABLISHED:
		*qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS;
		qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
					   IB_ACCESS_REMOTE_WRITE |
					   IB_ACCESS_REMOTE_READ;
		ret = 0;
		break;
	default:
		ret = EINVAL;
		break;
	}
	mtx_unlock(&cm_id_priv->lock);
	return ret;
}

static int iwcm_init_qp_rts_attr(struct iwcm_id_private *cm_id_priv,
				 struct ib_qp_attr *qp_attr,
				 int *qp_attr_mask)
{
	int ret;

	mtx_lock(&cm_id_priv->lock);
	switch (cm_id_priv->state) {
	case IW_CM_STATE_IDLE:
	case IW_CM_STATE_CONN_SENT:
	case IW_CM_STATE_CONN_RECV:
	case IW_CM_STATE_ESTABLISHED:
		*qp_attr_mask = 0;
		ret = 0;
		break;
	default:
		ret = EINVAL;
		break;
	}
	mtx_unlock(&cm_id_priv->lock);
	return ret;
}

int iw_cm_init_qp_attr(struct iw_cm_id *cm_id,
		       struct ib_qp_attr *qp_attr,
		       int *qp_attr_mask)
{
	struct iwcm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
	switch (qp_attr->qp_state) {
	case IB_QPS_INIT:
	case IB_QPS_RTR:
		ret = iwcm_init_qp_init_attr(cm_id_priv,
					     qp_attr, qp_attr_mask);
		break;
	case IB_QPS_RTS:
		ret = iwcm_init_qp_rts_attr(cm_id_priv,
					    qp_attr, qp_attr_mask);
		break;
	default:
		ret = EINVAL;
		break;
	}
	return ret;
}
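
/*
 * Example (hypothetical ULP usage): a consumer transitioning its QP lets
 * the CM choose the attribute mask before calling ib_modify_qp():
 *
 *	struct ib_qp_attr qp_attr;
 *	int qp_attr_mask;
 *
 *	qp_attr.qp_state = IB_QPS_RTS;
 *	ret = iw_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
 *	if (!ret)
 *		ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
 */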

static int iw_cm_init(void)
{
	iwcm_wq = taskqueue_create("iw_cm_wq", M_NOWAIT, taskqueue_thread_enqueue, &iwcm_wq);
	if (!iwcm_wq)
		return ENOMEM;

	taskqueue_start_threads(&iwcm_wq, 1, PI_NET, "iw_cm_wq thread");
	return 0;
}

static void iw_cm_cleanup(void)
{
	taskqueue_free(iwcm_wq);
}

static int
iw_cm_load(module_t mod, int cmd, void *arg)
{
	int err = 0;

	switch (cmd) {
	case MOD_LOAD:
		printf("Loading rdma_iwcm.\n");
		iw_cm_init();
		break;
	case MOD_QUIESCE:
		break;
	case MOD_UNLOAD:
		printf("Unloading rdma_iwcm.\n");
		iw_cm_cleanup();
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		err = EOPNOTSUPP;
		break;
	}

	return (err);
}

static moduledata_t mod_data = {
	"rdma_iwcm",
	iw_cm_load,
	0
};

MODULE_VERSION(rdma_iwcm, 1);
MODULE_DEPEND(rdma_iwcm, rdma_core, 1, 1, 1);
DECLARE_MODULE(rdma_iwcm, mod_data, SI_SUB_EXEC, SI_ORDER_ANY);