2 * Copyright (c) 2017 Broadcom. All rights reserved.
3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
15 * 3. Neither the name of the copyright holder nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
36 * Implement remote device state machine for target and initiator.
40 @defgroup device_sm Node State Machine: Remote Device States
44 #include "ocs_device.h"
45 #include "ocs_fabric.h"
48 static void *__ocs_d_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
49 static void *__ocs_d_wait_del_node(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
50 static void *__ocs_d_wait_del_ini_tgt(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
51 static int32_t ocs_process_abts(ocs_io_t *io, fc_header_t *hdr);
55 * @brief Send response to PRLI.
57 * <h3 class="desc">Description</h3>
58 * For device nodes, this function sends a PRLI response.
60 * @param io Pointer to a SCSI IO object.
61 * @param ox_id OX_ID of PRLI
63 * @return Returns None.
67 ocs_d_send_prli_rsp(ocs_io_t *io, uint16_t ox_id)
70 ocs_node_t *node = io->node;
/* NOTE(review): 'ocs' used below is the adapter handle (declared on an
 * elided line); ocs->fc_type is the FC-4 type the local back-end supports. */
72 /* If the back-end doesn't support the fc-type, we send an LS_RJT */
73 if (ocs->fc_type != node->fc_type) {
74 node_printf(node, "PRLI rejected by target-server, fc-type not supported\n");
75 ocs_send_ls_rjt(io, ox_id, FC_REASON_UNABLE_TO_PERFORM,
76 FC_EXPL_REQUEST_NOT_SUPPORTED, 0, NULL, NULL);
/* Both rejection paths also shut the node down with the default reason. */
77 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
78 ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
81 /* If the back-end doesn't want to talk to this initiator, we send an LS_RJT */
82 if (node->sport->enable_tgt && (ocs_scsi_validate_initiator(node) == 0)) {
83 node_printf(node, "PRLI rejected by target-server\n");
84 ocs_send_ls_rjt(io, ox_id, FC_REASON_UNABLE_TO_PERFORM,
85 FC_EXPL_NO_ADDITIONAL, 0, NULL, NULL);
86 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
87 ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
89 /*sm: process PRLI payload, send PRLI acc */
90 ocs_send_prli_acc(io, ox_id, ocs->fc_type, NULL, NULL);
92 /* Immediately go to ready state to avoid window where we're
93 * waiting for the PRLI LS_ACC to complete while holding FCP_CMNDs
95 ocs_node_transition(node, __ocs_d_device_ready, NULL);
101 * @brief Device node state machine: Initiate node shutdown
103 * @param ctx Remote node state machine context.
104 * @param evt Event to process.
105 * @param arg Per event optional argument.
107 * @return Returns NULL.
111 __ocs_d_initiate_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
113 std_node_state_decl();
118 case OCS_EVT_ENTER: {
119 int32_t rc = OCS_SCSI_CALL_COMPLETE; /* assume no wait needed */
/* Stop handing out new SCSI IOs on this node before tearing it down. */
121 ocs_scsi_io_alloc_disable(node);
123 /* make necessary delete upcall(s) */
/* Pattern used for each role combination below: transition to the
 * appropriate wait state FIRST, then issue the back-end delete upcall;
 * if the upcall completed synchronously, immediately post the matching
 * DEL_*_COMPLETE event so the wait state can advance. */
124 if (node->init && !node->targ) {
125 ocs_log_debug(node->ocs,
126 "[%s] delete (initiator) WWPN %s WWNN %s\n",
127 node->display_name, node->wwpn, node->wwnn);
128 ocs_node_transition(node, __ocs_d_wait_del_node, NULL);
129 if (node->sport->enable_tgt) {
130 rc = ocs_scsi_del_initiator(node,
131 OCS_SCSI_INITIATOR_DELETED);
133 if (rc == OCS_SCSI_CALL_COMPLETE) {
134 ocs_node_post_event(node,
135 OCS_EVT_NODE_DEL_INI_COMPLETE, NULL);
137 } else if (node->targ && !node->init) {
138 ocs_log_debug(node->ocs,
139 "[%s] delete (target) WWPN %s WWNN %s\n",
140 node->display_name, node->wwpn, node->wwnn);
141 ocs_node_transition(node, __ocs_d_wait_del_node, NULL);
142 if (node->sport->enable_ini) {
143 rc = ocs_scsi_del_target(node,
144 OCS_SCSI_TARGET_DELETED);
146 if (rc == OCS_SCSI_CALL_COMPLETE) {
147 ocs_node_post_event(node,
148 OCS_EVT_NODE_DEL_TGT_COMPLETE, NULL);
150 } else if (node->init && node->targ) {
151 ocs_log_debug(node->ocs,
152 "[%s] delete (initiator+target) WWPN %s WWNN %s\n",
153 node->display_name, node->wwpn, node->wwnn);
/* I+T node: both delete upcalls are made; the wait state below
 * expects BOTH DEL_INI_COMPLETE and DEL_TGT_COMPLETE. */
154 ocs_node_transition(node, __ocs_d_wait_del_ini_tgt, NULL);
155 if (node->sport->enable_tgt) {
156 rc = ocs_scsi_del_initiator(node,
157 OCS_SCSI_INITIATOR_DELETED);
159 if (rc == OCS_SCSI_CALL_COMPLETE) {
160 ocs_node_post_event(node,
161 OCS_EVT_NODE_DEL_INI_COMPLETE, NULL);
/* Reset rc before the second (target) upcall so the initiator
 * upcall's result does not leak into the target-completion check. */
163 rc = OCS_SCSI_CALL_COMPLETE; /* assume no wait needed */
164 if (node->sport->enable_ini) {
165 rc = ocs_scsi_del_target(node,
166 OCS_SCSI_TARGET_DELETED);
168 if (rc == OCS_SCSI_CALL_COMPLETE) {
169 ocs_node_post_event(node,
170 OCS_EVT_NODE_DEL_TGT_COMPLETE, NULL);
174 /* we've initiated the upcalls as needed, now kick off the node
175 * detach to precipitate the aborting of outstanding exchanges
176 * associated with said node
178 * Beware: if we've made upcall(s), we've already transitioned
179 * to a new state by the time we execute this.
180 * TODO: consider doing this before the upcalls...
182 if (node->attached) {
183 /* issue hw node free; don't care if succeeds right away
184 * or sometime later, will check node->attached later in
187 rc = ocs_hw_node_detach(&ocs->hw, &node->rnode);
188 if (node->rnode.free_group) {
189 ocs_remote_node_group_free(node->node_group);
190 node->node_group = NULL;
191 node->rnode.free_group = FALSE;
193 if (rc != OCS_HW_RTN_SUCCESS &&
194 rc != OCS_HW_RTN_SUCCESS_SYNC) {
196 "Failed freeing HW node, rc=%d\n", rc);
200 /* if neither initiator nor target, proceed to cleanup */
201 if (!node->init && !node->targ){
203 * node has either been detached or is in the process
204 * of being detached, call common node's initiate
207 ocs_node_initiate_cleanup(node);
211 case OCS_EVT_ALL_CHILD_NODES_FREE:
212 /* Ignore, this can happen if an ELS is aborted,
213 * while in a delay/retry state */
216 __ocs_d_common(__func__, ctx, evt, arg);
224 * @brief Device node state machine: Common device event handler.
226 * <h3 class="desc">Description</h3>
227 * For device nodes, this event handler manages default and common events.
229 * @param funcname Function name text.
230 * @param ctx Remote node state machine context.
231 * @param evt Event to process.
232 * @param arg Per event optional argument.
234 * @return Returns NULL.
238 __ocs_d_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
240 ocs_node_t *node = NULL;
242 ocs_assert(ctx, NULL);
244 ocs_assert(node, NULL);
246 ocs_assert(ocs, NULL);
250 /* Handle shutdown events */
/* Each shutdown flavor records its specific reason on the node, then
 * enters the common shutdown-initiation state. */
251 case OCS_EVT_SHUTDOWN:
252 ocs_log_debug(ocs, "[%s] %-20s %-20s\n", node->display_name, funcname, ocs_sm_event_name(evt));
253 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
254 ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
256 case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
257 ocs_log_debug(ocs, "[%s] %-20s %-20s\n", node->display_name, funcname, ocs_sm_event_name(evt));
258 node->shutdown_reason = OCS_NODE_SHUTDOWN_EXPLICIT_LOGO;
259 ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
261 case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
262 ocs_log_debug(ocs, "[%s] %-20s %-20s\n", node->display_name, funcname, ocs_sm_event_name(evt));
263 node->shutdown_reason = OCS_NODE_SHUTDOWN_IMPLICIT_LOGO;
264 ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
268 /* call default event handler common to all nodes */
269 __ocs_node_common(funcname, ctx, evt, arg);
277 * @brief Device node state machine: Wait for a domain-attach completion in loop topology.
279 * <h3 class="desc">Description</h3>
280 * State waits for a domain-attached completion while in loop topology.
282 * @param ctx Remote node state machine context.
283 * @param evt Event to process.
284 * @param arg Per event optional argument.
286 * @return Returns NULL.
290 __ocs_d_wait_loop(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
292 std_node_state_decl();
/* Hold unsolicited frames while waiting; release them on state exit. */
298 ocs_node_hold_frames(node);
302 ocs_node_accept_frames(node);
305 case OCS_EVT_DOMAIN_ATTACH_OK: {
306 /* send PLOGI automatically if initiator */
307 ocs_node_init_device(node, TRUE);
311 __ocs_d_common(__func__, ctx, evt, arg);
323 * @brief state: wait for node resume event
325 * State is entered when a node is in I+T mode and sends a delete initiator/target
326 * call to the target-server/initiator-client and needs to wait for that work to complete.
328 * @param ctx Remote node state machine context.
329 * @param evt Event to process.
330 * @param arg per event optional argument
332 * @return returns NULL
336 __ocs_d_wait_del_ini_tgt(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
338 std_node_state_decl();
/* Hold unsolicited frames for the duration of the teardown. */
344 ocs_node_hold_frames(node);
347 case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
348 case OCS_EVT_ALL_CHILD_NODES_FREE:
349 /* These are expected events. */
352 case OCS_EVT_NODE_DEL_INI_COMPLETE:
353 case OCS_EVT_NODE_DEL_TGT_COMPLETE:
/* First of the two delete upcalls has completed; move to
 * __ocs_d_wait_del_node to wait for the remaining one. */
354 ocs_node_transition(node, __ocs_d_wait_del_node, NULL);
358 ocs_node_accept_frames(node);
361 case OCS_EVT_SRRS_ELS_REQ_FAIL:
362 /* Can happen as ELS IO IO's complete */
363 ocs_assert(node->els_req_cnt, NULL);
367 /* ignore shutdown events as we're already in shutdown path */
368 case OCS_EVT_SHUTDOWN:
369 /* have default shutdown event take precedence */
370 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
372 case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
373 case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
374 node_printf(node, "%s received\n", ocs_sm_event_name(evt));
376 case OCS_EVT_DOMAIN_ATTACH_OK:
377 /* don't care about domain_attach_ok */
380 __ocs_d_common(__func__, ctx, evt, arg);
390 * @brief state: Wait for node resume event.
392 * State is entered when a node sends a delete initiator/target call to the
393 * target-server/initiator-client and needs to wait for that work to complete.
395 * @param ctx Remote node state machine context.
396 * @param evt Event to process.
397 * @param arg Per event optional argument.
399 * @return Returns NULL.
403 __ocs_d_wait_del_node(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
405 std_node_state_decl();
/* Hold unsolicited frames for the duration of the teardown. */
411 ocs_node_hold_frames(node);
414 case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
415 case OCS_EVT_ALL_CHILD_NODES_FREE:
416 /* These are expected events. */
419 case OCS_EVT_NODE_DEL_INI_COMPLETE:
420 case OCS_EVT_NODE_DEL_TGT_COMPLETE:
422 * node has either been detached or is in the process of being detached,
423 * call common node's initiate cleanup function
425 ocs_node_initiate_cleanup(node);
429 ocs_node_accept_frames(node);
432 case OCS_EVT_SRRS_ELS_REQ_FAIL:
433 /* Can happen as ELS IO IO's complete */
434 ocs_assert(node->els_req_cnt, NULL);
438 /* ignore shutdown events as we're already in shutdown path */
439 case OCS_EVT_SHUTDOWN:
440 /* have default shutdown event take precedence */
441 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
443 case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
444 case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
445 node_printf(node, "%s received\n", ocs_sm_event_name(evt));
447 case OCS_EVT_DOMAIN_ATTACH_OK:
448 /* don't care about domain_attach_ok */
451 __ocs_d_common(__func__, ctx, evt, arg);
461 * @brief Save the OX_ID for sending LS_ACC sometime later.
463 * <h3 class="desc">Description</h3>
464 * When deferring the response to an ELS request, the OX_ID of the request
465 * is saved using this function.
467 * @param io Pointer to a SCSI IO object.
468 * @param hdr Pointer to the FC header.
469 * @param ls Defines the type of ELS to send: LS_ACC, LS_ACC for PLOGI;
470 * or LSS_ACC for PRLI.
476 ocs_send_ls_acc_after_attach(ocs_io_t *io, fc_header_t *hdr, ocs_node_send_ls_acc_e ls)
478 ocs_node_t *node = io->node;
479 uint16_t ox_id = ocs_be16toh(hdr->ox_id);
481 ocs_assert(node->send_ls_acc == OCS_NODE_SEND_LS_ACC_NONE);
483 node->ls_acc_oxid = ox_id;
484 node->send_ls_acc = ls;
485 node->ls_acc_io = io;
486 node->ls_acc_did = fc_be24toh(hdr->d_id);
490 * @brief Process the PRLI payload.
492 * <h3 class="desc">Description</h3>
493 * The PRLI payload is processed; the initiator/target capabilities of the
494 * remote node are extracted and saved in the node object.
496 * @param node Pointer to the node object.
497 * @param prli Pointer to the PRLI payload.
503 ocs_process_prli_payload(ocs_node_t *node, fc_prli_payload_t *prli)
505 node->init = (ocs_be16toh(prli->service_params) & FC_PRLI_INITIATOR_FUNCTION) != 0;
506 node->targ = (ocs_be16toh(prli->service_params) & FC_PRLI_TARGET_FUNCTION) != 0;
507 node->fcp2device = (ocs_be16toh(prli->service_params) & FC_PRLI_RETRY) != 0;
508 node->fc_type = prli->type;
512 * @brief Process the ABTS.
514 * <h3 class="desc">Description</h3>
515 * Common code to process a received ABTS. If an active IO can be found
516 * that matches the OX_ID of the ABTS request, a call is made to the
517 * backend. Otherwise, a BA_ACC is returned to the initiator.
519 * @param io Pointer to a SCSI IO object.
520 * @param hdr Pointer to the FC header.
522 * @return Returns 0 on success, or a negative error value on failure.
526 ocs_process_abts(ocs_io_t *io, fc_header_t *hdr)
528 ocs_node_t *node = io->node;
529 ocs_t *ocs = node->ocs;
530 uint16_t ox_id = ocs_be16toh(hdr->ox_id);
531 uint16_t rx_id = ocs_be16toh(hdr->rx_id);
/* Look up the active target IO that matches this ABTS exchange. */
534 abortio = ocs_io_find_tgt_io(ocs, node, ox_id, rx_id);
536 /* If an IO was found, attempt to take a reference on it */
537 if (abortio != NULL && (ocs_ref_get_unless_zero(&abortio->ref) != 0)) {
539 /* Got a reference on the IO. Hold it until backend is notified below */
540 node_printf(node, "Abort request: ox_id [%04x] rx_id [%04x]\n",
544 * Save the ox_id for the ABTS as the init_task_tag in our manufactured
547 io->display_name = "abts";
548 io->init_task_tag = ox_id;
549 /* don't set tgt_task_tag, don't want to confuse with XRI */
552 * Save the rx_id from the ABTS as it is needed for the BLS response,
553 * regardless of the IO context's rx_id
555 io->abort_rx_id = rx_id;
557 /* Call target server command abort */
558 io->tmf_cmd = OCS_SCSI_TMF_ABORT_TASK;
559 ocs_scsi_recv_tmf(io, abortio->tgt_io.lun, OCS_SCSI_TMF_ABORT_TASK, abortio, 0);
562 * Backend will have taken an additional reference on the IO if needed;
563 * done with current reference.
565 ocs_ref_put(&abortio->ref); /* ocs_ref_get(): same function */
568 * Either IO was not found or it has been freed between finding it
569 * and attempting to get the reference,
571 node_printf(node, "Abort request: ox_id [%04x], IO not found (exists=%d)\n",
572 ox_id, (abortio != NULL));
/* No matching IO: simply acknowledge the ABTS with a BA_ACC. */
575 ocs_bls_send_acc_hdr(io, hdr);
582 * @brief Device node state machine: Wait for the PLOGI accept to complete.
584 * @param ctx Remote node state machine context.
585 * @param evt Event to process.
586 * @param arg Per event optional argument.
588 * @return Returns NULL.
592 __ocs_d_wait_plogi_acc_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
594 std_node_state_decl();
/* Hold frames while the PLOGI LS_ACC is in flight. */
600 ocs_node_hold_frames(node);
604 ocs_node_accept_frames(node);
/* LS_ACC send failed: shut the node down. */
607 case OCS_EVT_SRRS_ELS_CMPL_FAIL:
608 ocs_assert(node->els_cmpl_cnt, NULL);
609 node->els_cmpl_cnt--;
610 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
611 ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
614 case OCS_EVT_SRRS_ELS_CMPL_OK: /* PLOGI ACC completions */
615 ocs_assert(node->els_cmpl_cnt, NULL);
616 node->els_cmpl_cnt--;
617 ocs_node_transition(node, __ocs_d_port_logged_in, NULL);
621 __ocs_d_common(__func__, ctx, evt, arg);
630 * @brief Device node state machine: Wait for the LOGO response.
632 * @param ctx Remote node state machine context.
633 * @param evt Event to process.
634 * @param arg Per event optional argument.
636 * @return Returns NULL.
640 __ocs_d_wait_logo_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
642 std_node_state_decl();
648 /* TODO: may want to remove this;
649 * if we'll want to know about PLOGI */
650 ocs_node_hold_frames(node);
654 ocs_node_accept_frames(node);
/* Any LOGO completion (accept, reject, or failure) triggers node
 * shutdown with an explicit-logout reason. */
657 case OCS_EVT_SRRS_ELS_REQ_OK:
658 case OCS_EVT_SRRS_ELS_REQ_RJT:
659 case OCS_EVT_SRRS_ELS_REQ_FAIL:
660 /* LOGO response received, send shutdown */
661 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_LOGO, __ocs_d_common, __func__)) {
664 ocs_assert(node->els_req_cnt, NULL);
666 node_printf(node, "LOGO sent (evt=%s), shutdown node\n", ocs_sm_event_name(evt));
667 /* sm: post explicit logout */
668 ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
671 /* TODO: PLOGI: abort LOGO and process PLOGI? (SHUTDOWN_EXPLICIT/IMPLICIT_LOGO?) */
674 __ocs_d_common(__func__, ctx, evt, arg);
683 * @brief Device node state machine: Wait for the PRLO response.
685 * @param ctx Remote node state machine context.
686 * @param evt Event to process.
687 * @param arg Per event optional argument.
689 * @return Returns NULL.
693 __ocs_d_wait_prlo_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
695 std_node_state_decl();
701 ocs_node_hold_frames(node);
705 ocs_node_accept_frames(node);
708 case OCS_EVT_SRRS_ELS_REQ_OK:
709 case OCS_EVT_SRRS_ELS_REQ_RJT:
710 case OCS_EVT_SRRS_ELS_REQ_FAIL:
711 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PRLO, __ocs_d_common, __func__)) {
714 ocs_assert(node->els_req_cnt, NULL);
716 node_printf(node, "PRLO sent (evt=%s)\n", ocs_sm_event_name(evt));
717 ocs_node_transition(node, __ocs_d_port_logged_in, NULL);
/* NOTE(review): the default handler here is __ocs_node_common, whereas
 * every other device-node state delegates to __ocs_d_common — confirm
 * whether this is intentional (shutdown events bypass the device-node
 * shutdown-reason handling in this state). */
721 __ocs_node_common(__func__, ctx, evt, arg);
729 * @brief Initialize device node.
731 * Initialize device node. If a node is an initiator, then send a PLOGI and transition
732 * to __ocs_d_wait_plogi_rsp, otherwise transition to __ocs_d_init.
734 * @param node Pointer to the node object.
735 * @param send_plogi Boolean indicating to send PLOGI command or not.
741 ocs_node_init_device(ocs_node_t *node, int send_plogi)
743 node->send_plogi = send_plogi;
744 if ((node->ocs->nodedb_mask & OCS_NODEDB_PAUSE_NEW_NODES) && !FC_ADDR_IS_DOMAIN_CTRL(node->rnode.fc_id)) {
745 node->nodedb_state = __ocs_d_init;
746 ocs_node_transition(node, __ocs_node_paused, NULL);
748 ocs_node_transition(node, __ocs_d_init, NULL);
754 * @brief Device node state machine: Initial node state for an initiator or a target.
756 * <h3 class="desc">Description</h3>
757 * This state is entered when a node is instantiated, either having been
758 * discovered from a name services query, or having received a PLOGI/FLOGI.
760 * @param ctx Remote node state machine context.
761 * @param evt Event to process.
762 * @param arg Per event optional argument.
763 * - OCS_EVT_ENTER: (uint8_t *) - 1 to send a PLOGI on
764 * entry (initiator-only); 0 indicates a PLOGI is
765 * not sent on entry (initiator-only). Not applicable for a target.
767 * @return Returns NULL.
771 __ocs_d_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
774 ocs_node_cb_t *cbdata = arg;
775 std_node_state_decl();
781 /* check if we need to send PLOGI */
782 if (node->send_plogi) {
783 /* only send if we have initiator capability, and domain is attached */
784 if (node->sport->enable_ini && node->sport->domain->attached) {
785 ocs_send_plogi(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT,
786 OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
787 ocs_node_transition(node, __ocs_d_wait_plogi_rsp, NULL);
789 node_printf(node, "not sending plogi sport.ini=%d, domain attached=%d\n",
790 node->sport->enable_ini, node->sport->domain->attached);
794 case OCS_EVT_PLOGI_RCVD: {
796 fc_header_t *hdr = cbdata->header->dma.virt;
797 uint32_t d_id = fc_be24toh(hdr->d_id);
/* Save service parameters and defer the PLOGI LS_ACC until after the
 * node attach completes. */
799 ocs_node_save_sparms(node, cbdata->payload->dma.virt);
800 ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PLOGI);
802 /* domain already attached */
803 if (node->sport->domain->attached) {
804 rc = ocs_node_attach(node);
805 ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
806 if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
807 ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
812 /* domain not attached; several possibilities: */
813 switch (node->sport->topology) {
814 case OCS_SPORT_TOPOLOGY_P2P:
815 /* we're not attached and sport is p2p, need to attach */
816 ocs_domain_attach(node->sport->domain, d_id);
817 ocs_node_transition(node, __ocs_d_wait_domain_attach, NULL);
819 case OCS_SPORT_TOPOLOGY_FABRIC:
820 /* we're not attached and sport is fabric, domain attach should have
821 * already been requested as part of the fabric state machine, wait for it
823 ocs_node_transition(node, __ocs_d_wait_domain_attach, NULL);
825 case OCS_SPORT_TOPOLOGY_UNKNOWN:
826 /* Two possibilities:
827 * 1. received a PLOGI before our FLOGI has completed (possible since
828 * completion comes in on another CQ), thus we don't know what we're
829 * connected to yet; transition to a state to wait for the fabric
831 * 2. PLOGI received before link went down and we haven't performed
833 * Note: we cannot distinguish between 1. and 2. so have to assume PLOGI
834 * was received after link back up.
836 node_printf(node, "received PLOGI, with unknown topology did=0x%x\n", d_id);
837 ocs_node_transition(node, __ocs_d_wait_topology_notify, NULL);
840 node_printf(node, "received PLOGI, with unexpectd topology %d\n",
841 node->sport->topology);
842 ocs_assert(FALSE, NULL);
848 case OCS_EVT_FDISC_RCVD: {
849 __ocs_d_common(__func__, ctx, evt, arg);
853 case OCS_EVT_FLOGI_RCVD: {
854 fc_header_t *hdr = cbdata->header->dma.virt;
856 /* this better be coming from an NPort */
857 ocs_assert(ocs_rnode_is_nport(cbdata->payload->dma.virt), NULL);
859 /* sm: save sparams, send FLOGI acc */
860 ocs_domain_save_sparms(node->sport->domain, cbdata->payload->dma.virt);
862 /* send FC LS_ACC response, override s_id */
863 ocs_fabric_set_topology(node, OCS_SPORT_TOPOLOGY_P2P);
864 ocs_send_flogi_p2p_acc(cbdata->io, ocs_be16toh(hdr->ox_id), fc_be24toh(hdr->d_id), NULL, NULL);
865 if (ocs_p2p_setup(node->sport)) {
866 node_printf(node, "p2p setup failed, shutting down node\n");
867 ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
869 ocs_node_transition(node, __ocs_p2p_wait_flogi_acc_cmpl, NULL);
875 case OCS_EVT_LOGO_RCVD: {
876 fc_header_t *hdr = cbdata->header->dma.virt;
878 if (!node->sport->domain->attached) {
879 /* most likely a frame left over from before a link down; drop and
880 * shut node down w/ "explicit logout" so pending frames are processed */
881 node_printf(node, "%s domain not attached, dropping\n", ocs_sm_event_name(evt));
882 ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
885 ocs_send_logo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
886 ocs_node_transition(node, __ocs_d_wait_logo_acc_cmpl, NULL);
/* ELS requests that require a port login are rejected with
 * LS_RJT / NPort-login-required while in the initial state. */
890 case OCS_EVT_PRLI_RCVD:
891 case OCS_EVT_PRLO_RCVD:
892 case OCS_EVT_PDISC_RCVD:
893 case OCS_EVT_ADISC_RCVD:
894 case OCS_EVT_RSCN_RCVD: {
895 fc_header_t *hdr = cbdata->header->dma.virt;
896 if (!node->sport->domain->attached) {
897 /* most likely a frame left over from before a link down; drop and
898 * shut node down w/ "explicit logout" so pending frames are processed */
899 node_printf(node, "%s domain not attached, dropping\n", ocs_sm_event_name(evt));
900 ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
903 node_printf(node, "%s received, sending reject\n", ocs_sm_event_name(evt));
904 ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id),
905 FC_REASON_UNABLE_TO_PERFORM, FC_EXPL_NPORT_LOGIN_REQUIRED, 0,
911 case OCS_EVT_FCP_CMD_RCVD: {
912 /* note: problem, we're now expecting an ELS REQ completion
913 * from both the LOGO and PLOGI */
914 if (!node->sport->domain->attached) {
915 /* most likely a frame left over from before a link down; drop and
916 * shut node down w/ "explicit logout" so pending frames are processed */
917 node_printf(node, "%s domain not attached, dropping\n", ocs_sm_event_name(evt));
918 ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
923 node_printf(node, "FCP_CMND received, send LOGO\n");
924 if (ocs_send_logo(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, 0, NULL, NULL) == NULL) {
925 /* failed to send LOGO, go ahead and cleanup node anyways */
926 node_printf(node, "Failed to send LOGO\n");
927 ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
929 /* sent LOGO, wait for response */
930 ocs_node_transition(node, __ocs_d_wait_logo_rsp, NULL);
934 case OCS_EVT_DOMAIN_ATTACH_OK:
935 /* don't care about domain_attach_ok */
939 __ocs_d_common(__func__, ctx, evt, arg);
948 * @brief Device node state machine: Wait on a response for a sent PLOGI.
950 * <h3 class="desc">Description</h3>
951 * State is entered when an initiator-capable node has sent
952 * a PLOGI and is waiting for a response.
954 * @param ctx Remote node state machine context.
955 * @param evt Event to process.
956 * @param arg Per event optional argument.
958 * @return Returns NULL.
962 __ocs_d_wait_plogi_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
965 ocs_node_cb_t *cbdata = arg;
966 std_node_state_decl();
971 case OCS_EVT_PLOGI_RCVD: {
973 /* received PLOGI with svc parms, go ahead and attach node
974 * when PLOGI that was sent ultimately completes, it'll be a no-op
977 /* TODO: there is an outstanding PLOGI sent, can we set a flag
978 * to indicate that we don't want to retry it if it times out? */
979 ocs_node_save_sparms(node, cbdata->payload->dma.virt);
980 ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PLOGI);
981 /* sm: domain->attached / ocs_node_attach */
982 rc = ocs_node_attach(node);
983 ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
984 if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
985 ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
990 case OCS_EVT_PRLI_RCVD:
992 /* sent PLOGI and before completion was seen, received the
993 * PRLI from the remote node (WCQEs and RCQEs come in on
994 * different queues and order of processing cannot be assumed)
995 * Save OXID so PRLI can be sent after the attach and continue
996 * to wait for PLOGI response
998 ocs_process_prli_payload(node, cbdata->payload->dma.virt);
999 if (ocs->fc_type == node->fc_type) {
1000 ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PRLI);
1001 ocs_node_transition(node, __ocs_d_wait_plogi_rsp_recvd_prli, NULL);
1003 /* TODO this need to be looked at. What do we do here ? */
1007 /* TODO this need to be looked at. we could very well be logged in */
1008 case OCS_EVT_LOGO_RCVD: /* why don't we do a shutdown here?? */
1009 case OCS_EVT_PRLO_RCVD:
1010 case OCS_EVT_PDISC_RCVD:
1011 case OCS_EVT_FDISC_RCVD:
1012 case OCS_EVT_ADISC_RCVD:
1013 case OCS_EVT_RSCN_RCVD:
1014 case OCS_EVT_SCR_RCVD: {
1015 fc_header_t *hdr = cbdata->header->dma.virt;
1016 node_printf(node, "%s received, sending reject\n", ocs_sm_event_name(evt));
1017 ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id),
1018 FC_REASON_UNABLE_TO_PERFORM, FC_EXPL_NPORT_LOGIN_REQUIRED, 0,
1024 case OCS_EVT_SRRS_ELS_REQ_OK: /* PLOGI response received */
1025 /* Completion from PLOGI sent */
1026 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_d_common, __func__)) {
1029 ocs_assert(node->els_req_cnt, NULL);
1030 node->els_req_cnt--;
1031 /* sm: save sparams, ocs_node_attach */
1032 ocs_node_save_sparms(node, cbdata->els->els_rsp.virt);
/* Skip the 4-byte ELS command code at the front of the response when
 * displaying the service parameters. */
1033 ocs_display_sparams(node->display_name, "plogi rcvd resp", 0, NULL,
1034 ((uint8_t*)cbdata->els->els_rsp.virt) + 4);
1035 rc = ocs_node_attach(node);
1036 ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
1037 if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
1038 ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
1042 case OCS_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */
1043 /* PLOGI failed, shutdown the node */
1044 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_d_common, __func__)) {
1047 ocs_assert(node->els_req_cnt, NULL);
1048 node->els_req_cnt--;
1049 ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
1052 case OCS_EVT_SRRS_ELS_REQ_RJT: /* Our PLOGI was rejected, this is ok in some cases */
1053 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_d_common, __func__)) {
1056 ocs_assert(node->els_req_cnt, NULL);
1057 node->els_req_cnt--;
1060 case OCS_EVT_FCP_CMD_RCVD: {
1061 /* not logged in yet and outstanding PLOGI so don't send LOGO,
1064 node_printf(node, "FCP_CMND received, drop\n");
1069 __ocs_d_common(__func__, ctx, evt, arg);
1077 * @ingroup device_sm
1078 * @brief Device node state machine: Waiting on a response for a
1081 * <h3 class="desc">Description</h3>
1082 * State is entered when an initiator-capable node has sent
1083 * a PLOGI and is waiting for a response. Before receiving the
1084 * response, a PRLI was received, implying that the PLOGI was
1087 * @param ctx Remote node state machine context.
1088 * @param evt Event to process.
1089 * @param arg Per event optional argument.
1091 * @return Returns NULL.
1095 __ocs_d_wait_plogi_rsp_recvd_prli(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1098 ocs_node_cb_t *cbdata = arg;
1099 std_node_state_decl();
1106 * Since we've received a PRLI, we have a port login and will
1107 * just need to wait for the PLOGI response to do the node
1108 * attach and then we can send the LS_ACC for the PRLI. If,
1109 * during this time, we receive FCP_CMNDs (which is possible
1110 * since we've already sent a PRLI and our peer may have accepted).
1111 * At this time, we are not waiting on any other unsolicited
1112 * frames to continue with the login process. Thus, it will not
1113 * hurt to hold frames here.
1115 ocs_node_hold_frames(node);
1119 ocs_node_accept_frames(node);
1122 case OCS_EVT_SRRS_ELS_REQ_OK: /* PLOGI response received */
1123 /* Completion from PLOGI sent */
1124 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_d_common, __func__)) {
1127 ocs_assert(node->els_req_cnt, NULL);
1128 node->els_req_cnt--;
1129 /* sm: save sparams, ocs_node_attach */
1130 ocs_node_save_sparms(node, cbdata->els->els_rsp.virt);
/* Skip the 4-byte ELS command code at the front of the response when
 * displaying the service parameters. */
1131 ocs_display_sparams(node->display_name, "plogi rcvd resp", 0, NULL,
1132 ((uint8_t*)cbdata->els->els_rsp.virt) + 4);
1133 rc = ocs_node_attach(node);
1134 ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
1135 if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
1136 ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
1140 case OCS_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */
1141 case OCS_EVT_SRRS_ELS_REQ_RJT:
1142 /* PLOGI failed, shutdown the node */
1143 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_d_common, __func__)) {
1146 ocs_assert(node->els_req_cnt, NULL);
1147 node->els_req_cnt--;
1148 ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
1152 __ocs_d_common(__func__, ctx, evt, arg);
1160 * @ingroup device_sm
1161 * @brief Device node state machine: Wait for a domain attach.
1163 * <h3 class="desc">Description</h3>
1164 * Waits for a domain-attach complete ok event.
1166 * @param ctx Remote node state machine context.
1167 * @param evt Event to process.
1168 * @param arg Per event optional argument.
1170 * @return Returns NULL.
1174 __ocs_d_wait_domain_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1177 std_node_state_decl();
/* Hold frames until the domain attach completes. */
1183 ocs_node_hold_frames(node);
1187 ocs_node_accept_frames(node);
1190 case OCS_EVT_DOMAIN_ATTACH_OK:
1191 ocs_assert(node->sport->domain->attached, NULL);
1192 /* sm: ocs_node_attach */
1193 rc = ocs_node_attach(node);
1194 ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
1195 if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
1196 ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
1201 __ocs_d_common(__func__, ctx, evt, arg);
1208 * @ingroup device_sm
1209 * @brief Device node state machine: Wait for topology
1212 * <h3 class="desc">Description</h3>
1213 * Waits for topology notification from fabric node, then
1214 * attaches domain and node.
1216 * @param ctx Remote node state machine context.
1217 * @param evt Event to process.
1218 * @param arg Per event optional argument.
1220 * @return Returns NULL.
1224 __ocs_d_wait_topology_notify(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1227 std_node_state_decl();
/* Frames are held while the topology is still unknown; released on
 * state exit (case labels elided in this excerpt). */
1233 ocs_node_hold_frames(node);
1237 ocs_node_accept_frames(node);
1240 case OCS_EVT_SPORT_TOPOLOGY_NOTIFY: {
/* The topology value is carried in 'arg' by value (cast of the
 * pointer itself, not a dereference). */
1241 ocs_sport_topology_e topology = (ocs_sport_topology_e)arg;
1242 ocs_assert(!node->sport->domain->attached, NULL);
1243 ocs_assert(node->send_ls_acc == OCS_NODE_SEND_LS_ACC_PLOGI, NULL);
1244 node_printf(node, "topology notification, topology=%d\n", topology);
1246 /* At the time the PLOGI was received, the topology was unknown,
1247 * so we didn't know which node would perform the domain attach:
1248 * 1. The node from which the PLOGI was sent (p2p) or
1249 * 2. The node to which the FLOGI was sent (fabric).
1251 if (topology == OCS_SPORT_TOPOLOGY_P2P) {
1252 /* if this is p2p, need to attach to the domain using the
1253 * d_id from the PLOGI received
1255 ocs_domain_attach(node->sport->domain, node->ls_acc_did);
1257 /* else, if this is fabric, the domain attach should be performed
1258 * by the fabric node (node sending FLOGI); just wait for attach
1262 ocs_node_transition(node, __ocs_d_wait_domain_attach, NULL);
1265 case OCS_EVT_DOMAIN_ATTACH_OK:
1266 ocs_assert(node->sport->domain->attached, NULL);
1267 node_printf(node, "domain attach ok\n");
1268 /*sm: ocs_node_attach */
/* Transition before checking rc so a synchronous attach completion
 * is posted as an event to the wait-node-attach state. */
1269 rc = ocs_node_attach(node);
1270 ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
1271 if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
1272 ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
/* Unhandled events fall through to the common device-SM handler. */
1277 __ocs_d_common(__func__, ctx, evt, arg);
1284 * @ingroup device_sm
1285 * @brief Device node state machine: Wait for a node attach when found by a remote node.
1287 * @param ctx Remote node state machine context.
1288 * @param evt Event to process.
1289 * @param arg Per event optional argument.
1291 * @return Returns NULL.
1295 __ocs_d_wait_node_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1297 std_node_state_decl();
/* Hold unsolicited frames until the attach completes; released on
 * state exit (case labels elided in this excerpt). */
1303 ocs_node_hold_frames(node);
1307 ocs_node_accept_frames(node);
1310 case OCS_EVT_NODE_ATTACH_OK:
1311 node->attached = TRUE;
/* Now that the node is attached, send whichever ELS accept was
 * deferred while waiting (recorded in node->send_ls_acc). */
1312 switch (node->send_ls_acc) {
1313 case OCS_NODE_SEND_LS_ACC_PLOGI: {
1314 /* sm: send_plogi_acc is set / send PLOGI acc */
1315 /* Normal case for T, or I+T */
1316 ocs_send_plogi_acc(node->ls_acc_io, node->ls_acc_oxid, NULL, NULL);
1317 ocs_node_transition(node, __ocs_d_wait_plogi_acc_cmpl, NULL);
/* Clear the deferred-accept bookkeeping; ls_acc_io ownership has
 * passed to the PLOGI accept just sent. */
1318 node->send_ls_acc = OCS_NODE_SEND_LS_ACC_NONE;
1319 node->ls_acc_io = NULL;
1322 case OCS_NODE_SEND_LS_ACC_PRLI: {
1323 ocs_d_send_prli_rsp(node->ls_acc_io, node->ls_acc_oxid);
1324 node->send_ls_acc = OCS_NODE_SEND_LS_ACC_NONE;
1325 node->ls_acc_io = NULL;
1328 case OCS_NODE_SEND_LS_ACC_NONE:
1330 /* Normal case for I */
1331 /* sm: send_plogi_acc is not set / send PLOGI acc */
1332 ocs_node_transition(node, __ocs_d_port_logged_in, NULL);
1337 case OCS_EVT_NODE_ATTACH_FAIL:
1338 /* node attach failed, shutdown the node */
1339 node->attached = FALSE;
1340 node_printf(node, "node attach failed\n");
1341 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1342 ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
1345 /* Handle shutdown events */
/* A shutdown request arriving mid-attach is recorded (reason) and the
 * node parks in a state that waits for the attach event first. */
1346 case OCS_EVT_SHUTDOWN:
1347 node_printf(node, "%s received\n", ocs_sm_event_name(evt));
1348 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1349 ocs_node_transition(node, __ocs_d_wait_attach_evt_shutdown, NULL);
1351 case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
1352 node_printf(node, "%s received\n", ocs_sm_event_name(evt));
1353 node->shutdown_reason = OCS_NODE_SHUTDOWN_EXPLICIT_LOGO;
1354 ocs_node_transition(node, __ocs_d_wait_attach_evt_shutdown, NULL);
1356 case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
1357 node_printf(node, "%s received\n", ocs_sm_event_name(evt));
1358 node->shutdown_reason = OCS_NODE_SHUTDOWN_IMPLICIT_LOGO;
1359 ocs_node_transition(node, __ocs_d_wait_attach_evt_shutdown, NULL);
/* Unhandled events fall through to the common device-SM handler. */
1362 __ocs_d_common(__func__, ctx, evt, arg);
1370 * @ingroup device_sm
1371 * @brief Device node state machine: Wait for a node/domain
1372 * attach then shutdown node.
1374 * @param ctx Remote node state machine context.
1375 * @param evt Event to process.
1376 * @param arg Per event optional argument.
1378 * @return Returns NULL.
1382 __ocs_d_wait_attach_evt_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1384 std_node_state_decl();
/* Frames stay held — this node is on its way to shutdown once the
 * outstanding attach completes (case labels elided in this excerpt). */
1390 ocs_node_hold_frames(node);
1394 ocs_node_accept_frames(node);
1397 /* wait for any of these attach events and then shutdown */
1398 case OCS_EVT_NODE_ATTACH_OK:
1399 node->attached = TRUE;
1400 node_printf(node, "Attach evt=%s, proceed to shutdown\n", ocs_sm_event_name(evt));
1401 ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
1404 case OCS_EVT_NODE_ATTACH_FAIL:
1405 /* node attach failed, shutdown the node */
1406 node->attached = FALSE;
1407 node_printf(node, "Attach evt=%s, proceed to shutdown\n", ocs_sm_event_name(evt));
1408 ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
1411 /* ignore shutdown events as we're already in shutdown path */
1412 case OCS_EVT_SHUTDOWN:
1413 /* have default shutdown event take precedence */
1414 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
/* NOTE(review): OCS_EVT_SHUTDOWN appears to share the log statement
 * below with the explicit/implicit-LOGO cases (intervening lines are
 * elided in this excerpt) — confirm fallthrough in the full source. */
1416 case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
1417 case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
1418 node_printf(node, "%s received\n", ocs_sm_event_name(evt));
/* Unhandled events fall through to the common device-SM handler. */
1422 __ocs_d_common(__func__, ctx, evt, arg);
1430 * @ingroup device_sm
1431 * @brief Device node state machine: Port is logged in.
1433 * <h3 class="desc">Description</h3>
1434 * This state is entered when a remote port has completed port login (PLOGI).
1436 * @param ctx Remote node state machine context.
1437 * @param evt Event to process
1438 * @param arg Per event optional argument
1440 * @return Returns NULL.
1443 __ocs_d_port_logged_in(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1445 ocs_node_cb_t *cbdata = arg;
1446 std_node_state_decl();
1450 /* TODO: I+T: what if PLOGI response not yet received ? */
/* Presumably the state-entry case (label elided in this excerpt):
 * as an initiator, send PRLI once per login to non-domain-controller
 * peers; sent_prli guards against re-sending on state re-entry. */
1454 /* Normal case for I or I+T */
1455 if (node->sport->enable_ini && !FC_ADDR_IS_DOMAIN_CTRL(node->rnode.fc_id)
1456 && !node->sent_prli) {
1457 /* sm: if enable_ini / send PRLI */
1458 ocs_send_prli(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
1459 node->sent_prli = TRUE;
1460 /* can now expect ELS_REQ_OK/FAIL/RJT */
1464 case OCS_EVT_FCP_CMD_RCVD: {
1465 /* For target functionality send PRLO and drop the CMD frame. */
1466 if (node->sport->enable_tgt) {
1467 if (ocs_send_prlo(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT,
1468 OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL)) {
1469 ocs_node_transition(node, __ocs_d_wait_prlo_rsp, NULL);
1475 case OCS_EVT_PRLI_RCVD: {
1476 fc_header_t *hdr = cbdata->header->dma.virt;
1478 /* Normal for T or I+T */
1480 ocs_process_prli_payload(node, cbdata->payload->dma.virt);
/* OX_ID from the received frame is big-endian on the wire. */
1481 ocs_d_send_prli_rsp(cbdata->io, ocs_be16toh(hdr->ox_id));
1485 case OCS_EVT_SRRS_ELS_REQ_OK: { /* PRLI response */
1486 /* Normal case for I or I+T */
1487 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PRLI, __ocs_d_common, __func__)) {
1490 ocs_assert(node->els_req_cnt, NULL);
1491 node->els_req_cnt--;
1492 /* sm: process PRLI payload */
1493 ocs_process_prli_payload(node, cbdata->els->els_rsp.virt);
1494 ocs_node_transition(node, __ocs_d_device_ready, NULL);
1498 case OCS_EVT_SRRS_ELS_REQ_FAIL: { /* PRLI response failed */
1499 /* I, I+T, assume some link failure, shutdown node */
1500 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PRLI, __ocs_d_common, __func__)) {
1503 ocs_assert(node->els_req_cnt, NULL);
1504 node->els_req_cnt--;
1505 ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
1509 case OCS_EVT_SRRS_ELS_REQ_RJT: {/* PRLI rejected by remote */
1510 /* Normal for I, I+T (connected to an I) */
1511 /* Node doesn't want to be a target, stay here and wait for a PRLI from the remote node
1512 * if it really wants to connect to us as target */
1513 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PRLI, __ocs_d_common, __func__)) {
1516 ocs_assert(node->els_req_cnt, NULL);
1517 node->els_req_cnt--;
1521 case OCS_EVT_SRRS_ELS_CMPL_OK: {
1522 /* Normal T, I+T, target-server rejected the process login */
1523 /* This would be received only in the case where we sent LS_RJT for the PRLI, so
1524 * do nothing. (note: as T only we could shutdown the node)
1526 ocs_assert(node->els_cmpl_cnt, NULL);
1527 node->els_cmpl_cnt--;
1531 case OCS_EVT_PLOGI_RCVD: {
1532 /* sm: save sparams, set send_plogi_acc, post implicit logout
1533 * Save plogi parameters */
1534 ocs_node_save_sparms(node, cbdata->payload->dma.virt);
1535 ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PLOGI);
1537 /* Restart node attach with new service parameters, and send ACC */
1538 ocs_node_post_event(node, OCS_EVT_SHUTDOWN_IMPLICIT_LOGO, NULL);
1542 case OCS_EVT_LOGO_RCVD: {
1544 fc_header_t *hdr = cbdata->header->dma.virt;
1545 node_printf(node, "%s received attached=%d\n", ocs_sm_event_name(evt), node->attached);
1546 ocs_send_logo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
1547 ocs_node_transition(node, __ocs_d_wait_logo_acc_cmpl, NULL);
/* Unhandled events fall through to the common device-SM handler. */
1552 __ocs_d_common(__func__, ctx, evt, arg);
1560 * @ingroup device_sm
1561 * @brief Device node state machine: Wait for a LOGO accept.
1563 * <h3 class="desc">Description</h3>
1564 * Waits for a LOGO accept completion.
1566 * @param ctx Remote node state machine context.
1567 * @param evt Event to process
1568 * @param arg Per event optional argument
1570 * @return Returns NULL.
1574 __ocs_d_wait_logo_acc_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1576 std_node_state_decl();
/* Frames held while the LOGO ACC is in flight; released on state exit
 * (case labels elided in this excerpt). */
1582 ocs_node_hold_frames(node);
1586 ocs_node_accept_frames(node);
/* Either completion outcome ends the login session: the peer asked us
 * to log out, so post an explicit-logout shutdown regardless. */
1589 case OCS_EVT_SRRS_ELS_CMPL_OK:
1590 case OCS_EVT_SRRS_ELS_CMPL_FAIL:
1591 /* sm: / post explicit logout */
1592 ocs_assert(node->els_cmpl_cnt, NULL);
1593 node->els_cmpl_cnt--;
1594 ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
/* Unhandled events fall through to the common device-SM handler. */
1597 __ocs_d_common(__func__, ctx, evt, arg);
1605 * @ingroup device_sm
1606 * @brief Device node state machine: Device is ready.
1608 * @param ctx Remote node state machine context.
1609 * @param evt Event to process.
1610 * @param arg Per event optional argument.
1612 * @return Returns NULL.
1616 __ocs_d_device_ready(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1618 ocs_node_cb_t *cbdata = arg;
1619 std_node_state_decl();
/* FCP_CMD_RCVD is too frequent to trace (presumably node_sm_trace()
 * sits inside this guard — body elided in this excerpt). */
1621 if (evt != OCS_EVT_FCP_CMD_RCVD) {
/* Presumably the state-entry case (label elided): announce the remote
 * port and register it with the SCSI layer in whichever role(s) the
 * local sport enables. */
1627 node->fcp_enabled = TRUE;
1629 device_printf(ocs->dev, "[%s] found (initiator) WWPN %s WWNN %s\n", node->display_name,
1630 node->wwpn, node->wwnn);
1631 if (node->sport->enable_tgt)
1632 ocs_scsi_new_initiator(node);
1635 device_printf(ocs->dev, "[%s] found (target) WWPN %s WWNN %s\n", node->display_name,
1636 node->wwpn, node->wwnn);
1637 if (node->sport->enable_ini)
1638 ocs_scsi_new_target(node);
/* On state exit, FCP traffic is no longer accepted. */
1643 node->fcp_enabled = FALSE;
1646 case OCS_EVT_PLOGI_RCVD: {
1647 /* sm: save sparams, set send_plogi_acc, post implicit logout
1648 * Save plogi parameters */
1649 ocs_node_save_sparms(node, cbdata->payload->dma.virt);
1650 ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PLOGI);
1652 /* Restart node attach with new service parameters, and send ACC */
1653 ocs_node_post_event(node, OCS_EVT_SHUTDOWN_IMPLICIT_LOGO, NULL);
1658 case OCS_EVT_PDISC_RCVD: {
/* PDISC is answered with the same accept format as PLOGI. */
1659 fc_header_t *hdr = cbdata->header->dma.virt;
1660 ocs_send_plogi_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
1664 case OCS_EVT_PRLI_RCVD: {
1665 /* T, I+T: remote initiator is slow to get started */
1666 fc_header_t *hdr = cbdata->header->dma.virt;
1668 ocs_process_prli_payload(node, cbdata->payload->dma.virt);
1670 /* sm: send PRLI acc/reject */
/* Accept only if the requested FC-4 type matches ours. */
1671 if (ocs->fc_type == node->fc_type)
1672 ocs_send_prli_acc(cbdata->io, ocs_be16toh(hdr->ox_id), ocs->fc_type, NULL, NULL);
1674 ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id), FC_REASON_UNABLE_TO_PERFORM,
1675 FC_EXPL_REQUEST_NOT_SUPPORTED, 0, NULL, NULL);
1679 case OCS_EVT_PRLO_RCVD: {
1680 fc_header_t *hdr = cbdata->header->dma.virt;
1681 fc_prlo_payload_t *prlo = cbdata->payload->dma.virt;
1683 /* sm: send PRLO acc/reject */
1684 if (ocs->fc_type == prlo->type)
1685 ocs_send_prlo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), ocs->fc_type, NULL, NULL);
1687 ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id), FC_REASON_UNABLE_TO_PERFORM,
1688 FC_EXPL_REQUEST_NOT_SUPPORTED, 0, NULL, NULL);
1689 /*TODO: need implicit logout */
1693 case OCS_EVT_LOGO_RCVD: {
1694 fc_header_t *hdr = cbdata->header->dma.virt;
1695 node_printf(node, "%s received attached=%d\n", ocs_sm_event_name(evt), node->attached);
1696 ocs_send_logo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
1697 ocs_node_transition(node, __ocs_d_wait_logo_acc_cmpl, NULL);
1701 case OCS_EVT_ADISC_RCVD: {
1702 fc_header_t *hdr = cbdata->header->dma.virt;
1703 ocs_send_adisc_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
1707 case OCS_EVT_RRQ_RCVD: {
1708 fc_header_t *hdr = cbdata->header->dma.virt;
/* RRQ gets a plain LS_ACC; there is no exchange state to recover. */
1710 ocs_send_ls_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
1714 case OCS_EVT_ABTS_RCVD:
1715 ocs_process_abts(cbdata->io, cbdata->header->dma.virt);
1718 case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
1721 case OCS_EVT_NODE_REFOUND:
/* MISSING only matters when RSCN-driven discovery is enabled;
 * otherwise the node stays put. */
1724 case OCS_EVT_NODE_MISSING:
1725 if (node->sport->enable_rscn) {
1726 ocs_node_transition(node, __ocs_d_device_gone, NULL);
1730 case OCS_EVT_SRRS_ELS_CMPL_OK:
1731 /* T, or I+T, PRLI accept completed ok */
1732 ocs_assert(node->els_cmpl_cnt, NULL);
1733 node->els_cmpl_cnt--;
1736 case OCS_EVT_SRRS_ELS_CMPL_FAIL:
1737 /* T, or I+T, PRLI accept failed to complete */
1738 ocs_assert(node->els_cmpl_cnt, NULL);
1739 node->els_cmpl_cnt--;
1740 node_printf(node, "Failed to send PRLI LS_ACC\n");
/* Unhandled events fall through to the common device-SM handler. */
1744 __ocs_d_common(__func__, ctx, evt, arg);
1752 * @ingroup device_sm
1753 * @brief Device node state machine: Node is gone (absent from GID_PT).
1755 * <h3 class="desc">Description</h3>
1756 * State entered when a node is detected as being gone (absent from GID_PT).
1758 * @param ctx Remote node state machine context.
1759 * @param evt Event to process
1760 * @param arg Per event optional argument
1762 * @return Returns NULL.
1766 __ocs_d_device_gone(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
/* rc / rc_2 track the SCSI-layer teardown calls; OCS_SCSI_CALL_COMPLETE
 * means the call finished synchronously (or was not needed). */
1768 int32_t rc = OCS_SCSI_CALL_COMPLETE;
1769 int32_t rc_2 = OCS_SCSI_CALL_COMPLETE;
1770 ocs_node_cb_t *cbdata = arg;
1771 std_node_state_decl();
1776 case OCS_EVT_ENTER: {
/* Index is (targ << 1) | init, so: 0=none, 1=initiator, 2=target,
 * 3=initiator+target. */
1777 const char *labels[] = {"none", "initiator", "target", "initiator+target"};
1779 device_printf(ocs->dev, "[%s] missing (%s) WWPN %s WWNN %s\n", node->display_name,
1780 labels[(node->targ << 1) | (node->init)], node->wwpn, node->wwnn);
/* Tear down the SCSI-layer object(s) matching how this node was
 * enabled: delete it as a target, as an initiator, or both. */
1782 switch(ocs_node_get_enable(node)) {
1783 case OCS_NODE_ENABLE_T_TO_T:
1784 case OCS_NODE_ENABLE_I_TO_T:
1785 case OCS_NODE_ENABLE_IT_TO_T:
1786 rc = ocs_scsi_del_target(node, OCS_SCSI_TARGET_MISSING);
1789 case OCS_NODE_ENABLE_T_TO_I:
1790 case OCS_NODE_ENABLE_I_TO_I:
1791 case OCS_NODE_ENABLE_IT_TO_I:
1792 rc = ocs_scsi_del_initiator(node, OCS_SCSI_INITIATOR_MISSING);
1795 case OCS_NODE_ENABLE_T_TO_IT:
1796 rc = ocs_scsi_del_initiator(node, OCS_SCSI_INITIATOR_MISSING);
1799 case OCS_NODE_ENABLE_I_TO_IT:
1800 rc = ocs_scsi_del_target(node, OCS_SCSI_TARGET_MISSING);
1803 case OCS_NODE_ENABLE_IT_TO_IT:
1804 rc = ocs_scsi_del_initiator(node, OCS_SCSI_INITIATOR_MISSING);
1805 rc_2 = ocs_scsi_del_target(node, OCS_SCSI_TARGET_MISSING);
1809 rc = OCS_SCSI_CALL_COMPLETE;
/* Only shut down now if every teardown call completed synchronously;
 * otherwise shutdown is presumably driven by the deferred completion
 * (callback path elided in this excerpt — confirm in full source). */
1814 if ((rc == OCS_SCSI_CALL_COMPLETE) && (rc_2 == OCS_SCSI_CALL_COMPLETE)) {
1815 ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
1820 case OCS_EVT_NODE_REFOUND:
1821 /* two approaches, reauthenticate with PLOGI/PRLI, or ADISC */
1823 /* reauthenticate with PLOGI/PRLI */
1824 /* ocs_node_transition(node, __ocs_d_discovered, NULL); */
1826 /* reauthenticate with ADISC
1828 ocs_send_adisc(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
1829 ocs_node_transition(node, __ocs_d_wait_adisc_rsp, NULL);
1832 case OCS_EVT_PLOGI_RCVD: {
1833 /* sm: save sparams, set send_plogi_acc, post implicit logout
1834 * Save plogi parameters */
1835 ocs_node_save_sparms(node, cbdata->payload->dma.virt);
1836 ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PLOGI);
1838 /* Restart node attach with new service parameters, and send ACC */
1839 ocs_node_post_event(node, OCS_EVT_SHUTDOWN_IMPLICIT_LOGO, NULL);
1843 case OCS_EVT_FCP_CMD_RCVD: {
1844 /* most likely a stale frame (received prior to link down), if attempt
1845 * to send LOGO, will probably timeout and eat up 20s; thus, drop FCP_CMND
1847 node_printf(node, "FCP_CMND received, drop\n");
1850 case OCS_EVT_LOGO_RCVD: {
1852 fc_header_t *hdr = cbdata->header->dma.virt;
1853 node_printf(node, "%s received attached=%d\n", ocs_sm_event_name(evt), node->attached);
1854 /* sm: send LOGO acc */
1855 ocs_send_logo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
1856 ocs_node_transition(node, __ocs_d_wait_logo_acc_cmpl, NULL);
/* Unhandled events fall through to the common device-SM handler. */
1860 __ocs_d_common(__func__, ctx, evt, arg);
1868 * @ingroup device_sm
1869 * @brief Device node state machine: Wait for the ADISC response.
1871 * <h3 class="desc">Description</h3>
1872 * Waits for the ADISC response from the remote node.
1874 * @param ctx Remote node state machine context.
1875 * @param evt Event to process.
1876 * @param arg Per event optional argument.
1878 * @return Returns NULL.
1882 __ocs_d_wait_adisc_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1884 ocs_node_cb_t *cbdata = arg;
1885 std_node_state_decl();
1890 case OCS_EVT_SRRS_ELS_REQ_OK:
1891 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_ADISC, __ocs_d_common, __func__)) {
1894 ocs_assert(node->els_req_cnt, NULL);
1895 node->els_req_cnt--;
1896 ocs_node_transition(node, __ocs_d_device_ready, NULL);
1899 case OCS_EVT_SRRS_ELS_REQ_RJT:
1900 /* received an LS_RJT, in this case, send shutdown (explicit logo)
1901 * event which will unregister the node, and start over with PLOGI
1903 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_ADISC, __ocs_d_common, __func__)) {
1906 ocs_assert(node->els_req_cnt, NULL);
1907 node->els_req_cnt--;
1908 /*sm: post explicit logout */
1909 ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
1912 case OCS_EVT_LOGO_RCVD: {
1913 /* In this case, we have the equivalent of an LS_RJT for the ADISC,
1914 * so we need to abort the ADISC, and re-login with PLOGI
1916 /*sm: request abort, send LOGO acc */
1917 fc_header_t *hdr = cbdata->header->dma.virt;
1918 node_printf(node, "%s received attached=%d\n", ocs_sm_event_name(evt), node->attached);
1919 ocs_send_logo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
1920 ocs_node_transition(node, __ocs_d_wait_logo_acc_cmpl, NULL);
1924 __ocs_d_common(__func__, ctx, evt, arg);