2 * Copyright (c) 2017 Broadcom. All rights reserved.
3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
15 * 3. Neither the name of the copyright holder nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
37 * This file implements remote node state machines for:
39 * - Fabric controller events.
40 * - Name/directory services interaction.
41 * - Point-to-point logins.
45 @defgroup fabric_sm Node State Machine: Fabric States
46 @defgroup ns_sm Node State Machine: Name/Directory Services States
47 @defgroup p2p_sm Node State Machine: Point-to-Point Node States
51 #include "ocs_fabric.h"
53 #include "ocs_device.h"
55 static void ocs_fabric_initiate_shutdown(ocs_node_t *node);
56 static void * __ocs_fabric_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
57 static int32_t ocs_start_ns_node(ocs_sport_t *sport);
58 static int32_t ocs_start_fabctl_node(ocs_sport_t *sport);
59 static int32_t ocs_process_gidpt_payload(ocs_node_t *node, fcct_gidpt_acc_t *gidpt, uint32_t gidpt_len);
60 static void ocs_process_rscn(ocs_node_t *node, ocs_node_cb_t *cbdata);
61 static uint64_t ocs_get_wwpn(fc_plogi_payload_t *sp);
62 static void gidpt_delay_timer_cb(void *arg);
66 * @brief Fabric node state machine: Initial state.
69 * Send an FLOGI to a well-known fabric.
71 * @param ctx Remote node sm context.
72 * @param evt Event to process.
73 * @param arg Per event optional argument.
75 * @return Returns NULL.
78 __ocs_fabric_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
80 std_node_state_decl();
85 case OCS_EVT_REENTER:	/* not sure why we're getting these ... */
86 ocs_log_debug(node->ocs, ">>> reenter !!\n");
/* fall through to ENTER handling: (re)start fabric login */
89 /* sm: / send FLOGI */
90 ocs_send_flogi(node, OCS_FC_FLOGI_TIMEOUT_SEC, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
91 ocs_node_transition(node, __ocs_fabric_flogi_wait_rsp, NULL);
/* all other events go to the common fabric-node handler */
95 __ocs_fabric_common(__func__, ctx, evt, arg);
104 * @brief Set sport topology.
107 * Set sport topology.
109 * @param node Pointer to the node for which the topology is set.
110 * @param topology Topology to set.
112 * @return Returns NULL.
115 ocs_fabric_set_topology(ocs_node_t *node, ocs_sport_topology_e topology)
/* Record the discovered topology on the node's parent sport. */
117 node->sport->topology = topology;
122 * @brief Notify sport topology.
124 * Notify all other nodes in the sport of the discovered topology.
125 * @param node Pointer to the node for which the topology is set.
126 * @return Returns NULL.
129 ocs_fabric_notify_topology(ocs_node_t *node)
131 ocs_node_t *tmp_node;
/* Snapshot the topology so the loop posts a consistent value. */
133 ocs_sport_topology_e topology = node->sport->topology;
135 /* now loop through the nodes in the sport and send topology notification */
136 ocs_sport_lock(node->sport);
137 ocs_list_foreach_safe(&node->sport->node_list, tmp_node, next) {
/* don't notify the node that discovered the topology */
138 if (tmp_node != node) {
139 ocs_node_post_event(tmp_node, OCS_EVT_SPORT_TOPOLOGY_NOTIFY, (void *)topology);
142 ocs_sport_unlock(node->sport);
147 * @brief Fabric node state machine: Wait for an FLOGI response.
150 * Wait for an FLOGI response event.
152 * @param ctx Remote node state machine context.
153 * @param evt Event to process.
154 * @param arg Per event optional argument.
156 * @return Returns NULL.
160 __ocs_fabric_flogi_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
162 ocs_node_cb_t *cbdata = arg;
163 std_node_state_decl();
168 case OCS_EVT_SRRS_ELS_REQ_OK: {
/* verify the completed ELS request really was our FLOGI */
170 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_FLOGI, __ocs_fabric_common, __func__)) {
173 ocs_assert(node->els_req_cnt, NULL);
/* stash the fabric's service parameters on the domain */
176 ocs_domain_save_sparms(node->sport->domain, cbdata->els->els_rsp.virt);
/* skip the 4-byte ELS command word when displaying sparams */
178 ocs_display_sparams(node->display_name, "flogi rcvd resp", 0, NULL,
179 ((uint8_t*)cbdata->els->els_rsp.virt) + 4);
181 /* Check to see if the fabric is an F_PORT or an N_PORT */
182 if (ocs_rnode_is_nport(cbdata->els->els_rsp.virt)) {
183 /* sm: if nport and p2p_winner / ocs_domain_attach */
184 ocs_fabric_set_topology(node, OCS_SPORT_TOPOLOGY_P2P);
185 if (ocs_p2p_setup(node->sport)) {
186 node_printf(node, "p2p setup failed, shutting down node\n");
187 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
188 ocs_fabric_initiate_shutdown(node);
190 if (node->sport->p2p_winner) {
191 ocs_node_transition(node, __ocs_p2p_wait_domain_attach, NULL);
192 if (!node->sport->domain->attached) {
193 node_printf(node, "p2p winner, domain not attached\n");
194 ocs_domain_attach(node->sport->domain, node->sport->p2p_port_id);
196 /* already attached, just send ATTACH_OK */
197 node_printf(node, "p2p winner, domain already attached\n");
198 ocs_node_post_event(node, OCS_EVT_DOMAIN_ATTACH_OK, NULL);
201 /* peer is p2p winner; PLOGI will be received on the
202 * remote SID=1 node; this node has served its purpose
204 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
205 ocs_fabric_initiate_shutdown(node);
209 /* sm: if not nport / ocs_domain_attach */
210 /* ext_status has the fc_id, attach domain */
211 ocs_fabric_set_topology(node, OCS_SPORT_TOPOLOGY_FABRIC);
212 ocs_fabric_notify_topology(node);
213 ocs_assert(!node->sport->domain->attached, NULL);
214 ocs_domain_attach(node->sport->domain, cbdata->ext_status);
215 ocs_node_transition(node, __ocs_fabric_wait_domain_attach, NULL);
/* FLOGI failed/aborted/rejected: no recovery path, take down the sport */
221 case OCS_EVT_ELS_REQ_ABORTED:
222 case OCS_EVT_SRRS_ELS_REQ_RJT:
223 case OCS_EVT_SRRS_ELS_REQ_FAIL: {
224 ocs_sport_t *sport = node->sport;
226 * with these errors, we have no recovery, so shutdown the sport, leave the link
227 * up and the domain ready
229 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_FLOGI, __ocs_fabric_common, __func__)) {
232 node_printf(node, "FLOGI failed evt=%s, shutting down sport [%s]\n", ocs_sm_event_name(evt),
233 sport->display_name);
234 ocs_assert(node->els_req_cnt, NULL);
236 ocs_sm_post_event(&sport->sm, OCS_EVT_SHUTDOWN, NULL);
241 __ocs_fabric_common(__func__, ctx, evt, arg);
250 * @brief Fabric node state machine: Initial state for a virtual port.
253 * State entered when a virtual port is created. Send FDISC.
255 * @param ctx Remote node state machine context.
256 * @param evt Event to process.
257 * @param arg Per event optional argument.
259 * @return Returns NULL.
262 __ocs_vport_fabric_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
264 std_node_state_decl();
/* sm: / send FDISC (virtual port login to the fabric) */
271 ocs_send_fdisc(node, OCS_FC_FLOGI_TIMEOUT_SEC, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
272 ocs_node_transition(node, __ocs_fabric_fdisc_wait_rsp, NULL);
/* all other events go to the common fabric-node handler */
276 __ocs_fabric_common(__func__, ctx, evt, arg);
285 * @brief Fabric node state machine: Wait for an FDISC response
288 * Used for a virtual port. Waits for an FDISC response. If OK, issue a HW port attach.
290 * @param ctx Remote node state machine context.
291 * @param evt Event to process.
292 * @param arg Per event optional argument.
294 * @return Returns NULL.
297 __ocs_fabric_fdisc_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
299 ocs_node_cb_t *cbdata = arg;
300 std_node_state_decl();
305 case OCS_EVT_SRRS_ELS_REQ_OK: {
306 /* fc_id is in ext_status */
307 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_FDISC, __ocs_fabric_common, __func__)) {
/* skip the 4-byte ELS command word when displaying sparams */
311 ocs_display_sparams(node->display_name, "fdisc rcvd resp", 0, NULL,
312 ((uint8_t*)cbdata->els->els_rsp.virt) + 4);
314 ocs_assert(node->els_req_cnt, NULL);
316 /* sm: ocs_sport_attach */
317 ocs_sport_attach(node->sport, cbdata->ext_status);
318 ocs_node_transition(node, __ocs_fabric_wait_domain_attach, NULL);
/* FDISC rejected or failed: shut down the virtual port */
323 case OCS_EVT_SRRS_ELS_REQ_RJT:
324 case OCS_EVT_SRRS_ELS_REQ_FAIL: {
325 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_FDISC, __ocs_fabric_common, __func__)) {
328 ocs_assert(node->els_req_cnt, NULL);
330 ocs_log_err(ocs, "FDISC failed, shutting down sport\n");
331 /* sm: shutdown sport */
332 ocs_sm_post_event(&node->sport->sm, OCS_EVT_SHUTDOWN, NULL);
337 __ocs_fabric_common(__func__, ctx, evt, arg);
346 * @brief Fabric node state machine: Wait for a domain/sport attach event.
349 * Waits for a domain/sport attach event.
351 * @param ctx Remote node state machine context.
352 * @param evt Event to process.
353 * @param arg Per event optional argument.
355 * @return Returns NULL.
358 __ocs_fabric_wait_domain_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
360 std_node_state_decl();
/* hold frames while waiting; accept again on exit */
366 ocs_node_hold_frames(node);
370 ocs_node_accept_frames(node);
372 case OCS_EVT_DOMAIN_ATTACH_OK:
373 case OCS_EVT_SPORT_ATTACH_OK: {
/* kick off the name-services node (sends PLOGI to the NS well-known address) */
376 rc = ocs_start_ns_node(node->sport);
380 /* sm: if enable_rscn / start fabctl node
381 * Instantiate the fabric controller (sends SCR) */
382 if (node->sport->enable_rscn) {
383 rc = ocs_start_fabctl_node(node->sport);
387 ocs_node_transition(node, __ocs_fabric_idle, NULL);
391 __ocs_fabric_common(__func__, ctx, evt, arg);
400 * @brief Fabric node state machine: Fabric node is idle.
403 * Wait for fabric node events.
405 * @param ctx Remote node state machine context.
406 * @param evt Event to process.
407 * @param arg Per event optional argument.
409 * @return Returns NULL.
412 __ocs_fabric_idle(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
414 std_node_state_decl();
/* DOMAIN_ATTACH_OK is expected here and deliberately ignored */
419 case OCS_EVT_DOMAIN_ATTACH_OK:
422 __ocs_fabric_common(__func__, ctx, evt, arg);
431 * @brief Name services node state machine: Initialize.
434 * A PLOGI is sent to the well-known name/directory services node.
436 * @param ctx Remote node state machine context.
437 * @param evt Event to process.
438 * @param arg Per event optional argument.
440 * @return Returns NULL.
443 __ocs_ns_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
445 std_node_state_decl();
/* sm: / send PLOGI to the name-services well-known address */
452 ocs_send_plogi(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
453 ocs_node_transition(node, __ocs_ns_plogi_wait_rsp, NULL);
456 __ocs_fabric_common(__func__, ctx, evt, arg);
465 * @brief Name services node state machine: Wait for a PLOGI response.
468 * Waits for a response from PLOGI to name services node, then issues a
469 * node attach request to the HW.
471 * @param ctx Remote node state machine context.
472 * @param evt Event to process.
473 * @param arg Per event optional argument.
475 * @return Returns NULL.
478 __ocs_ns_plogi_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
481 ocs_node_cb_t *cbdata = arg;
482 std_node_state_decl();
487 case OCS_EVT_SRRS_ELS_REQ_OK: {
488 /* Save service parameters */
489 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_fabric_common, __func__)) {
492 ocs_assert(node->els_req_cnt, NULL);
494 /* sm: save sparams, ocs_node_attach */
495 ocs_node_save_sparms(node, cbdata->els->els_rsp.virt);
496 ocs_display_sparams(node->display_name, "plogi rcvd resp", 0, NULL,
497 ((uint8_t*)cbdata->els->els_rsp.virt) + 4);
/* attach may complete synchronously; if so, post ATTACH_OK ourselves */
498 rc = ocs_node_attach(node);
499 ocs_node_transition(node, __ocs_ns_wait_node_attach, NULL);
500 if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
501 ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
506 __ocs_fabric_common(__func__, ctx, evt, arg);
515 * @brief Name services node state machine: Wait for a node attach completion.
518 * Waits for a node attach completion, then issues an RFTID name services
521 * @param ctx Remote node state machine context.
522 * @param evt Event to process.
523 * @param arg Per event optional argument.
525 * @return Returns NULL.
528 __ocs_ns_wait_node_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
530 std_node_state_decl();
/* hold frames while waiting; accept again on exit */
536 ocs_node_hold_frames(node);
540 ocs_node_accept_frames(node);
543 case OCS_EVT_NODE_ATTACH_OK:
544 node->attached = TRUE;
/* node attached: register our FC-4 types with the name server (RFT_ID) */
546 ocs_ns_send_rftid(node, OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT,
547 OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
548 ocs_node_transition(node, __ocs_ns_rftid_wait_rsp, NULL);
551 case OCS_EVT_NODE_ATTACH_FAIL:
552 /* node attach failed, shutdown the node */
553 node->attached = FALSE;
554 node_printf(node, "Node attach failed\n");
555 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
556 ocs_fabric_initiate_shutdown(node);
559 case OCS_EVT_SHUTDOWN:
560 node_printf(node, "Shutdown event received\n");
561 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
562 ocs_node_transition(node, __ocs_fabric_wait_attach_evt_shutdown, NULL);
565 /* if receive RSCN just ignore,
566 * we haven't sent GID_PT yet (ACC sent by fabctl node) */
567 case OCS_EVT_RSCN_RCVD:
571 __ocs_fabric_common(__func__, ctx, evt, arg);
580 * @brief Wait for a domain/sport/node attach completion, then
584 * Waits for a domain/sport/node attach completion, then shuts
587 * @param ctx Remote node state machine context.
588 * @param evt Event to process.
589 * @param arg Per event optional argument.
591 * @return Returns NULL.
594 __ocs_fabric_wait_attach_evt_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
596 std_node_state_decl();
/* hold frames while waiting; accept again on exit */
602 ocs_node_hold_frames(node);
606 ocs_node_accept_frames(node);
609 /* wait for any of these attach events and then shutdown */
610 case OCS_EVT_NODE_ATTACH_OK:
611 node->attached = TRUE;
612 node_printf(node, "Attach evt=%s, proceed to shutdown\n", ocs_sm_event_name(evt));
613 ocs_fabric_initiate_shutdown(node);
616 case OCS_EVT_NODE_ATTACH_FAIL:
617 node->attached = FALSE;
618 node_printf(node, "Attach evt=%s, proceed to shutdown\n", ocs_sm_event_name(evt));
619 ocs_fabric_initiate_shutdown(node);
622 /* ignore shutdown event as we're already in shutdown path */
623 case OCS_EVT_SHUTDOWN:
624 node_printf(node, "Shutdown event received\n");
628 __ocs_fabric_common(__func__, ctx, evt, arg);
637 * @brief Name services node state machine: Wait for an RFTID response event.
640 * Waits for an RFTID response event; if configured for an initiator operation,
641 * a GIDPT name services request is issued.
643 * @param ctx Remote node state machine context.
644 * @param evt Event to process.
645 * @param arg Per event optional argument.
647 * @return Returns NULL.
650 __ocs_ns_rftid_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
652 std_node_state_decl();
657 case OCS_EVT_SRRS_ELS_REQ_OK:
/* verify the completed CT request really was our RFT_ID */
658 if (node_check_ns_req(ctx, evt, arg, FC_GS_NAMESERVER_RFT_ID, __ocs_fabric_common, __func__)) {
661 ocs_assert(node->els_req_cnt, NULL);
/* RFT_ID done; register FC-4 features next (RFF_ID) */
664 ocs_ns_send_rffid(node, OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT,
665 OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
666 ocs_node_transition(node, __ocs_ns_rffid_wait_rsp, NULL);
669 /* if receive RSCN just ignore,
670 * we haven't sent GID_PT yet (ACC sent by fabctl node) */
671 case OCS_EVT_RSCN_RCVD:
675 __ocs_fabric_common(__func__, ctx, evt, arg);
684 * @brief Fabric node state machine: Wait for RFFID response event.
687 * Waits for an RFFID response event; if configured for an initiator operation,
688 * a GIDPT name services request is issued.
690 * @param ctx Remote node state machine context.
691 * @param evt Event to process.
692 * @param arg Per event optional argument.
694 * @return Returns NULL.
697 __ocs_ns_rffid_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
699 std_node_state_decl();
704 case OCS_EVT_SRRS_ELS_REQ_OK: {
/* verify the completed CT request really was our RFF_ID */
705 if (node_check_ns_req(ctx, evt, arg, FC_GS_NAMESERVER_RFF_ID, __ocs_fabric_common, __func__)) {
708 ocs_assert(node->els_req_cnt, NULL);
710 if (node->sport->enable_rscn) {
711 /* sm: if enable_rscn / send GIDPT */
712 ocs_ns_send_gidpt(node, OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT,
713 OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
714 ocs_node_transition(node, __ocs_ns_gidpt_wait_rsp, NULL);
716 /* if 'T' only, we're done, go to idle */
717 ocs_node_transition(node, __ocs_ns_idle, NULL);
721 /* if receive RSCN just ignore,
722 * we haven't sent GID_PT yet (ACC sent by fabctl node) */
723 case OCS_EVT_RSCN_RCVD:
727 __ocs_fabric_common(__func__, ctx, evt, arg);
736 * @brief Name services node state machine: Wait for a GIDPT response.
739 * Wait for a GIDPT response from the name server. Process the FC_IDs that are
740 * reported by creating new remote ports, as needed.
742 * @param ctx Remote node state machine context.
743 * @param evt Event to process.
744 * @param arg Per event optional argument.
746 * @return Returns NULL.
749 __ocs_ns_gidpt_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
751 ocs_node_cb_t *cbdata = arg;
752 std_node_state_decl();
757 case OCS_EVT_SRRS_ELS_REQ_OK: {
/* verify the completed CT request really was our GID_PT */
758 if (node_check_ns_req(ctx, evt, arg, FC_GS_NAMESERVER_GID_PT, __ocs_fabric_common, __func__)) {
761 ocs_assert(node->els_req_cnt, NULL);
763 /* sm: / process GIDPT payload */
764 ocs_process_gidpt_payload(node, cbdata->els->els_rsp.virt, cbdata->els->els_rsp.len);
765 /* TODO: should we logout at this point or just go idle */
766 ocs_node_transition(node, __ocs_ns_idle, NULL);
770 case OCS_EVT_SRRS_ELS_REQ_FAIL: {
771 /* not much we can do; will retry with the next RSCN */
772 node_printf(node, "GID_PT failed to complete\n");
773 ocs_assert(node->els_req_cnt, NULL);
775 ocs_node_transition(node, __ocs_ns_idle, NULL);
779 /* if receive RSCN here, queue up another discovery processing */
780 case OCS_EVT_RSCN_RCVD: {
781 node_printf(node, "RSCN received during GID_PT processing\n");
/* flag so __ocs_ns_idle re-runs discovery on entry */
782 node->rscn_pending = 1;
787 __ocs_fabric_common(__func__, ctx, evt, arg);
797 * @brief Name services node state machine: Idle state.
800 * Idle. Waiting for RSCN received events (posted from the fabric controller), and
801 * restarts the GIDPT name services query and processing.
803 * @param ctx Remote node state machine context.
804 * @param evt Event to process.
805 * @param arg Per event optional argument.
807 * @return Returns NULL.
810 __ocs_ns_idle(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
812 std_node_state_decl();
/* on entry: if an RSCN arrived while GID_PT was in flight, restart discovery */
818 if (!node->rscn_pending) {
821 node_printf(node, "RSCN pending, restart discovery\n");
822 node->rscn_pending = 0;
826 case OCS_EVT_RSCN_RCVD: {
828 * If target RSCN processing is enabled, and this is target only
829 * (not initiator), and tgt_rscn_delay is non-zero,
830 * then we delay issuing the GID_PT
832 if ((ocs->tgt_rscn_delay_msec != 0) && !node->sport->enable_ini && node->sport->enable_tgt &&
833 enable_target_rscn(ocs)) {
834 ocs_node_transition(node, __ocs_ns_gidpt_delay, NULL);
/* otherwise query the name server immediately */
836 ocs_ns_send_gidpt(node, OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT,
837 OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
838 ocs_node_transition(node, __ocs_ns_gidpt_wait_rsp, NULL);
844 __ocs_fabric_common(__func__, ctx, evt, arg);
852 * @brief Handle GIDPT delay timer callback
855 * Post an OCS_EVT_GIDPT_DELAY_EXPIRED event to the passed in node.
857 * @param arg Pointer to node.
862 gidpt_delay_timer_cb(void *arg)
864 ocs_node_t *node = arg;
/* one-shot timer: tear it down, then post the expiry event via the xport
 * so it is delivered in the proper node event context */
867 ocs_del_timer(&node->gidpt_delay_timer);
868 rc = ocs_xport_control(node->ocs->xport, OCS_XPORT_POST_NODE_EVENT, node, OCS_EVT_GIDPT_DELAY_EXPIRED, NULL);
870 ocs_log_err(node->ocs, "ocs_xport_control(OCS_XPORT_POST_NODE_EVENT) failed: %d\n", rc);
876 * @brief Name services node state machine: Delayed GIDPT.
879 * Waiting for GIDPT delay to expire before submitting GIDPT to name server.
881 * @param ctx Remote node state machine context.
882 * @param evt Event to process.
883 * @param arg Per event optional argument.
885 * @return Returns NULL.
888 __ocs_ns_gidpt_delay(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
890 std_node_state_decl();
895 case OCS_EVT_ENTER: {
/* this state is only entered when a target RSCN delay is configured */
898 ocs_assert(ocs->tgt_rscn_delay_msec != 0, NULL);
901 * Compute the delay time. Set to tgt_rscn_delay, if the time since last GIDPT
902 * is less than tgt_rscn_period, then use tgt_rscn_period.
904 delay_msec = ocs->tgt_rscn_delay_msec;
905 if ((ocs_msectime() - node->time_last_gidpt_msec) < ocs->tgt_rscn_period_msec) {
906 delay_msec = ocs->tgt_rscn_period_msec;
909 ocs_setup_timer(ocs, &node->gidpt_delay_timer, gidpt_delay_timer_cb, node, delay_msec);
914 case OCS_EVT_GIDPT_DELAY_EXPIRED:
/* record when this GID_PT was issued for the next delay computation */
915 node->time_last_gidpt_msec = ocs_msectime();
916 ocs_ns_send_gidpt(node, OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT,
917 OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
918 ocs_node_transition(node, __ocs_ns_gidpt_wait_rsp, NULL);
921 case OCS_EVT_RSCN_RCVD: {
/* already waiting to issue GID_PT; coalesce further RSCNs */
922 ocs_log_debug(ocs, "RSCN received while in GIDPT delay - no action\n");
927 __ocs_fabric_common(__func__, ctx, evt, arg);
936 * @brief Fabric controller node state machine: Initial state.
939 * Issue a PLOGI to a well-known fabric controller address.
941 * @param ctx Remote node state machine context.
942 * @param evt Event to process.
943 * @param arg Per event optional argument.
945 * @return Returns NULL.
948 __ocs_fabctl_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
950 ocs_node_t *node = ctx->app;
956 /* no need to login to fabric controller, just send SCR */
957 ocs_send_scr(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
958 ocs_node_transition(node, __ocs_fabctl_wait_scr_rsp, NULL);
/* a late attach completion may still arrive; just record it */
961 case OCS_EVT_NODE_ATTACH_OK:
962 node->attached = TRUE;
966 __ocs_fabric_common(__func__, ctx, evt, arg);
975 * @brief Fabric controller node state machine: Wait for a node attach request to complete.
979 * Wait for a node attach to complete. If successful, issue an SCR
980 * to the fabric controller, subscribing to all RSCN.
982 * @param ctx Remote node state machine context.
983 * @param evt Event to process.
984 * @param arg Per event optional argument.
986 * @return Returns NULL.
990 __ocs_fabctl_wait_node_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
992 std_node_state_decl();
/* hold frames while waiting; accept again on exit */
998 ocs_node_hold_frames(node);
1002 ocs_node_accept_frames(node);
1005 case OCS_EVT_NODE_ATTACH_OK:
1006 node->attached = TRUE;
1007 /* sm: / send SCR */
1008 ocs_send_scr(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
1009 ocs_node_transition(node, __ocs_fabctl_wait_scr_rsp, NULL);
1012 case OCS_EVT_NODE_ATTACH_FAIL:
1013 /* node attach failed, shutdown the node */
1014 node->attached = FALSE;
1015 node_printf(node, "Node attach failed\n");
1016 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1017 ocs_fabric_initiate_shutdown(node);
1020 case OCS_EVT_SHUTDOWN:
1021 node_printf(node, "Shutdown event received\n");
1022 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1023 ocs_node_transition(node, __ocs_fabric_wait_attach_evt_shutdown, NULL);
1027 __ocs_fabric_common(__func__, ctx, evt, arg);
1035 * @ingroup fabric_sm
1036 * @brief Fabric controller node state machine: Wait for an SCR response from the
1037 * fabric controller.
1040 * Waits for an SCR response from the fabric controller.
1042 * @param ctx Remote node state machine context.
1043 * @param evt Event to process.
1044 * @param arg Per event optional argument.
1046 * @return Returns NULL.
1049 __ocs_fabctl_wait_scr_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1051 std_node_state_decl();
1056 case OCS_EVT_SRRS_ELS_REQ_OK:
/* verify the completed ELS request really was our SCR */
1057 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_SCR, __ocs_fabric_common, __func__)) {
1060 ocs_assert(node->els_req_cnt, NULL);
1061 node->els_req_cnt--;
/* SCR accepted: fabric controller will now forward RSCNs to us */
1062 ocs_node_transition(node, __ocs_fabctl_ready, NULL);
1066 __ocs_fabric_common(__func__, ctx, evt, arg);
1074 * @ingroup fabric_sm
1075 * @brief Fabric controller node state machine: Ready.
1078 * In this state, the fabric controller sends a RSCN, which is received
1079 * by this node and is forwarded to the name services node object; and
1080 * the RSCN LS_ACC is sent.
1082 * @param ctx Remote node state machine context.
1083 * @param evt Event to process.
1084 * @param arg Per event optional argument.
1086 * @return Returns NULL.
1090 __ocs_fabctl_ready(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1092 ocs_node_cb_t *cbdata = arg;
1093 std_node_state_decl();
1098 case OCS_EVT_RSCN_RCVD: {
1099 fc_header_t *hdr = cbdata->header->dma.virt;
1101 /* sm: / process RSCN (forward to name services node),
/* then acknowledge the RSCN with LS_ACC and wait for its completion */
1103 ocs_process_rscn(node, cbdata);
1104 ocs_send_ls_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
1105 ocs_node_transition(node, __ocs_fabctl_wait_ls_acc_cmpl, NULL);
1110 __ocs_fabric_common(__func__, ctx, evt, arg);
1118 * @ingroup fabric_sm
1119 * @brief Fabric controller node state machine: Wait for LS_ACC.
1122 * Waits for the LS_ACC from the fabric controller.
1124 * @param ctx Remote node state machine context.
1125 * @param evt Event to process.
1126 * @param arg Per event optional argument.
1128 * @return Returns NULL.
1132 __ocs_fabctl_wait_ls_acc_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1134 std_node_state_decl();
/* hold frames while waiting; accept again on exit */
1140 ocs_node_hold_frames(node);
1144 ocs_node_accept_frames(node);
1147 case OCS_EVT_SRRS_ELS_CMPL_OK:
1148 ocs_assert(node->els_cmpl_cnt, NULL);
1149 node->els_cmpl_cnt--;
/* LS_ACC sent; go back to waiting for the next RSCN */
1150 ocs_node_transition(node, __ocs_fabctl_ready, NULL);
1154 __ocs_fabric_common(__func__, ctx, evt, arg);
1162 * @ingroup fabric_sm
1163 * @brief Initiate fabric node shutdown.
1165 * @param node Node for which shutdown is initiated.
1167 * @return Returns None.
1171 ocs_fabric_initiate_shutdown(ocs_node_t *node)
1174 ocs_t *ocs = node->ocs;
/* stop allocating new SCSI IOs against this node before tearing it down */
1175 ocs_scsi_io_alloc_disable(node);
1177 if (node->attached) {
1178 /* issue hw node free; don't care if succeeds right away
1179 * or sometime later, will check node->attached later in
1182 rc = ocs_hw_node_detach(&ocs->hw, &node->rnode);
/* release the remote node group if this node owned it */
1183 if (node->rnode.free_group) {
1184 ocs_remote_node_group_free(node->node_group);
1185 node->node_group = NULL;
1186 node->rnode.free_group = FALSE;
1188 if (rc != OCS_HW_RTN_SUCCESS && rc != OCS_HW_RTN_SUCCESS_SYNC) {
1189 node_printf(node, "Failed freeing HW node, rc=%d\n", rc);
1193 * node has either been detached or is in the process of being detached,
1194 * call common node's initiate cleanup function
1196 ocs_node_initiate_cleanup(node);
1200 * @ingroup fabric_sm
1201 * @brief Fabric node state machine: Handle the common fabric node events.
1203 * @param funcname Function name text.
1204 * @param ctx Remote node state machine context.
1205 * @param evt Event to process.
1206 * @param arg Per event optional argument.
1208 * @return Returns NULL.
1212 __ocs_fabric_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1214 ocs_node_t *node = NULL;
1215 ocs_assert(ctx, NULL);
1216 ocs_assert(ctx->app, NULL);
/* DOMAIN_ATTACH_OK can arrive in any fabric state; ignore it here */
1220 case OCS_EVT_DOMAIN_ATTACH_OK:
1222 case OCS_EVT_SHUTDOWN:
1223 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1224 ocs_fabric_initiate_shutdown(node);
1228 /* call default event handler common to all nodes */
1229 __ocs_node_common(funcname, ctx, evt, arg);
1236 * @brief Return TRUE if the remote node is an NPORT.
1239 * Examines the service parameters. Returns TRUE if the node reports itself as
1242 * @param remote_sparms Remote node service parameters.
1244 * @return Returns TRUE if NPORT.
1248 ocs_rnode_is_nport(fc_plogi_payload_t *remote_sparms)
/* Bit 28 of common service parameter word 1 is the F_PORT indicator;
 * when clear, the remote port is an N_PORT (point-to-point peer). */
1250 return (ocs_be32toh(remote_sparms->common_service_parameters[1]) & (1U << 28)) == 0;
1254 * @brief Return the node's WWPN as an uint64_t.
1257 * The WWPN is computed from service parameters, and returned as a uint64_t.
1259 * @param sp Pointer to service parameters.
1261 * @return Returns WWPN.
1266 ocs_get_wwpn(fc_plogi_payload_t *sp)
/* Assemble the 64-bit WWPN from the two big-endian 32-bit name words. */
1268 return (((uint64_t)ocs_be32toh(sp->port_name_hi) << 32ll) | (ocs_be32toh(sp->port_name_lo)));
1272 * @brief Return TRUE if the remote node is the point-to-point winner.
1275 * Compares WWPNs. Returns TRUE if the remote node's WWPN is numerically
1276 * higher than the local node's WWPN.
1278 * @param sport Pointer to the sport object.
1281 * - 0, if the remote node is the loser.
1282 * - 1, if the remote node is the winner.
1283 * - (-1), if remote node is neither the loser nor the winner
1288 ocs_rnode_is_winner(ocs_sport_t *sport)
/* Remote sparams were saved from the received/answered FLOGI. */
1290 fc_plogi_payload_t *remote_sparms = (fc_plogi_payload_t*) sport->domain->flogi_service_params;
1291 uint64_t remote_wwpn = ocs_get_wwpn(remote_sparms);
1292 uint64_t local_wwpn = sport->wwpn;
1294 uint64_t wwn_bump = 0;
/* test hook: XOR an optional "wwn_bump" property into the local WWPN */
1296 if (ocs_get_property("wwn_bump", prop_buf, sizeof(prop_buf)) == 0) {
1297 wwn_bump = ocs_strtoull(prop_buf, 0, 0);
1299 local_wwpn ^= wwn_bump;
1301 remote_wwpn = ocs_get_wwpn(remote_sparms);
1303 ocs_log_debug(sport->ocs, "r: %08x %08x\n", ocs_be32toh(remote_sparms->port_name_hi), ocs_be32toh(remote_sparms->port_name_lo));
1304 ocs_log_debug(sport->ocs, "l: %08x %08x\n", (uint32_t) (local_wwpn >> 32ll), (uint32_t) local_wwpn);
/* equal WWPNs cannot arbitrate a p2p winner */
1306 if (remote_wwpn == local_wwpn) {
1307 ocs_log_warn(sport->ocs, "WWPN of remote node [%08x %08x] matches local WWPN\n",
1308 (uint32_t) (local_wwpn >> 32ll), (uint32_t) local_wwpn);
/* higher WWPN wins the point-to-point arbitration */
1312 return (remote_wwpn > local_wwpn);
1317 * @brief Point-to-point state machine: Wait for the domain attach to complete.
1320 * Once the domain attach has completed, a PLOGI is sent (if we're the
1321 * winning point-to-point node).
1323 * @param ctx Remote node state machine context.
1324 * @param evt Event to process.
1325 * @param arg Per event optional argument.
1327 * @return Returns NULL.
1331 __ocs_p2p_wait_domain_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1333 std_node_state_decl();
/* hold frames while waiting; accept again on exit */
1339 ocs_node_hold_frames(node);
1343 ocs_node_accept_frames(node);
1346 case OCS_EVT_DOMAIN_ATTACH_OK: {
1347 ocs_sport_t *sport = node->sport;
1350 /* this transient node (SID=0 (recv'd FLOGI) or DID=fabric (sent FLOGI))
1351 * is the p2p winner, will use a separate node to send PLOGI to peer
1353 ocs_assert (node->sport->p2p_winner, NULL);
1355 rnode = ocs_node_find(sport, node->sport->p2p_remote_port_id);
1356 if (rnode != NULL) {
1357 /* the "other" transient p2p node has already kicked off the
1358 * new node from which PLOGI is sent */
1359 node_printf(node, "Node with fc_id x%x already exists\n", rnode->rnode.fc_id);
1360 ocs_assert (rnode != node, NULL);
1362 /* create new node (SID=1, DID=2) from which to send PLOGI */
1363 rnode = ocs_node_alloc(sport, sport->p2p_remote_port_id, FALSE, FALSE);
1364 if (rnode == NULL) {
1365 ocs_log_err(ocs, "node alloc failed\n");
1369 ocs_fabric_notify_topology(node);
1370 /* sm: allocate p2p remote node */
1371 ocs_node_transition(rnode, __ocs_p2p_rnode_init, NULL);
1374 /* the transient node (SID=0 or DID=fabric) has served its purpose */
1375 if (node->rnode.fc_id == 0) {
1376 /* if this is the SID=0 node, move to the init state in case peer
1377 * has restarted FLOGI discovery and FLOGI is pending
1379 /* don't send PLOGI on ocs_d_init entry */
1380 ocs_node_init_device(node, FALSE);
1382 /* if this is the DID=fabric node (we initiated FLOGI), shut it down */
1383 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1384 ocs_fabric_initiate_shutdown(node);
1390 __ocs_fabric_common(__func__, ctx, evt, arg);
1399 * @brief Point-to-point state machine: Remote node initialization state.
1402 * This state is entered after winning point-to-point, and the remote node
1405 * @param ctx Remote node state machine context.
1406 * @param evt Event to process.
1407 * @param arg Per event optional argument.
1409 * @return Returns NULL.
1413 __ocs_p2p_rnode_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1415 ocs_node_cb_t *cbdata = arg;
1416 std_node_state_decl();
1422 /* sm: / send PLOGI */
1423 ocs_send_plogi(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
1424 ocs_node_transition(node, __ocs_p2p_wait_plogi_rsp, NULL);
1427 case OCS_EVT_ABTS_RCVD:
1428 /* sm: send BA_ACC */
/* acknowledge the abort; no exchange state to clean up yet */
1429 ocs_bls_send_acc_hdr(cbdata->io, cbdata->header->dma.virt);
1433 __ocs_fabric_common(__func__, ctx, evt, arg);
1442 * @brief Point-to-point node state machine: Wait for the FLOGI accept completion.
1445 * Wait for the FLOGI accept completion.
1447 * @param ctx Remote node state machine context.
1448 * @param evt Event to process.
1449 * @param arg Per event optional argument.
1451 * @return Returns NULL.
/*
 * Wait for completion of the FLOGI LS_ACC sent to the p2p peer.
 * On completion OK: if this sport is the p2p winner, transition to wait
 * for domain attach (posting DOMAIN_ATTACH_OK immediately when the
 * domain already attached with no notify pending); otherwise this
 * transient node is returned to the device-init state without sending
 * PLOGI.  On completion FAIL: shut the node down so FLOGI discovery can
 * restart.  Frames are held for the duration of this state.
 * NOTE(review): excerpt is line-sampled; some braces/breaks not visible.
 */
1455 __ocs_p2p_wait_flogi_acc_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1457 ocs_node_cb_t *cbdata = arg;
1458 std_node_state_decl();
/* State entry: hold incoming frames until the LS_ACC completion resolves. */
1464 ocs_node_hold_frames(node);
/* State exit: resume frame delivery. */
1468 ocs_node_accept_frames(node);
1471 case OCS_EVT_SRRS_ELS_CMPL_OK:
1472 ocs_assert(node->els_cmpl_cnt, NULL);
1473 node->els_cmpl_cnt--;
1475 /* sm: if p2p_winner / domain_attach */
1476 if (node->sport->p2p_winner) {
1477 ocs_node_transition(node, __ocs_p2p_wait_domain_attach, NULL);
/* Domain may have attached while we waited; synthesize the event so the
 * next state does not stall. */
1478 if (node->sport->domain->attached &&
1479 !(node->sport->domain->domain_notify_pend)) {
1480 node_printf(node, "Domain already attached\n");
1481 ocs_node_post_event(node, OCS_EVT_DOMAIN_ATTACH_OK, NULL);
1484 /* this node has served its purpose; we'll expect a PLOGI on a separate
1485 * node (remote SID=0x1); return this node to init state in case peer
1486 * restarts discovery -- it may already have (pending frames may exist).
1488 /* don't send PLOGI on ocs_d_init entry */
1489 ocs_node_init_device(node, FALSE);
1493 case OCS_EVT_SRRS_ELS_CMPL_FAIL:
1494 /* LS_ACC failed, possibly due to link down; shutdown node and wait
1495 * for FLOGI discovery to restart */
1496 node_printf(node, "FLOGI LS_ACC failed, shutting down\n");
1497 ocs_assert(node->els_cmpl_cnt, NULL);
1498 node->els_cmpl_cnt--;
1499 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1500 ocs_fabric_initiate_shutdown(node);
/* ABTS received while waiting: acknowledge and stay in this state. */
1503 case OCS_EVT_ABTS_RCVD: {
1504 /* sm: / send BA_ACC */
1505 ocs_bls_send_acc_hdr(cbdata->io, cbdata->header->dma.virt);
/* All other events are delegated to the common fabric handler. */
1510 __ocs_fabric_common(__func__, ctx, evt, arg);
1520 * @brief Point-to-point node state machine: Wait for a PLOGI response
1521 * as a point-to-point winner.
1524 * Wait for a PLOGI response from the remote node as a point-to-point winner.
1525 * Submit node attach request to the HW.
1527 * @param ctx Remote node state machine context.
1528 * @param evt Event to process.
1529 * @param arg Per event optional argument.
1531 * @return Returns NULL.
/*
 * P2P winner waiting for the PLOGI response from the peer.
 * On a good response: save the service parameters and submit the node
 * attach to the HW (posting NODE_ATTACH_OK immediately if the attach
 * completed synchronously).  On failure: shut the node down.  A PRLI
 * arriving before the PLOGI completion is recorded so the LS_ACC can be
 * sent after attach.  A PLOGI received here is only accepted directly in
 * external loopback mode.
 * NOTE(review): excerpt is line-sampled; the declaration of `rc`, the
 * switch(evt) framing, and break/return lines are not visible here.
 */
1535 __ocs_p2p_wait_plogi_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1538 ocs_node_cb_t *cbdata = arg;
1539 std_node_state_decl();
1544 case OCS_EVT_SRRS_ELS_REQ_OK: {
/* Guard: ensure the completed ELS really is our PLOGI; otherwise the
 * helper routes the event to the common handler. */
1545 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_fabric_common, __func__)) {
1548 ocs_assert(node->els_req_cnt, NULL);
1549 node->els_req_cnt--;
1550 /* sm: / save sparams, ocs_node_attach */
1551 ocs_node_save_sparms(node, cbdata->els->els_rsp.virt);
1552 rc = ocs_node_attach(node);
1553 ocs_node_transition(node, __ocs_p2p_wait_node_attach, NULL);
/* Synchronous attach completion: post the event ourselves. */
1554 if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
1555 ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
1559 case OCS_EVT_SRRS_ELS_REQ_FAIL: {
1560 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_fabric_common, __func__)) {
1563 node_printf(node, "PLOGI failed, shutting down\n");
1564 ocs_assert(node->els_req_cnt, NULL);
1565 node->els_req_cnt--;
1566 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1567 ocs_fabric_initiate_shutdown(node);
1571 case OCS_EVT_PLOGI_RCVD: {
1572 fc_header_t *hdr = cbdata->header->dma.virt;
1573 /* if we're in external loopback mode, just send LS_ACC */
1574 if (node->ocs->external_loopback) {
1575 ocs_send_plogi_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
1578 /* if this isn't external loopback, pass to default handler */
1579 __ocs_fabric_common(__func__, ctx, evt, arg);
1583 case OCS_EVT_PRLI_RCVD:
1585 /* sent PLOGI and before completion was seen, received the
1586 * PRLI from the remote node (WCQEs and RCQEs come in on
1587 * different queues and order of processing cannot be assumed)
1588 * Save OXID so PRLI can be sent after the attach and continue
1589 * to wait for PLOGI response
1591 ocs_process_prli_payload(node, cbdata->payload->dma.virt);
1592 ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PRLI);
1593 ocs_node_transition(node, __ocs_p2p_wait_plogi_rsp_recvd_prli, NULL);
/* All other events are delegated to the common fabric handler. */
1596 __ocs_fabric_common(__func__, ctx, evt, arg);
1605 * @brief Point-to-point node state machine: Waiting on a response for a
1609 * State is entered when the point-to-point winner has sent
1610 * a PLOGI and is waiting for a response. Before receiving the
1611 * response, a PRLI was received, implying that the PLOGI was
1614 * @param ctx Remote node state machine context.
1615 * @param evt Event to process.
1616 * @param arg Per event optional argument.
1618 * @return Returns NULL.
/*
 * Same wait as __ocs_p2p_wait_plogi_rsp, but entered after a PRLI was
 * already received (the peer implicitly accepted our PLOGI).  Frames are
 * held here since FCP_CMNDs may already be arriving.  On the PLOGI
 * completion: save sparams and submit the node attach; on failure or
 * reject: shut the node down.
 * NOTE(review): excerpt is line-sampled; the declaration of `rc`, the
 * switch(evt) framing, and break/return lines are not visible here.
 */
1622 __ocs_p2p_wait_plogi_rsp_recvd_prli(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1625 ocs_node_cb_t *cbdata = arg;
1626 std_node_state_decl();
1633 * Since we've received a PRLI, we have a port login and will
1634 * just need to wait for the PLOGI response to do the node
1635 * attach and then we can send the LS_ACC for the PRLI. If,
1636 * during this time, we receive FCP_CMNDs (which is possible
1637 * since we've already sent a PRLI and our peer may have accepted).
1638 * At this time, we are not waiting on any other unsolicited
1639 * frames to continue with the login process. Thus, it will not
1640 * hurt to hold frames here.
1642 ocs_node_hold_frames(node);
/* State exit: resume frame delivery. */
1646 ocs_node_accept_frames(node);
1649 case OCS_EVT_SRRS_ELS_REQ_OK: /* PLOGI response received */
1650 /* Completion from PLOGI sent */
1651 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_fabric_common, __func__)) {
1654 ocs_assert(node->els_req_cnt, NULL);
1655 node->els_req_cnt--;
1656 /* sm: / save sparams, ocs_node_attach */
1657 ocs_node_save_sparms(node, cbdata->els->els_rsp.virt);
/* +4 skips the ELS command word to reach the service parameters. */
1658 ocs_display_sparams(node->display_name, "plogi rcvd resp", 0, NULL,
1659 ((uint8_t*)cbdata->els->els_rsp.virt) + 4);
1660 rc = ocs_node_attach(node);
1661 ocs_node_transition(node, __ocs_p2p_wait_node_attach, NULL);
/* Synchronous attach completion: post the event ourselves. */
1662 if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
1663 ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
1667 case OCS_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */
1668 case OCS_EVT_SRRS_ELS_REQ_RJT:
1669 /* PLOGI failed, shutdown the node */
1670 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_fabric_common, __func__)) {
1673 ocs_assert(node->els_req_cnt, NULL);
1674 node->els_req_cnt--;
1675 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1676 ocs_fabric_initiate_shutdown(node);
/* All other events are delegated to the common fabric handler. */
1680 __ocs_fabric_common(__func__, ctx, evt, arg);
1689 * @brief Point-to-point node state machine: Wait for a point-to-point node attach
1693 * Waits for the point-to-point node attach to complete.
1695 * @param ctx Remote node state machine context.
1696 * @param evt Event to process.
1697 * @param arg Per event optional argument.
1699 * @return Returns NULL.
/*
 * Wait for the HW node-attach to complete for a p2p node.
 * On ATTACH_OK: if a PRLI LS_ACC was deferred (send_ls_acc), send the
 * PRLI response now; otherwise go straight to the port-logged-in state.
 * On ATTACH_FAIL or SHUTDOWN: tear the node down.  A PRLI arriving
 * before the attach completes is recorded for a post-attach LS_ACC.
 * Frames are held for the duration of this state.
 * NOTE(review): excerpt is line-sampled; switch framing, break lines and
 * the closing return are not visible here.
 */
1703 __ocs_p2p_wait_node_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1705 ocs_node_cb_t *cbdata = arg;
1706 std_node_state_decl();
/* State entry: hold frames until the attach resolves. */
1712 ocs_node_hold_frames(node);
/* State exit: resume frame delivery. */
1716 ocs_node_accept_frames(node);
1719 case OCS_EVT_NODE_ATTACH_OK:
1720 node->attached = TRUE;
/* Dispatch on any LS_ACC deferred while the attach was in flight. */
1721 switch (node->send_ls_acc) {
1722 case OCS_NODE_SEND_LS_ACC_PRLI: {
1723 ocs_d_send_prli_rsp(node->ls_acc_io, node->ls_acc_oxid);
/* Clear the deferred-response bookkeeping; the IO is handed off. */
1724 node->send_ls_acc = OCS_NODE_SEND_LS_ACC_NONE;
1725 node->ls_acc_io = NULL;
1728 case OCS_NODE_SEND_LS_ACC_PLOGI: /* Can't happen in P2P */
1729 case OCS_NODE_SEND_LS_ACC_NONE:
1731 /* Normal case for I */
1732 /* sm: send_plogi_acc is not set / send PLOGI acc */
1733 ocs_node_transition(node, __ocs_d_port_logged_in, NULL);
1738 case OCS_EVT_NODE_ATTACH_FAIL:
1739 /* node attach failed, shutdown the node */
1740 node->attached = FALSE;
1741 node_printf(node, "Node attach failed\n");
1742 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1743 ocs_fabric_initiate_shutdown(node);
/* Shutdown requested mid-attach: park until the attach event arrives. */
1746 case OCS_EVT_SHUTDOWN:
1747 node_printf(node, "%s received\n", ocs_sm_event_name(evt));
1748 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1749 ocs_node_transition(node, __ocs_fabric_wait_attach_evt_shutdown, NULL);
1751 case OCS_EVT_PRLI_RCVD:
1752 node_printf(node, "%s: PRLI received before node is attached\n", ocs_sm_event_name(evt));
1753 ocs_process_prli_payload(node, cbdata->payload->dma.virt);
1754 ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PRLI);
/* All other events are delegated to the common fabric handler. */
1757 __ocs_fabric_common(__func__, ctx, evt, arg);
1765 * @brief Start up the name services node.
1768 * Allocates and starts up the name services node.
1770 * @param sport Pointer to the sport structure.
1772 * @return Returns 0 on success, or a negative error value on failure.
/*
 * Find or allocate the name-services node (FC_ADDR_NAMESERVER) for this
 * sport and start it in __ocs_ns_init — paused there if the nodedb mask
 * requests OCS_NODEDB_PAUSE_NAMESERVER.
 * NOTE(review): excerpt is line-sampled; the NULL-checks/return paths
 * around ocs_node_find/ocs_node_alloc are not visible here.
 */
1776 ocs_start_ns_node(ocs_sport_t *sport)
1780 /* Instantiate a name services node */
1781 ns = ocs_node_find(sport, FC_ADDR_NAMESERVER);
1783 ns = ocs_node_alloc(sport, FC_ADDR_NAMESERVER, FALSE, FALSE);
1788 /* TODO: for found ns, should we be transitioning from here?
1789 * breaks transition only 1. from within state machine or
/* Optionally hold the NS node in a paused state for debug/test. */
1792 if (ns->ocs->nodedb_mask & OCS_NODEDB_PAUSE_NAMESERVER) {
1793 ocs_node_pause(ns, __ocs_ns_init);
1795 ocs_node_transition(ns, __ocs_ns_init, NULL);
1801 * @brief Start up the fabric controller node.
1804 * Allocates and starts up the fabric controller node.
1806 * @param sport Pointer to the sport structure.
1808 * @return Returns 0 on success, or a negative error value on failure.
/*
 * Find or allocate the fabric-controller node (FC_ADDR_CONTROLLER) for
 * this sport and start it in __ocs_fabctl_init.
 * NOTE(review): excerpt is line-sampled; the error-return path after a
 * failed ocs_node_alloc is not visible here.
 */
1812 ocs_start_fabctl_node(ocs_sport_t *sport)
1816 fabctl = ocs_node_find(sport, FC_ADDR_CONTROLLER);
1817 if (fabctl == NULL) {
1818 fabctl = ocs_node_alloc(sport, FC_ADDR_CONTROLLER, FALSE, FALSE);
1819 if (fabctl == NULL) {
1823 /* TODO: for found ns, should we be transitioning from here?
1824 * breaks transition only 1. from within state machine or
1827 ocs_node_transition(fabctl, __ocs_fabctl_init, NULL);
1832 * @brief Process the GIDPT payload.
1835 * The GIDPT payload is parsed, and new nodes are created, as needed.
1837 * @param node Pointer to the node structure.
1838 * @param gidpt Pointer to the GIDPT payload.
1839 * @param gidpt_len Payload length
1841 * @return Returns 0 on success, or a negative error value on failure.
/*
 * Reconcile the sport's node list against a GID_PT (name server) accept
 * payload.  Three phases under the sport lock:
 *   1. snapshot current non-well-known nodes into active_nodes[];
 *   2. cross off every snapshot entry whose fc_id appears in the GID_PT
 *      port list — anything left over has disappeared from the fabric
 *      and gets OCS_EVT_NODE_MISSING (subject to ini/tgt RSCN policy);
 *   3. walk the port list again, posting NODE_REFOUND for known nodes
 *      and allocating + starting new nodes for unknown port_ids.
 * Returns 0 on success or a negative value on failure (per the header
 * comment outside this block).
 * NOTE(review): excerpt is line-sampled; declarations of i/j/n/residual/
 * port_id, several braces, and the return statements are not visible.
 */
1845 ocs_process_gidpt_payload(ocs_node_t *node, fcct_gidpt_acc_t *gidpt, uint32_t gidpt_len)
1849 ocs_node_t *newnode;
1850 ocs_sport_t *sport = node->sport;
1851 ocs_t *ocs = node->ocs;
1853 uint32_t port_count;
1855 ocs_node_t **active_nodes;
1856 uint32_t portlist_count;
/* Non-zero residual means the response was truncated; log it. */
1859 residual = ocs_be16toh(gidpt->hdr.max_residual_size);
1861 if (residual != 0) {
1862 ocs_log_debug(node->ocs, "residual is %u words\n", residual);
1865 if (ocs_be16toh(gidpt->hdr.cmd_rsp_code) == FCCT_HDR_CMDRSP_REJECT) {
1866 node_printf(node, "GIDPT request failed: rsn x%x rsn_expl x%x\n",
1867 gidpt->hdr.reason_code, gidpt->hdr.reason_code_explanation);
/* Number of port-list entries that fit in the payload after the CT header. */
1871 portlist_count = (gidpt_len - sizeof(fcct_iu_header_t)) / sizeof(gidpt->port_list);
1873 /* Count the number of nodes */
1875 ocs_sport_lock(sport);
1876 ocs_list_foreach(&sport->node_list, n) {
1880 /* Allocate a buffer for all nodes */
1881 active_nodes = ocs_malloc(node->ocs, port_count * sizeof(*active_nodes), OCS_M_NOWAIT | OCS_M_ZERO);
1882 if (active_nodes == NULL) {
1883 node_printf(node, "ocs_malloc failed\n");
1884 ocs_sport_unlock(sport);
1888 /* Fill buffer with fc_id of active nodes */
1890 ocs_list_foreach(&sport->node_list, n) {
1891 port_id = n->rnode.fc_id;
/* Well-known addresses and domain controllers are never reconciled. */
1893 case FC_ADDR_FABRIC:
1894 case FC_ADDR_CONTROLLER:
1895 case FC_ADDR_NAMESERVER:
1898 if (!FC_ADDR_IS_DOMAIN_CTRL(port_id)) {
1899 active_nodes[i++] = n;
1905 /* update the active nodes buffer */
1906 for (i = 0; i < portlist_count; i ++) {
1907 port_id = fc_be24toh(gidpt->port_list[i].port_id);
/* Still present in the fabric: remove it from the "missing" candidates. */
1909 for (j = 0; j < port_count; j ++) {
1910 if ((active_nodes[j] != NULL) && (port_id == active_nodes[j]->rnode.fc_id)) {
1911 active_nodes[j] = NULL;
1915 if (gidpt->port_list[i].ctl & FCCT_GID_PT_LAST_ID)
1919 /* Those remaining in the active_nodes[] are now gone ! */
1920 for (i = 0; i < port_count; i ++) {
1921 /* if we're an initiator and the remote node is a target, then
1922 * post the node missing event. if we're target and we have enabled
1923 * target RSCN, then post the node missing event.
1925 if (active_nodes[i] != NULL) {
1926 if ((node->sport->enable_ini && active_nodes[i]->targ) ||
1927 (node->sport->enable_tgt && enable_target_rscn(ocs))) {
1928 ocs_node_post_event(active_nodes[i], OCS_EVT_NODE_MISSING, NULL);
1930 node_printf(node, "GID_PT: skipping non-tgt port_id x%06x\n",
1931 active_nodes[i]->rnode.fc_id);
1935 ocs_free(ocs, active_nodes, port_count * sizeof(*active_nodes));
/* Phase 3: create/refresh nodes for every port_id the fabric reports. */
1937 for(i = 0; i < portlist_count; i ++) {
1938 uint32_t port_id = fc_be24toh(gidpt->port_list[i].port_id);
1940 /* node_printf(node, "GID_PT: port_id x%06x\n", port_id); */
1942 /* Don't create node for ourselves or the associated NPIV ports */
1943 if (port_id != node->rnode.sport->fc_id && !ocs_sport_find(sport->domain, port_id)) {
1944 newnode = ocs_node_find(sport, port_id);
1946 /* TODO: what if node deleted here?? */
1947 if (node->sport->enable_ini && newnode->targ) {
1948 ocs_node_post_event(newnode, OCS_EVT_NODE_REFOUND, NULL);
1950 /* original code sends ADISC, has notion of "refound" */
/* Unknown port_id: only initiators instantiate new remote nodes. */
1952 if (node->sport->enable_ini) {
1953 newnode = ocs_node_alloc(sport, port_id, 0, 0);
1954 if (newnode == NULL) {
1955 ocs_log_err(ocs, "ocs_node_alloc() failed\n");
1956 ocs_sport_unlock(sport);
1959 /* send PLOGI automatically if initiator */
1960 ocs_node_init_device(newnode, TRUE);
1965 if (gidpt->port_list[i].ctl & FCCT_GID_PT_LAST_ID) {
1969 ocs_sport_unlock(sport);
1974 * @brief Set up the domain point-to-point parameters.
1977 * The remote node service parameters are examined, and various point-to-point
1978 * variables are set.
1980 * @param sport Pointer to the sport object.
1982 * @return Returns 0 on success, or a negative error value on failure.
/*
 * Decide the point-to-point "winner" for this sport and assign the
 * local/remote N_Port IDs accordingly:
 *   - remote wins (ocs_rnode_is_winner() == 1): both IDs 0, not winner;
 *   - local wins  (== 0): local ID 1, remote ID 2, winner;
 *   - undetermined: only legal in external loopback (both IDs 1),
 *     otherwise log a warning and propagate the helper's result.
 * NOTE(review): the mapping of the helper's 1/0 return to remote/local
 * winner is inferred from the assignments below — confirm against
 * ocs_rnode_is_winner(), which is not visible in this excerpt.
 */
1986 ocs_p2p_setup(ocs_sport_t *sport)
1988 ocs_t *ocs = sport->ocs;
1989 int32_t rnode_winner;
1990 rnode_winner = ocs_rnode_is_winner(sport);
1992 /* set sport flags to indicate p2p "winner" */
1993 if (rnode_winner == 1) {
1994 sport->p2p_remote_port_id = 0;
1995 sport->p2p_port_id = 0;
1996 sport->p2p_winner = FALSE;
1997 } else if (rnode_winner == 0) {
1998 sport->p2p_remote_port_id = 2;
1999 sport->p2p_port_id = 1;
2000 sport->p2p_winner = TRUE;
2002 /* no winner; only okay if external loopback enabled */
2003 if (sport->ocs->external_loopback) {
2005 * External loopback mode enabled; local sport and remote node
2006 * will be registered with an NPortID = 1;
2008 ocs_log_debug(ocs, "External loopback mode enabled\n");
2009 sport->p2p_remote_port_id = 1;
2010 sport->p2p_port_id = 1;
2011 sport->p2p_winner = TRUE;
2013 ocs_log_warn(ocs, "failed to determine p2p winner\n");
2014 return rnode_winner;
2021 * @brief Process the FABCTL node RSCN.
2023 * <h3 class="desc">Description</h3>
2024 * Processes the FABCTL node RSCN payload, simply passes the event to the name server.
2026 * @param node Pointer to the node structure.
2027 * @param cbdata Callback data to pass forward.
2033 ocs_process_rscn(ocs_node_t *node, ocs_node_cb_t *cbdata)
2035 ocs_t *ocs = node->ocs;
2036 ocs_sport_t *sport = node->sport;
2039 /* Forward this event to the name-services node */
2040 ns = ocs_node_find(sport, FC_ADDR_NAMESERVER);
2042 ocs_node_post_event(ns, OCS_EVT_RSCN_RCVD, cbdata);
2044 ocs_log_warn(ocs, "can't find name server node\n");