/*
 * Copyright (c) 2017 Broadcom. All rights reserved.
 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/**
 * Defines and implements the Hardware Abstraction Layer (HW).
 * All interaction with the hardware is performed through the HW, which
 * abstracts the details of the underlying SLI-4 implementation.
 */

/**
 * @defgroup devInitShutdown Device Initialization and Shutdown
 * @defgroup domain Domain Functions
 * @defgroup port Port Functions
 * @defgroup node Remote Node Functions
 * @defgroup io IO Functions
 * @defgroup interrupt Interrupt handling
 * @defgroup os OS Required Functions
 */
#include "ocs_hw_queues.h"

#define OCS_HW_MQ_DEPTH				128
#define OCS_HW_READ_FCF_SIZE			4096
#define OCS_HW_DEFAULT_AUTO_XFER_RDY_IOS	256
#define OCS_HW_WQ_TIMER_PERIOD_MS		500

/* values used for setting the auto xfer rdy parameters */
#define OCS_HW_AUTO_XFER_RDY_BLK_SIZE_DEFAULT		0	/* 512 bytes */
#define OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA_DEFAULT	TRUE
#define OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID_DEFAULT	FALSE
#define OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE_DEFAULT	0
#define OCS_HW_REQUE_XRI_REGTAG				65534
/* max command and response buffer lengths -- arbitrary at the moment */
#define OCS_HW_DMTF_CLP_CMD_MAX				256
#define OCS_HW_DMTF_CLP_RSP_MAX				256
ocs_hw_global_t hw_global;

static void ocs_hw_queue_hash_add(ocs_queue_hash_t *, uint16_t, uint16_t);
static void ocs_hw_adjust_wqs(ocs_hw_t *hw);
static uint32_t ocs_hw_get_num_chutes(ocs_hw_t *hw);
static int32_t ocs_hw_cb_link(void *, void *);
static int32_t ocs_hw_cb_fip(void *, void *);
static int32_t ocs_hw_command_process(ocs_hw_t *, int32_t, uint8_t *, size_t);
static int32_t ocs_hw_mq_process(ocs_hw_t *, int32_t, sli4_queue_t *);
static int32_t ocs_hw_cb_read_fcf(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_node_attach(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_node_free(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_node_free_all(ocs_hw_t *, int32_t, uint8_t *, void *);
static ocs_hw_rtn_e ocs_hw_setup_io(ocs_hw_t *);
static ocs_hw_rtn_e ocs_hw_init_io(ocs_hw_t *);
static int32_t ocs_hw_flush(ocs_hw_t *);
static int32_t ocs_hw_command_cancel(ocs_hw_t *);
static int32_t ocs_hw_io_cancel(ocs_hw_t *);
static void ocs_hw_io_quarantine(ocs_hw_t *hw, hw_wq_t *wq, ocs_hw_io_t *io);
static void ocs_hw_io_restore_sgl(ocs_hw_t *, ocs_hw_io_t *);
static int32_t ocs_hw_io_ini_sge(ocs_hw_t *, ocs_hw_io_t *, ocs_dma_t *, uint32_t, ocs_dma_t *);
static ocs_hw_rtn_e ocs_hw_firmware_write_lancer(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg);
static int32_t ocs_hw_cb_fw_write(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_sfp(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_temp(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_link_stat(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t ocs_hw_cb_host_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);
static void ocs_hw_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);
static int32_t ocs_hw_clp_resp_get_value(ocs_hw_t *hw, const char *keyword, char *value, uint32_t value_len, const char *resp, uint32_t resp_len);
typedef void (*ocs_hw_dmtf_clp_cb_t)(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg);
static ocs_hw_rtn_e ocs_hw_exec_dmtf_clp_cmd(ocs_hw_t *hw, ocs_dma_t *dma_cmd, ocs_dma_t *dma_resp, uint32_t opts, ocs_hw_dmtf_clp_cb_t cb, void *arg);
static void ocs_hw_linkcfg_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg);

static int32_t __ocs_read_topology_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
static ocs_hw_rtn_e ocs_hw_get_linkcfg(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
static ocs_hw_rtn_e ocs_hw_get_linkcfg_lancer(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
static ocs_hw_rtn_e ocs_hw_get_linkcfg_skyhawk(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
static ocs_hw_rtn_e ocs_hw_set_linkcfg(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
static ocs_hw_rtn_e ocs_hw_set_linkcfg_lancer(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
static ocs_hw_rtn_e ocs_hw_set_linkcfg_skyhawk(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
static void ocs_hw_init_linkcfg_cb(int32_t status, uintptr_t value, void *arg);
static ocs_hw_rtn_e ocs_hw_set_eth_license(ocs_hw_t *hw, uint32_t license);
static ocs_hw_rtn_e ocs_hw_set_dif_seed(ocs_hw_t *hw);
static ocs_hw_rtn_e ocs_hw_set_dif_mode(ocs_hw_t *hw);
static void ocs_hw_io_free_internal(void *arg);
static void ocs_hw_io_free_port_owned(void *arg);
static ocs_hw_rtn_e ocs_hw_config_auto_xfer_rdy_t10pi(ocs_hw_t *hw, uint8_t *buf);
static ocs_hw_rtn_e ocs_hw_config_set_fdt_xfer_hint(ocs_hw_t *hw, uint32_t fdt_xfer_hint);
static void ocs_hw_wq_process_abort(void *arg, uint8_t *cqe, int32_t status);
static int32_t ocs_hw_config_mrq(ocs_hw_t *hw, uint8_t, uint16_t, uint16_t);
static ocs_hw_rtn_e ocs_hw_config_watchdog_timer(ocs_hw_t *hw);
static ocs_hw_rtn_e ocs_hw_config_sli_port_health_check(ocs_hw_t *hw, uint8_t query, uint8_t enable);

/* HW domain database operations */
static int32_t ocs_hw_domain_add(ocs_hw_t *, ocs_domain_t *);
static int32_t ocs_hw_domain_del(ocs_hw_t *, ocs_domain_t *);

/* Port state machine */
static void *__ocs_hw_port_alloc_init(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_port_alloc_read_sparm64(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_port_alloc_init_vpi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_port_done(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_port_free_unreg_vpi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);

/* Domain state machine */
static void *__ocs_hw_domain_init(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_domain_alloc_reg_fcfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_domain_alloc_init_vfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_domain_free_unreg_vfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
static void *__ocs_hw_domain_free_unreg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data);
static int32_t __ocs_hw_domain_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t __ocs_hw_port_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
static int32_t __ocs_hw_port_realloc_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);

static void ocs_hw_check_sec_hio_list(ocs_hw_t *hw);

static void target_wqe_timer_cb(void *arg);
static void shutdown_target_wqe_timer(ocs_hw_t *hw);
ocs_hw_add_io_timed_wqe(ocs_hw_t *hw, ocs_hw_io_t *io)
	if (hw->config.emulate_tgt_wqe_timeout && io->tgt_wqe_timeout) {
		/*
		 * Active WQE list currently only used for
		 * target WQE timeouts.
		 */
		ocs_lock(&hw->io_lock);
		ocs_list_add_tail(&hw->io_timed_wqe, io);
		io->submit_ticks = ocs_get_os_ticks();
		ocs_unlock(&hw->io_lock);

ocs_hw_remove_io_timed_wqe(ocs_hw_t *hw, ocs_hw_io_t *io)
	if (hw->config.emulate_tgt_wqe_timeout) {
		/*
		 * If target wqe timeouts are enabled,
		 * remove from active wqe list.
		 */
		ocs_lock(&hw->io_lock);
		if (ocs_list_on_list(&io->wqe_link)) {
			ocs_list_remove(&hw->io_timed_wqe, io);
		ocs_unlock(&hw->io_lock);
static uint8_t ocs_hw_iotype_is_originator(uint16_t io_type)
	case OCS_HW_IO_INITIATOR_READ:
	case OCS_HW_IO_INITIATOR_WRITE:
	case OCS_HW_IO_INITIATOR_NODATA:

static uint8_t ocs_hw_wcqe_abort_needed(uint16_t status, uint8_t ext, uint8_t xb)
	/* if exchange not active, nothing to abort */

	if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT) {
		/* exceptions where abort is not needed */
		case SLI4_FC_LOCAL_REJECT_INVALID_RPI:		/* lancer returns this after unreg_rpi */
		case SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED:	/* abort already in progress */
/**
 * @brief Determine the number of chutes on the device.
 *
 * Some devices require queue resources allocated per protocol processor
 * (chute). This function returns the number of chutes on this device.
 *
 * @param hw Hardware context allocated by the caller.
 *
 * @return Returns the number of chutes on the device for protocol processing.
 */
ocs_hw_get_num_chutes(ocs_hw_t *hw)
	uint32_t num_chutes = 1;

	if (sli_get_is_dual_ulp_capable(&hw->sli) &&
	    sli_get_is_ulp_enabled(&hw->sli, 0) &&
	    sli_get_is_ulp_enabled(&hw->sli, 1)) {
ocs_hw_link_event_init(ocs_hw_t *hw)
	hw->link.status = SLI_LINK_STATUS_MAX;
	hw->link.topology = SLI_LINK_TOPO_NONE;
	hw->link.medium = SLI_LINK_MEDIUM_MAX;
	hw->link.loop_map = NULL;
	hw->link.fc_id = UINT32_MAX;

	return OCS_HW_RTN_SUCCESS;
/**
 * @ingroup devInitShutdown
 * @brief If this is physical port 0, then read the max dump size.
 *
 * Queries the FW for the maximum dump size.
 *
 * @param hw Hardware context allocated by the caller.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_read_max_dump_size(ocs_hw_t *hw)
	uint8_t buf[SLI4_BMBX_SIZE];
	uint8_t bus, dev, func;

	if ((SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) &&
	    (SLI4_IF_TYPE_LANCER_G7 != sli_get_if_type(&hw->sli))) {
		ocs_log_debug(hw->os, "Function only supported for I/F type 2 or G7\n");
		return OCS_HW_RTN_ERROR;

	/*
	 * Make sure the FW is new enough to support this command. If the FW
	 * is too old, the FW will UE.
	 */
	if (hw->workaround.disable_dump_loc) {
		ocs_log_test(hw->os, "FW version is too old for this feature\n");
		return OCS_HW_RTN_ERROR;

	/* attempt to determine the dump size for function 0 only. */
	ocs_get_bus_dev_func(hw->os, &bus, &dev, &func);

	if (sli_cmd_common_set_dump_location(&hw->sli, buf,
	    SLI4_BMBX_SIZE, 1, 0, NULL, 0)) {
		sli4_res_common_set_dump_location_t *rsp =
			(sli4_res_common_set_dump_location_t *)
			(buf + offsetof(sli4_cmd_sli_config_t,

		rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
		if (rc != OCS_HW_RTN_SUCCESS) {
			ocs_log_test(hw->os, "set dump location command failed\n");

		hw->dump_size = rsp->buffer_length;
		ocs_log_debug(hw->os, "Dump size %x\n", rsp->buffer_length);

	return OCS_HW_RTN_SUCCESS;
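/*
 * A minimal sketch of the poll-mode mailbox pattern used above (illustrative
 * only, not part of the driver): build the command into a bounce buffer with
 * a sli_cmd_*() helper, submit it synchronously with OCS_CMD_POLL, and read
 * the response back out of the same buffer.
 *
 *	uint8_t buf[SLI4_BMBX_SIZE];
 *
 *	if (sli_cmd_common_set_dump_location(&hw->sli, buf, SLI4_BMBX_SIZE,
 *					     1, 0, NULL, 0) &&
 *	    (ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL) ==
 *	     OCS_HW_RTN_SUCCESS)) {
 *		... response fields in buf are valid here ...
 *	}
 */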
/**
 * @ingroup devInitShutdown
 * @brief Set up the Hardware Abstraction Layer module.
 *
 * Calls the setup routines to configure the hardware.
 *
 * @param hw Hardware context allocated by the caller.
 * @param os Device abstraction.
 * @param port_type Protocol type of port, such as FC and NIC.
 *
 * @todo Why is port_type a parameter?
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_setup(ocs_hw_t *hw, ocs_os_handle_t os, sli4_port_type_e port_type)
		ocs_log_err(os, "bad parameter(s) hw=%p\n", hw);
		return OCS_HW_RTN_ERROR;

	if (hw->hw_setup_called) {
		/* Setup run-time workarounds.
		 * Call for each setup, to allow for hw_war_version.
		 */
		ocs_hw_workaround_setup(hw);
		return OCS_HW_RTN_SUCCESS;

	/*
	 * ocs_hw_init() relies on NULL pointers indicating that a structure
	 * needs allocation. If a structure is non-NULL, ocs_hw_init() won't
	 * free/realloc that memory.
	 */
	ocs_memset(hw, 0, sizeof(ocs_hw_t));

	hw->hw_setup_called = TRUE;

	ocs_lock_init(hw->os, &hw->cmd_lock, "HW_cmd_lock[%d]", ocs_instance(hw->os));
	ocs_list_init(&hw->cmd_head, ocs_command_ctx_t, link);
	ocs_list_init(&hw->cmd_pending, ocs_command_ctx_t, link);
	hw->cmd_head_count = 0;

	ocs_lock_init(hw->os, &hw->io_lock, "HW_io_lock[%d]", ocs_instance(hw->os));
	ocs_lock_init(hw->os, &hw->io_abort_lock, "HW_io_abort_lock[%d]", ocs_instance(hw->os));

	ocs_atomic_init(&hw->io_alloc_failed_count, 0);

	hw->config.speed = FC_LINK_SPEED_AUTO_16_8_4;
	hw->config.dif_seed = 0;
	hw->config.auto_xfer_rdy_blk_size_chip = OCS_HW_AUTO_XFER_RDY_BLK_SIZE_DEFAULT;
	hw->config.auto_xfer_rdy_ref_tag_is_lba = OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA_DEFAULT;
	hw->config.auto_xfer_rdy_app_tag_valid = OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID_DEFAULT;
	hw->config.auto_xfer_rdy_app_tag_value = OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE_DEFAULT;

	if (sli_setup(&hw->sli, hw->os, port_type)) {
		ocs_log_err(hw->os, "SLI setup failed\n");
		return OCS_HW_RTN_ERROR;

	ocs_memset(hw->domains, 0, sizeof(hw->domains));

	ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));

	ocs_hw_link_event_init(hw);

	sli_callback(&hw->sli, SLI4_CB_LINK, ocs_hw_cb_link, hw);
	sli_callback(&hw->sli, SLI4_CB_FIP, ocs_hw_cb_fip, hw);

	/*
	 * Set all the queue sizes to the maximum allowed. These values may
	 * be changed later by the adjust and workaround functions.
	 */
	for (i = 0; i < ARRAY_SIZE(hw->num_qentries); i++) {
		hw->num_qentries[i] = sli_get_max_qentries(&hw->sli, i);

	/*
	 * The RQ assignment for RQ pair mode.
	 */
	hw->config.rq_default_buffer_size = OCS_HW_RQ_SIZE_PAYLOAD;
	hw->config.n_io = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI);
	if (ocs_get_property("auto_xfer_rdy_xri_cnt", prop_buf, sizeof(prop_buf)) == 0) {
		hw->config.auto_xfer_rdy_xri_cnt = ocs_strtoul(prop_buf, 0, 0);

	/* by default, enable initiator-only auto-ABTS emulation */
	hw->config.i_only_aab = TRUE;

	/* Setup run-time workarounds */
	ocs_hw_workaround_setup(hw);

	/* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
	if (hw->workaround.override_fcfi) {
		hw->first_domain_idx = -1;

	/* Must be done after the workaround setup */
	if ((SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) ||
	    (SLI4_IF_TYPE_LANCER_G7 == sli_get_if_type(&hw->sli))) {
		(void)ocs_hw_read_max_dump_size(hw);

	/* calculate the number of WQs required. */
	ocs_hw_adjust_wqs(hw);

	/* Set the default dif mode */
	if (!sli_is_dif_inline_capable(&hw->sli)) {
		ocs_log_test(hw->os, "not inline capable, setting mode to separate\n");
		hw->config.dif_mode = OCS_HW_DIF_MODE_SEPARATE;

	/* Workaround: BZ 161832 */
	if (hw->workaround.use_dif_sec_xri) {
		ocs_list_init(&hw->sec_hio_wait_list, ocs_hw_io_t, link);
	/*
	 * Figure out the starting and max ULP to spread the WQs across the
	 * ULPs.
	 */
	if (sli_get_is_dual_ulp_capable(&hw->sli)) {
		if (sli_get_is_ulp_enabled(&hw->sli, 0) &&
		    sli_get_is_ulp_enabled(&hw->sli, 1)) {
		} else if (sli_get_is_ulp_enabled(&hw->sli, 0)) {
		if (sli_get_is_ulp_enabled(&hw->sli, 0)) {

	ocs_log_debug(hw->os, "ulp_start %d, ulp_max %d\n",
		      hw->ulp_start, hw->ulp_max);
	hw->config.queue_topology = hw_global.queue_topology_string;

	hw->qtop = ocs_hw_qtop_parse(hw, hw->config.queue_topology);

	hw->config.n_eq = hw->qtop->entry_counts[QTOP_EQ];
	hw->config.n_cq = hw->qtop->entry_counts[QTOP_CQ];
	hw->config.n_rq = hw->qtop->entry_counts[QTOP_RQ];
	hw->config.n_wq = hw->qtop->entry_counts[QTOP_WQ];
	hw->config.n_mq = hw->qtop->entry_counts[QTOP_MQ];

	/* Verify qtop configuration against driver supported configuration */
	if (hw->config.n_rq > OCE_HW_MAX_NUM_MRQ_PAIRS) {
		ocs_log_crit(hw->os, "Max supported MRQ pairs = %d\n",
			     OCE_HW_MAX_NUM_MRQ_PAIRS);
		return OCS_HW_RTN_ERROR;

	if (hw->config.n_eq > OCS_HW_MAX_NUM_EQ) {
		ocs_log_crit(hw->os, "Max supported EQs = %d\n",
			     OCS_HW_MAX_NUM_EQ);
		return OCS_HW_RTN_ERROR;

	if (hw->config.n_cq > OCS_HW_MAX_NUM_CQ) {
		ocs_log_crit(hw->os, "Max supported CQs = %d\n",
			     OCS_HW_MAX_NUM_CQ);
		return OCS_HW_RTN_ERROR;

	if (hw->config.n_wq > OCS_HW_MAX_NUM_WQ) {
		ocs_log_crit(hw->os, "Max supported WQs = %d\n",
			     OCS_HW_MAX_NUM_WQ);
		return OCS_HW_RTN_ERROR;

	if (hw->config.n_mq > OCS_HW_MAX_NUM_MQ) {
		ocs_log_crit(hw->os, "Max supported MQs = %d\n",
			     OCS_HW_MAX_NUM_MQ);
		return OCS_HW_RTN_ERROR;

	return OCS_HW_RTN_SUCCESS;
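/*
 * A sketch of the typical bring-up sequence implied by ocs_hw_setup() and
 * ocs_hw_init() (illustrative only; which properties a caller programs
 * between setup and init is driver-specific):
 *
 *	if (ocs_hw_setup(hw, os, SLI4_PORT_TYPE_FC) != OCS_HW_RTN_SUCCESS)
 *		return failure;
 *	ocs_hw_set(hw, OCS_HW_TOPOLOGY, OCS_HW_TOPOLOGY_AUTO);
 *	if (ocs_hw_init(hw) != OCS_HW_RTN_SUCCESS)
 *		return failure;
 *	...
 *	ocs_hw_teardown(hw);
 */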
/**
 * @ingroup devInitShutdown
 * @brief Allocate memory structures to prepare for the device operation.
 *
 * Allocates memory structures needed by the device and prepares the device
 * for operation.
 * @n @n @b Note: This function may be called more than once (for example, at
 * initialization and then after a reset), but the size of the internal resources
 * may not be changed without tearing down the HW (ocs_hw_teardown()).
 *
 * @param hw Hardware context allocated by the caller.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_init(ocs_hw_t *hw)
	uint8_t buf[SLI4_BMBX_SIZE];
	int written_size = 0;
	uint32_t ramdisc_blocksize = 512;
	uint32_t q_count = 0;

	/*
	 * Make sure the command lists are empty. If this is start-of-day,
	 * they'll be empty since they were just initialized in ocs_hw_setup.
	 * If we've just gone through a reset, the command and command pending
	 * lists should have been cleaned up as part of the reset (ocs_hw_reset()).
	 */
	ocs_lock(&hw->cmd_lock);
	if (!ocs_list_empty(&hw->cmd_head)) {
		ocs_log_test(hw->os, "command found on cmd list\n");
		ocs_unlock(&hw->cmd_lock);
		return OCS_HW_RTN_ERROR;
	if (!ocs_list_empty(&hw->cmd_pending)) {
		ocs_log_test(hw->os, "command found on pending list\n");
		ocs_unlock(&hw->cmd_lock);
		return OCS_HW_RTN_ERROR;
	ocs_unlock(&hw->cmd_lock);
	/* Free RQ buffers if previously allocated */

	/*
	 * The IO queues must be initialized here for the reset case. The
	 * ocs_hw_init_io() function will re-add the IOs to the free list.
	 * The cmd_head list should be OK since we free all entries in
	 * ocs_hw_command_cancel(), which is called in ocs_hw_reset().
	 */

	/* If we are in this function due to a reset, there may be stale items
	 * on lists that need to be removed. Clean them up.
	 */
	if (ocs_list_valid(&hw->io_wait_free)) {
		while ((!ocs_list_empty(&hw->io_wait_free))) {
			ocs_list_remove_head(&hw->io_wait_free);
		ocs_log_debug(hw->os, "removed %d items from io_wait_free list\n", rem_count);

	if (ocs_list_valid(&hw->io_inuse)) {
		while ((!ocs_list_empty(&hw->io_inuse))) {
			ocs_list_remove_head(&hw->io_inuse);
		ocs_log_debug(hw->os, "removed %d items from io_inuse list\n", rem_count);

	if (ocs_list_valid(&hw->io_free)) {
		while ((!ocs_list_empty(&hw->io_free))) {
			ocs_list_remove_head(&hw->io_free);
		ocs_log_debug(hw->os, "removed %d items from io_free list\n", rem_count);

	if (ocs_list_valid(&hw->io_port_owned)) {
		while ((!ocs_list_empty(&hw->io_port_owned))) {
			ocs_list_remove_head(&hw->io_port_owned);

	ocs_list_init(&hw->io_inuse, ocs_hw_io_t, link);
	ocs_list_init(&hw->io_free, ocs_hw_io_t, link);
	ocs_list_init(&hw->io_port_owned, ocs_hw_io_t, link);
	ocs_list_init(&hw->io_wait_free, ocs_hw_io_t, link);
	ocs_list_init(&hw->io_timed_wqe, ocs_hw_io_t, wqe_link);
	ocs_list_init(&hw->io_port_dnrx, ocs_hw_io_t, dnrx_link);

	/* If MRQ is not required, make sure we don't request the feature. */
	if (hw->config.n_rq == 1) {
		hw->sli.config.features.flag.mrqp = FALSE;

	if (sli_init(&hw->sli)) {
		ocs_log_err(hw->os, "SLI failed to initialize\n");
		return OCS_HW_RTN_ERROR;
	/*
	 * Enable the auto xfer rdy feature if requested.
	 */
	hw->auto_xfer_rdy_enabled = FALSE;
	if (sli_get_auto_xfer_rdy_capable(&hw->sli) &&
	    hw->config.auto_xfer_rdy_size > 0) {
		if (hw->config.esoc) {
			if (ocs_get_property("ramdisc_blocksize", prop_buf, sizeof(prop_buf)) == 0) {
				ramdisc_blocksize = ocs_strtoul(prop_buf, 0, 0);
			written_size = sli_cmd_config_auto_xfer_rdy_hp(&hw->sli, buf, SLI4_BMBX_SIZE, hw->config.auto_xfer_rdy_size, 1, ramdisc_blocksize);
			written_size = sli_cmd_config_auto_xfer_rdy(&hw->sli, buf, SLI4_BMBX_SIZE, hw->config.auto_xfer_rdy_size);

		rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
		if (rc != OCS_HW_RTN_SUCCESS) {
			ocs_log_err(hw->os, "config auto xfer rdy failed\n");
		hw->auto_xfer_rdy_enabled = TRUE;

		if (hw->config.auto_xfer_rdy_t10_enable) {
			rc = ocs_hw_config_auto_xfer_rdy_t10pi(hw, buf);
			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_log_err(hw->os, "set parameters auto xfer rdy T10 PI failed\n");

	if (hw->sliport_healthcheck) {
		rc = ocs_hw_config_sli_port_health_check(hw, 0, 1);
		if (rc != OCS_HW_RTN_SUCCESS) {
			ocs_log_err(hw->os, "Enabling Sliport Health check failed\n");

	/*
	 * Set FDT transfer hint; only works on Lancer.
	 */
	if ((hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) && (OCS_HW_FDT_XFER_HINT != 0)) {
		/*
		 * Non-fatal error. In particular, we can disregard failure to set OCS_HW_FDT_XFER_HINT on
		 * devices with legacy firmware that do not support the OCS_HW_FDT_XFER_HINT feature.
		 */
		ocs_hw_config_set_fdt_xfer_hint(hw, OCS_HW_FDT_XFER_HINT);
	/*
	 * Verify that we have not exceeded any queue sizes.
	 */
	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_EQ),
		      OCS_HW_MAX_NUM_EQ);
	if (hw->config.n_eq > q_count) {
		ocs_log_err(hw->os, "requested %d EQ but %d allowed\n",
			    hw->config.n_eq, q_count);
		return OCS_HW_RTN_ERROR;

	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_CQ),
		      OCS_HW_MAX_NUM_CQ);
	if (hw->config.n_cq > q_count) {
		ocs_log_err(hw->os, "requested %d CQ but %d allowed\n",
			    hw->config.n_cq, q_count);
		return OCS_HW_RTN_ERROR;

	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_MQ),
		      OCS_HW_MAX_NUM_MQ);
	if (hw->config.n_mq > q_count) {
		ocs_log_err(hw->os, "requested %d MQ but %d allowed\n",
			    hw->config.n_mq, q_count);
		return OCS_HW_RTN_ERROR;

	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_RQ),
		      OCS_HW_MAX_NUM_RQ);
	if (hw->config.n_rq > q_count) {
		ocs_log_err(hw->os, "requested %d RQ but %d allowed\n",
			    hw->config.n_rq, q_count);
		return OCS_HW_RTN_ERROR;

	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_WQ),
		      OCS_HW_MAX_NUM_WQ);
	if (hw->config.n_wq > q_count) {
		ocs_log_err(hw->os, "requested %d WQ but %d allowed\n",
			    hw->config.n_wq, q_count);
		return OCS_HW_RTN_ERROR;

	/* zero the hashes */
	ocs_memset(hw->cq_hash, 0, sizeof(hw->cq_hash));
	ocs_log_debug(hw->os, "Max CQs %d, hash size = %d\n",
		      OCS_HW_MAX_NUM_CQ, OCS_HW_Q_HASH_SIZE);

	ocs_memset(hw->rq_hash, 0, sizeof(hw->rq_hash));
	ocs_log_debug(hw->os, "Max RQs %d, hash size = %d\n",
		      OCS_HW_MAX_NUM_RQ, OCS_HW_Q_HASH_SIZE);

	ocs_memset(hw->wq_hash, 0, sizeof(hw->wq_hash));
	ocs_log_debug(hw->os, "Max WQs %d, hash size = %d\n",
		      OCS_HW_MAX_NUM_WQ, OCS_HW_Q_HASH_SIZE);
	rc = ocs_hw_init_queues(hw, hw->qtop);
	if (rc != OCS_HW_RTN_SUCCESS) {

	max_rpi = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
	i = sli_fc_get_rpi_requirements(&hw->sli, max_rpi);
		ocs_dma_t payload_memory;

		rc = OCS_HW_RTN_ERROR;

		if (hw->rnode_mem.size) {
			ocs_dma_free(hw->os, &hw->rnode_mem);

		if (ocs_dma_alloc(hw->os, &hw->rnode_mem, i, 4096)) {
			ocs_log_err(hw->os, "remote node memory allocation fail\n");
			return OCS_HW_RTN_NO_MEMORY;

		payload_memory.size = 0;
		if (sli_cmd_fcoe_post_hdr_templates(&hw->sli, buf, SLI4_BMBX_SIZE,
		    &hw->rnode_mem, UINT16_MAX, &payload_memory)) {
			rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);

			if (payload_memory.size != 0) {
				/* The command was non-embedded - need to free the dma buffer */
				ocs_dma_free(hw->os, &payload_memory);

		if (rc != OCS_HW_RTN_SUCCESS) {
			ocs_log_err(hw->os, "header template registration failed\n");

	/* Allocate and post RQ buffers */
	rc = ocs_hw_rx_allocate(hw);
		ocs_log_err(hw->os, "rx_allocate failed\n");
	/* Populate hw->seq_free_list */
	if (hw->seq_pool == NULL) {

		/* Sum up the total number of RQ entries, used to allocate the sequence object pool */
		for (i = 0; i < hw->hw_rq_count; i++) {
			count += hw->hw_rq[i]->entry_count;

		hw->seq_pool = ocs_array_alloc(hw->os, sizeof(ocs_hw_sequence_t), count);
		if (hw->seq_pool == NULL) {
			ocs_log_err(hw->os, "malloc seq_pool failed\n");
			return OCS_HW_RTN_NO_MEMORY;

	if (ocs_hw_rx_post(hw)) {
		ocs_log_err(hw->os, "WARNING - error posting RQ buffers\n");

	/* Allocate rpi_ref if not previously allocated */
	if (hw->rpi_ref == NULL) {
		hw->rpi_ref = ocs_malloc(hw->os, max_rpi * sizeof(*hw->rpi_ref),
					 OCS_M_ZERO | OCS_M_NOWAIT);
		if (hw->rpi_ref == NULL) {
			ocs_log_err(hw->os, "rpi_ref allocation failure (%d)\n", i);
			return OCS_HW_RTN_NO_MEMORY;

	for (i = 0; i < max_rpi; i++) {
		ocs_atomic_init(&hw->rpi_ref[i].rpi_count, 0);
		ocs_atomic_init(&hw->rpi_ref[i].rpi_attached, 0);
	ocs_memset(hw->domains, 0, sizeof(hw->domains));

	/* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
	if (hw->workaround.override_fcfi) {
		hw->first_domain_idx = -1;

	ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));

	/* Register a FCFI to allow unsolicited frames to be routed to the driver */
	if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) {
		if (hw->hw_mrq_count) {
			ocs_log_debug(hw->os, "using REG_FCFI MRQ\n");

			rc = ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE, 0, 0);
			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_log_err(hw->os, "REG_FCFI_MRQ FCFI registration failed\n");

			rc = ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_MRQ_MODE, 0, 0);
			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_log_err(hw->os, "REG_FCFI_MRQ MRQ registration failed\n");

			sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];

			ocs_log_debug(hw->os, "using REG_FCFI standard\n");

			/* Set the filter match/mask values from hw's filter_def values */
			for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
				rq_cfg[i].rq_id = 0xffff;
				rq_cfg[i].r_ctl_mask = (uint8_t) hw->config.filter_def[i];
				rq_cfg[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8);
				rq_cfg[i].type_mask = (uint8_t) (hw->config.filter_def[i] >> 16);
				rq_cfg[i].type_match = (uint8_t) (hw->config.filter_def[i] >> 24);
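			/*
			 * For reference, a sketch of the packing implied by the
			 * shifts above: each 32-bit filter_def word carries the
			 * four REG_FCFI filter fields, one per byte, least
			 * significant byte first:
			 *
			 *	bits  7:0	r_ctl_mask
			 *	bits 15:8	r_ctl_match
			 *	bits 23:16	type_mask
			 *	bits 31:24	type_match
			 *
			 * So a hypothetical filter_def[i] of 0x08ff0001 would
			 * yield r_ctl_mask=0x01, r_ctl_match=0x00,
			 * type_mask=0xff and type_match=0x08.
			 */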
			/*
			 * Update the rq_id's of the FCF configuration (don't update more than the number
			 * of rq_cfg elements)
			 */
			for (i = 0; i < OCS_MIN(hw->hw_rq_count, SLI4_CMD_REG_FCFI_NUM_RQ_CFG); i++) {
				hw_rq_t *rq = hw->hw_rq[i];
				for (j = 0; j < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; j++) {
					uint32_t mask = (rq->filter_mask != 0) ? rq->filter_mask : 1;
					if (mask & (1U << j)) {
						rq_cfg[j].rq_id = rq->hdr->id;
						ocs_log_debug(hw->os, "REG_FCFI: filter[%d] %08X -> RQ[%d] id=%d\n",
							      j, hw->config.filter_def[j], i, rq->hdr->id);

			rc = OCS_HW_RTN_ERROR;

			if (sli_cmd_reg_fcfi(&hw->sli, buf, SLI4_BMBX_SIZE, 0, rq_cfg, 0)) {
				rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);

			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_log_err(hw->os, "FCFI registration failed\n");

			hw->fcf_indicator = ((sli4_cmd_reg_fcfi_t *)buf)->fcfi;

	/*
	 * Allocate the WQ request tag pool, if not previously allocated (the request tag value is 16 bits,
	 * thus the pool allocation size of 64k).
	 */
	rc = ocs_hw_reqtag_init(hw);
		ocs_log_err(hw->os, "ocs_pool_alloc hw_wq_callback_t failed: %d\n", rc);

	rc = ocs_hw_setup_io(hw);
		ocs_log_err(hw->os, "IO allocation failure\n");

	rc = ocs_hw_init_io(hw);
		ocs_log_err(hw->os, "IO initialization failure\n");

	ocs_queue_history_init(hw->os, &hw->q_hist);
	/* get hw link config; polling, so callback will be called immediately */
	hw->linkcfg = OCS_HW_LINKCFG_NA;
	ocs_hw_get_linkcfg(hw, OCS_CMD_POLL, ocs_hw_init_linkcfg_cb, hw);

	/* if lancer ethernet, ethernet ports need to be enabled */
	if ((hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) &&
	    (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_ETHERNET)) {
		if (ocs_hw_set_eth_license(hw, hw->eth_license)) {
			/* log warning but continue */
			ocs_log_err(hw->os, "Failed to set ethernet license\n");

	/* Set the DIF seed - only for lancer right now */
	if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli) &&
	    ocs_hw_set_dif_seed(hw) != OCS_HW_RTN_SUCCESS) {
		ocs_log_err(hw->os, "Failed to set DIF seed value\n");

	/* Set the DIF mode - skyhawk only */
	if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli) &&
	    sli_get_dif_capable(&hw->sli)) {
		rc = ocs_hw_set_dif_mode(hw);
		if (rc != OCS_HW_RTN_SUCCESS) {
			ocs_log_err(hw->os, "Failed to set DIF mode value\n");

	/*
	 * Arming the EQ allows (e.g.) interrupts when CQ completions write EQ entries.
	 */
	for (i = 0; i < hw->eq_count; i++) {
		sli_queue_arm(&hw->sli, &hw->eq[i], TRUE);

	for (i = 0; i < hw->rq_count; i++) {
		ocs_hw_queue_hash_add(hw->rq_hash, hw->rq[i].id, i);

	for (i = 0; i < hw->wq_count; i++) {
		ocs_hw_queue_hash_add(hw->wq_hash, hw->wq[i].id, i);

	/*
	 * Arming the CQ allows (e.g.) MQ completions to write CQ entries.
	 */
	for (i = 0; i < hw->cq_count; i++) {
		ocs_hw_queue_hash_add(hw->cq_hash, hw->cq[i].id, i);
		sli_queue_arm(&hw->sli, &hw->cq[i], TRUE);
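	/*
	 * The queue hashes populated above provide constant-time translation
	 * from a hardware queue id to the driver's queue array index; a
	 * sketch of the lookup side (ocs_hw_queue_hash_find() is the real
	 * lookup, used for example in ocs_hw_eq_process() below):
	 *
	 *	int32_t index = ocs_hw_queue_hash_find(hw->cq_hash, cq_id);
	 *	if (index >= 0)
	 *		... hw->hw_cq[index] is the CQ with id cq_id ...
	 */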
	/* record the fact that the queues are functional */
	hw->state = OCS_HW_STATE_ACTIVE;

	/* Note: Must be after the IOs are setup and the state is active */
	if (ocs_hw_rqpair_init(hw)) {
		ocs_log_err(hw->os, "WARNING - error initializing RQ pair\n");

	/* finally kick off periodic timer to check for timed out target WQEs */
	if (hw->config.emulate_tgt_wqe_timeout) {
		ocs_setup_timer(hw->os, &hw->wqe_timer, target_wqe_timer_cb, hw,
				OCS_HW_WQ_TIMER_PERIOD_MS);

	/*
	 * Allocate HW IOs for send frame. Allocate one for each Class 1 WQ, or if there
	 * are none of those, allocate one for WQ[0].
	 */
	if ((count = ocs_varray_get_count(hw->wq_class_array[1])) > 0) {
		for (i = 0; i < count; i++) {
			hw_wq_t *wq = ocs_varray_iter_next(hw->wq_class_array[1]);
			wq->send_frame_io = ocs_hw_io_alloc(hw);
			if (wq->send_frame_io == NULL) {
				ocs_log_err(hw->os, "ocs_hw_io_alloc for send_frame_io failed\n");

		hw->hw_wq[0]->send_frame_io = ocs_hw_io_alloc(hw);
		if (hw->hw_wq[0]->send_frame_io == NULL) {
			ocs_log_err(hw->os, "ocs_hw_io_alloc for send_frame_io failed\n");

	/* Initialize send frame sequence id */
	ocs_atomic_init(&hw->send_frame_seq_id, 0);

	/* Initialize watchdog timer if enabled by user */
	hw->expiration_logged = 0;
	if (hw->watchdog_timeout) {
		if ((hw->watchdog_timeout < 1) || (hw->watchdog_timeout > 65534)) {
			ocs_log_err(hw->os, "watchdog_timeout out of range: Valid range is 1 - 65534\n");
		} else if (!ocs_hw_config_watchdog_timer(hw)) {
			ocs_log_info(hw->os, "watchdog timer configured with timeout = %d seconds\n", hw->watchdog_timeout);

	if (ocs_dma_alloc(hw->os, &hw->domain_dmem, 112, 4)) {
		ocs_log_err(hw->os, "domain node memory allocation fail\n");
		return OCS_HW_RTN_NO_MEMORY;

	if (ocs_dma_alloc(hw->os, &hw->fcf_dmem, OCS_HW_READ_FCF_SIZE, OCS_HW_READ_FCF_SIZE)) {
		ocs_log_err(hw->os, "domain fcf memory allocation fail\n");
		return OCS_HW_RTN_NO_MEMORY;

	if ((0 == hw->loop_map.size) && ocs_dma_alloc(hw->os, &hw->loop_map,
	    SLI4_MIN_LOOP_MAP_BYTES, 4)) {
		ocs_log_err(hw->os, "Loop dma alloc failed size: %d\n", hw->loop_map.size);

	return OCS_HW_RTN_SUCCESS;
/**
 * @brief Configure Multi-RQ.
 *
 * @param hw Hardware context allocated by the caller.
 * @param mode 1 to set MRQ filters and 0 to set FCFI index.
 * @param vlanid Valid in mode 0.
 * @param fcf_index Valid in mode 0.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_config_mrq(ocs_hw_t *hw, uint8_t mode, uint16_t vlanid, uint16_t fcf_index)
	uint8_t buf[SLI4_BMBX_SIZE], mrq_bitmask = 0;
	sli4_cmd_reg_fcfi_mrq_t *rsp = NULL;
	sli4_cmd_rq_cfg_t rq_filter[SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG];

	if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {

	/* Set the filter match/mask values from hw's filter_def values */
	for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
		rq_filter[i].rq_id = 0xffff;
		rq_filter[i].r_ctl_mask = (uint8_t) hw->config.filter_def[i];
		rq_filter[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8);
		rq_filter[i].type_mask = (uint8_t) (hw->config.filter_def[i] >> 16);
		rq_filter[i].type_match = (uint8_t) (hw->config.filter_def[i] >> 24);

	/* Accumulate counts for each filter type used, build rq_ids[] list */
	for (i = 0; i < hw->hw_rq_count; i++) {
		for (j = 0; j < SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG; j++) {
			if (rq->filter_mask & (1U << j)) {
				if (rq_filter[j].rq_id != 0xffff) {
					/* Already used. Bail out if it's not the RQ set case. */
					if (!rq->is_mrq || (rq_filter[j].rq_id != rq->base_mrq_id)) {
						ocs_log_err(hw->os, "Wrong queue topology.\n");
						return OCS_HW_RTN_ERROR;

				rq_filter[j].rq_id = rq->base_mrq_id;
				mrq_bitmask |= (1U << j);
					rq_filter[j].rq_id = rq->hdr->id;

	/* Invoke REG_FCFI_MRQ */
	rc = sli_cmd_reg_fcfi_mrq(&hw->sli,
				  buf,				/* buf */
				  SLI4_BMBX_SIZE,		/* size */
				  mode,				/* mode */
				  fcf_index,			/* fcf_index */
				  vlanid,			/* vlan_id */
				  hw->config.rq_selection_policy, /* RQ selection policy */
				  mrq_bitmask,			/* MRQ bitmask */
				  hw->hw_mrq_count,		/* num_mrqs */
				  rq_filter);			/* RQ filter */
		ocs_log_err(hw->os, "sli_cmd_reg_fcfi_mrq() failed: %d\n", rc);
		return OCS_HW_RTN_ERROR;

	rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);

	rsp = (sli4_cmd_reg_fcfi_mrq_t *)buf;

	if ((rc != OCS_HW_RTN_SUCCESS) || (rsp->hdr.status)) {
		ocs_log_err(hw->os, "FCFI MRQ registration failed. cmd = %x status = %x\n",
			    rsp->hdr.command, rsp->hdr.status);
		return OCS_HW_RTN_ERROR;

	if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
		hw->fcf_indicator = rsp->fcfi;
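/*
 * As used from ocs_hw_init() above, this routine is invoked once per mode:
 * first with SLI4_CMD_REG_FCFI_SET_FCFI_MODE, then with
 * SLI4_CMD_REG_FCFI_SET_MRQ_MODE; only the FCFI-mode call records the
 * returned FCFI in hw->fcf_indicator.
 */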
/**
 * @brief Callback function for getting linkcfg during HW initialization.
 *
 * @param status Status of the linkcfg get operation.
 * @param value Link configuration enum to which the link configuration is set.
 * @param arg Callback argument (ocs_hw_t *).
 */
ocs_hw_init_linkcfg_cb(int32_t status, uintptr_t value, void *arg)
	ocs_hw_t *hw = (ocs_hw_t *)arg;
		hw->linkcfg = (ocs_hw_linkcfg_e)value;
		hw->linkcfg = OCS_HW_LINKCFG_NA;
	ocs_log_debug(hw->os, "linkcfg=%d\n", hw->linkcfg);
/**
 * @ingroup devInitShutdown
 * @brief Tear down the Hardware Abstraction Layer module.
 *
 * Frees memory structures needed by the device, and shuts down the device. Does
 * not free the HW context memory (which is done by the caller).
 *
 * @param hw Hardware context allocated by the caller.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_teardown(ocs_hw_t *hw)
	uint32_t iters = 10; /* XXX */
	uint32_t destroy_queues;
	uint32_t free_memory;

		ocs_log_err(NULL, "bad parameter(s) hw=%p\n", hw);
		return OCS_HW_RTN_ERROR;

	destroy_queues = (hw->state == OCS_HW_STATE_ACTIVE);
	free_memory = (hw->state != OCS_HW_STATE_UNINITIALIZED);

	/* shutdown target wqe timer */
	shutdown_target_wqe_timer(hw);

	/* Cancel watchdog timer if enabled */
	if (hw->watchdog_timeout) {
		hw->watchdog_timeout = 0;
		ocs_hw_config_watchdog_timer(hw);

	/* Cancel Sliport Healthcheck */
	if (hw->sliport_healthcheck) {
		hw->sliport_healthcheck = 0;
		ocs_hw_config_sli_port_health_check(hw, 0, 0);

	if (hw->state != OCS_HW_STATE_QUEUES_ALLOCATED) {
		hw->state = OCS_HW_STATE_TEARDOWN_IN_PROGRESS;

		/* If there are outstanding commands, wait for them to complete */
		while (!ocs_list_empty(&hw->cmd_head) && iters) {

		if (ocs_list_empty(&hw->cmd_head)) {
			ocs_log_debug(hw->os, "All commands completed on MQ queue\n");
			ocs_log_debug(hw->os, "Some commands still pending on MQ queue\n");

		/* Cancel any remaining commands */
		ocs_hw_command_cancel(hw);
		hw->state = OCS_HW_STATE_TEARDOWN_IN_PROGRESS;

	ocs_lock_free(&hw->cmd_lock);

	/* Free unregistered RPI if workaround is in force */
	if (hw->workaround.use_unregistered_rpi) {
		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, hw->workaround.unregistered_rid);

	max_rpi = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
		for (i = 0; i < max_rpi; i++) {
			if (ocs_atomic_read(&hw->rpi_ref[i].rpi_count)) {
				ocs_log_debug(hw->os, "non-zero ref [%d]=%d\n",
					      i, ocs_atomic_read(&hw->rpi_ref[i].rpi_count));
		ocs_free(hw->os, hw->rpi_ref, max_rpi * sizeof(*hw->rpi_ref));

	ocs_dma_free(hw->os, &hw->rnode_mem);

	for (i = 0; i < hw->config.n_io; i++) {
		if (hw->io[i] && (hw->io[i]->sgl != NULL) &&
		    (hw->io[i]->sgl->virt != NULL)) {
			if (hw->io[i]->is_port_owned) {
				ocs_lock_free(&hw->io[i]->axr_lock);
			ocs_dma_free(hw->os, hw->io[i]->sgl);
		ocs_free(hw->os, hw->io[i], sizeof(ocs_hw_io_t));

	ocs_free(hw->os, hw->wqe_buffs, hw->config.n_io * hw->sli.config.wqe_size);
	hw->wqe_buffs = NULL;
	ocs_free(hw->os, hw->io, hw->config.n_io * sizeof(ocs_hw_io_t *));

	ocs_dma_free(hw->os, &hw->xfer_rdy);
	ocs_dma_free(hw->os, &hw->dump_sges);
	ocs_dma_free(hw->os, &hw->loop_map);

	ocs_lock_free(&hw->io_lock);
	ocs_lock_free(&hw->io_abort_lock);

	for (i = 0; i < hw->wq_count; i++) {
		sli_queue_free(&hw->sli, &hw->wq[i], destroy_queues, free_memory);

	for (i = 0; i < hw->rq_count; i++) {
		sli_queue_free(&hw->sli, &hw->rq[i], destroy_queues, free_memory);

	for (i = 0; i < hw->mq_count; i++) {
		sli_queue_free(&hw->sli, &hw->mq[i], destroy_queues, free_memory);

	for (i = 0; i < hw->cq_count; i++) {
		sli_queue_free(&hw->sli, &hw->cq[i], destroy_queues, free_memory);

	for (i = 0; i < hw->eq_count; i++) {
		sli_queue_free(&hw->sli, &hw->eq[i], destroy_queues, free_memory);

	ocs_hw_qtop_free(hw->qtop);

	/* Free rq buffers */

	hw_queue_teardown(hw);

	ocs_hw_rqpair_teardown(hw);

	if (sli_teardown(&hw->sli)) {
		ocs_log_err(hw->os, "SLI teardown failed\n");

	ocs_queue_history_free(&hw->q_hist);

	/* record the fact that the queues are non-functional */
	hw->state = OCS_HW_STATE_UNINITIALIZED;

	/* free sequence free pool */
	ocs_array_free(hw->seq_pool);
	hw->seq_pool = NULL;

	/* free hw_wq_callback pool */
	ocs_pool_free(hw->wq_reqtag_pool);

	ocs_dma_free(hw->os, &hw->domain_dmem);
	ocs_dma_free(hw->os, &hw->fcf_dmem);
	/* Mark HW setup as not having been called */
	hw->hw_setup_called = FALSE;

	return OCS_HW_RTN_SUCCESS;
ocs_hw_reset(ocs_hw_t *hw, ocs_hw_reset_e reset)
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
	ocs_hw_state_e prev_state = hw->state;

	if (hw->state != OCS_HW_STATE_ACTIVE) {
		ocs_log_test(hw->os, "HW state %d is not active\n", hw->state);

	hw->state = OCS_HW_STATE_RESET_IN_PROGRESS;

	/* shutdown target wqe timer */
	shutdown_target_wqe_timer(hw);

	/*
	 * If a mailbox command requiring a DMA is outstanding (i.e. SFP/DDM),
	 * then the FW will UE when the reset is issued. So attempt to complete
	 * all mailbox commands.
	 */
	while (!ocs_list_empty(&hw->cmd_head) && iters) {

	if (ocs_list_empty(&hw->cmd_head)) {
		ocs_log_debug(hw->os, "All commands completed on MQ queue\n");
		ocs_log_debug(hw->os, "Some commands still pending on MQ queue\n");

	/* Reset the chip */
	case OCS_HW_RESET_FUNCTION:
		ocs_log_debug(hw->os, "issuing function level reset\n");
		if (sli_reset(&hw->sli)) {
			ocs_log_err(hw->os, "sli_reset failed\n");
			rc = OCS_HW_RTN_ERROR;
	case OCS_HW_RESET_FIRMWARE:
		ocs_log_debug(hw->os, "issuing firmware reset\n");
		if (sli_fw_reset(&hw->sli)) {
			ocs_log_err(hw->os, "sli_fw_reset failed\n");
			rc = OCS_HW_RTN_ERROR;
		/*
		 * Because the FW reset leaves the FW in a non-running state,
		 * follow that with a regular reset.
		 */
		ocs_log_debug(hw->os, "issuing function level reset\n");
		if (sli_reset(&hw->sli)) {
			ocs_log_err(hw->os, "sli_reset failed\n");
			rc = OCS_HW_RTN_ERROR;
		ocs_log_test(hw->os, "unknown reset type - no reset performed\n");
		hw->state = prev_state;
		return OCS_HW_RTN_ERROR;
	/* Not safe to walk command/io lists unless they've been initialized */
	if (prev_state != OCS_HW_STATE_UNINITIALIZED) {
		ocs_hw_command_cancel(hw);

		/* Clean up the inuse list, the free list and the wait free list */
		ocs_hw_io_cancel(hw);

		ocs_memset(hw->domains, 0, sizeof(hw->domains));
		ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));

		ocs_hw_link_event_init(hw);

		ocs_lock(&hw->io_lock);
		/* The io lists should be empty, but remove any that didn't get cleaned up. */
		while (!ocs_list_empty(&hw->io_timed_wqe)) {
			ocs_list_remove_head(&hw->io_timed_wqe);
		/* Don't clean up the io_inuse list, the backend will do that when it finishes the IO */

		while (!ocs_list_empty(&hw->io_free)) {
			ocs_list_remove_head(&hw->io_free);
		while (!ocs_list_empty(&hw->io_wait_free)) {
			ocs_list_remove_head(&hw->io_wait_free);

		/* Reset the request tag pool, the HW IO request tags are reassigned in ocs_hw_setup_io() */
		ocs_hw_reqtag_reset(hw);

		ocs_unlock(&hw->io_lock);

	if (prev_state != OCS_HW_STATE_UNINITIALIZED) {
		for (i = 0; i < hw->wq_count; i++) {
			sli_queue_reset(&hw->sli, &hw->wq[i]);

		for (i = 0; i < hw->rq_count; i++) {
			sli_queue_reset(&hw->sli, &hw->rq[i]);

		for (i = 0; i < hw->hw_rq_count; i++) {
			hw_rq_t *rq = hw->hw_rq[i];
			if (rq->rq_tracker != NULL) {
				for (j = 0; j < rq->entry_count; j++) {
					rq->rq_tracker[j] = NULL;

		for (i = 0; i < hw->mq_count; i++) {
			sli_queue_reset(&hw->sli, &hw->mq[i]);

		for (i = 0; i < hw->cq_count; i++) {
			sli_queue_reset(&hw->sli, &hw->cq[i]);

		for (i = 0; i < hw->eq_count; i++) {
			sli_queue_reset(&hw->sli, &hw->eq[i]);

	/* Free rq buffers */

	/* Teardown the HW queue topology */
	hw_queue_teardown(hw);

	/* Free rq buffers */

	/*
	 * Re-apply the run-time workarounds after clearing the SLI config
	 * fields in sli_reset.
	 */
	ocs_hw_workaround_setup(hw);
	hw->state = OCS_HW_STATE_QUEUES_ALLOCATED;
ocs_hw_get_num_eq(ocs_hw_t *hw)
	return hw->eq_count;

ocs_hw_get_fw_timed_out(ocs_hw_t *hw)
	/* The error values below are taken from LOWLEVEL_SET_WATCHDOG_TIMER_rev1.pdf.
	 * No further explanation is given in the document.
	 */
	return (sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR1) == 0x2 &&
		sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR2) == 0x10);
ocs_hw_get(ocs_hw_t *hw, ocs_hw_property_e prop, uint32_t *value)
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;

		return OCS_HW_RTN_ERROR;

		*value = hw->config.n_io;
		*value = (hw->config.n_sgl - SLI4_SGE_MAX_RESERVED);
		*value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI);
	case OCS_HW_MAX_NODES:
		*value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
	case OCS_HW_MAX_RQ_ENTRIES:
		*value = hw->num_qentries[SLI_QTYPE_RQ];
	case OCS_HW_RQ_DEFAULT_BUFFER_SIZE:
		*value = hw->config.rq_default_buffer_size;
	case OCS_HW_AUTO_XFER_RDY_CAPABLE:
		*value = sli_get_auto_xfer_rdy_capable(&hw->sli);
	case OCS_HW_AUTO_XFER_RDY_XRI_CNT:
		*value = hw->config.auto_xfer_rdy_xri_cnt;
	case OCS_HW_AUTO_XFER_RDY_SIZE:
		*value = hw->config.auto_xfer_rdy_size;
	case OCS_HW_AUTO_XFER_RDY_BLK_SIZE:
		switch (hw->config.auto_xfer_rdy_blk_size_chip) {
			rc = OCS_HW_RTN_ERROR;

	case OCS_HW_AUTO_XFER_RDY_T10_ENABLE:
		*value = hw->config.auto_xfer_rdy_t10_enable;
	case OCS_HW_AUTO_XFER_RDY_P_TYPE:
		*value = hw->config.auto_xfer_rdy_p_type;
	case OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA:
		*value = hw->config.auto_xfer_rdy_ref_tag_is_lba;
	case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID:
		*value = hw->config.auto_xfer_rdy_app_tag_valid;
	case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE:
		*value = hw->config.auto_xfer_rdy_app_tag_value;
	case OCS_HW_MAX_SGE:
		*value = sli_get_max_sge(&hw->sli);
	case OCS_HW_MAX_SGL:
		*value = sli_get_max_sgl(&hw->sli);
	case OCS_HW_TOPOLOGY:
		/*
		 * Infer link.status based on link.speed.
		 * Report OCS_HW_TOPOLOGY_NONE if the link is down.
		 */
		if (hw->link.speed == 0) {
			*value = OCS_HW_TOPOLOGY_NONE;

		switch (hw->link.topology) {
		case SLI_LINK_TOPO_NPORT:
			*value = OCS_HW_TOPOLOGY_NPORT;
		case SLI_LINK_TOPO_LOOP:
			*value = OCS_HW_TOPOLOGY_LOOP;
		case SLI_LINK_TOPO_NONE:
			*value = OCS_HW_TOPOLOGY_NONE;
			ocs_log_test(hw->os, "unsupported topology %#x\n", hw->link.topology);
			rc = OCS_HW_RTN_ERROR;

	case OCS_HW_CONFIG_TOPOLOGY:
		*value = hw->config.topology;
	case OCS_HW_LINK_SPEED:
		*value = hw->link.speed;
	case OCS_HW_LINK_CONFIG_SPEED:
		switch (hw->config.speed) {
		case FC_LINK_SPEED_10G:
		case FC_LINK_SPEED_AUTO_16_8_4:
		case FC_LINK_SPEED_2G:
		case FC_LINK_SPEED_4G:
		case FC_LINK_SPEED_8G:
		case FC_LINK_SPEED_16G:
		case FC_LINK_SPEED_32G:
			ocs_log_test(hw->os, "unsupported speed %#x\n", hw->config.speed);
			rc = OCS_HW_RTN_ERROR;
	case OCS_HW_IF_TYPE:
		*value = sli_get_if_type(&hw->sli);
	case OCS_HW_SLI_REV:
		*value = sli_get_sli_rev(&hw->sli);
	case OCS_HW_SLI_FAMILY:
		*value = sli_get_sli_family(&hw->sli);
	case OCS_HW_DIF_CAPABLE:
		*value = sli_get_dif_capable(&hw->sli);
	case OCS_HW_DIF_SEED:
		*value = hw->config.dif_seed;
	case OCS_HW_DIF_MODE:
		*value = hw->config.dif_mode;
	case OCS_HW_DIF_MULTI_SEPARATE:
		/* Lancer supports multiple DIF separates */
		if (hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) {
	case OCS_HW_DUMP_MAX_SIZE:
		*value = hw->dump_size;
	case OCS_HW_DUMP_READY:
		*value = sli_dump_is_ready(&hw->sli);
	case OCS_HW_DUMP_PRESENT:
		*value = sli_dump_is_present(&hw->sli);
	case OCS_HW_RESET_REQUIRED:
		tmp = sli_reset_required(&hw->sli);
			rc = OCS_HW_RTN_ERROR;

	case OCS_HW_FW_ERROR:
		*value = sli_fw_error_status(&hw->sli);
	case OCS_HW_FW_READY:
		*value = sli_fw_ready(&hw->sli);
	case OCS_HW_FW_TIMED_OUT:
		*value = ocs_hw_get_fw_timed_out(hw);
	case OCS_HW_HIGH_LOGIN_MODE:
		*value = sli_get_hlm_capable(&hw->sli);
	case OCS_HW_PREREGISTER_SGL:
		*value = sli_get_sgl_preregister_required(&hw->sli);
	case OCS_HW_HW_REV1:
		*value = sli_get_hw_revision(&hw->sli, 0);
	case OCS_HW_HW_REV2:
		*value = sli_get_hw_revision(&hw->sli, 1);
	case OCS_HW_HW_REV3:
		*value = sli_get_hw_revision(&hw->sli, 2);
	case OCS_HW_LINKCFG:
		*value = hw->linkcfg;
	case OCS_HW_ETH_LICENSE:
		*value = hw->eth_license;
	case OCS_HW_LINK_MODULE_TYPE:
		*value = sli_get_link_module_type(&hw->sli);
	case OCS_HW_NUM_CHUTES:
		*value = ocs_hw_get_num_chutes(hw);
	case OCS_HW_DISABLE_AR_TGT_DIF:
		*value = hw->workaround.disable_ar_tgt_dif;
	case OCS_HW_EMULATE_I_ONLY_AAB:
		*value = hw->config.i_only_aab;
	case OCS_HW_EMULATE_TARGET_WQE_TIMEOUT:
		*value = hw->config.emulate_tgt_wqe_timeout;
	case OCS_HW_VPD_LEN:
		*value = sli_get_vpd_len(&hw->sli);
	case OCS_HW_SGL_CHAINING_CAPABLE:
		*value = sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported;
	case OCS_HW_SGL_CHAINING_ALLOWED:
		/*
		 * SGL Chaining is allowed in the following cases:
		 *   1. Lancer with host SGL Lists
		 *   2. Skyhawk with pre-registered SGL Lists
		 */
		if ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
		    !sli_get_sgl_preregister(&hw->sli) &&
		    SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {

		if ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
		    sli_get_sgl_preregister(&hw->sli) &&
		    ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
		     (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli)))) {

	case OCS_HW_SGL_CHAINING_HOST_ALLOCATED:
		/* Only lancer supports host allocated SGL Chaining buffers. */
		*value = ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
			  (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)));
	case OCS_HW_SEND_FRAME_CAPABLE:
		if (hw->workaround.ignore_send_frame) {
		/* Only lancer is capable */
		*value = sli_get_if_type(&hw->sli) == SLI4_IF_TYPE_LANCER_FC_ETH;
	case OCS_HW_RQ_SELECTION_POLICY:
		*value = hw->config.rq_selection_policy;
	case OCS_HW_RR_QUANTA:
		*value = hw->config.rr_quanta;
	case OCS_HW_MAX_VPORTS:
		*value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_VPI);
		ocs_log_test(hw->os, "unsupported property %#x\n", prop);
		rc = OCS_HW_RTN_ERROR;
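/*
 * A sketch of typical ocs_hw_get() usage (illustrative only):
 *
 *	uint32_t max_nodes = 0;
 *
 *	if (ocs_hw_get(hw, OCS_HW_MAX_NODES, &max_nodes) == OCS_HW_RTN_SUCCESS)
 *		... max_nodes now holds the FCOE_RPI resource limit ...
 */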
ocs_hw_get_ptr(ocs_hw_t *hw, ocs_hw_property_e prop)
	case OCS_HW_WWN_NODE:
		rc = sli_get_wwn_node(&hw->sli);
	case OCS_HW_WWN_PORT:
		rc = sli_get_wwn_port(&hw->sli);
		/* make sure VPD length is non-zero */
		if (sli_get_vpd_len(&hw->sli)) {
			rc = sli_get_vpd(&hw->sli);

		rc = sli_get_fw_name(&hw->sli, 0);
	case OCS_HW_FW_REV2:
		rc = sli_get_fw_name(&hw->sli, 1);

		rc = sli_get_ipl_name(&hw->sli);
	case OCS_HW_PORTNUM:
		rc = sli_get_portnum(&hw->sli);
	case OCS_HW_BIOS_VERSION_STRING:
		rc = sli_get_bios_version_string(&hw->sli);
		ocs_log_test(hw->os, "unsupported property %#x\n", prop);
ocs_hw_set(ocs_hw_t *hw, ocs_hw_property_e prop, uint32_t value)
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;

		if (value > sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI) ||
			ocs_log_test(hw->os, "IO value out of range %d vs %d\n",
				     value, sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI));
			rc = OCS_HW_RTN_ERROR;
			hw->config.n_io = value;

		value += SLI4_SGE_MAX_RESERVED;
		if (value > sli_get_max_sgl(&hw->sli)) {
			ocs_log_test(hw->os, "SGL value out of range %d vs %d\n",
				     value, sli_get_max_sgl(&hw->sli));
			rc = OCS_HW_RTN_ERROR;
			hw->config.n_sgl = value;

	case OCS_HW_TOPOLOGY:
		if ((sli_get_medium(&hw->sli) != SLI_LINK_MEDIUM_FC) &&
		    (value != OCS_HW_TOPOLOGY_AUTO)) {
			ocs_log_test(hw->os, "unsupported topology=%#x medium=%#x\n",
				     value, sli_get_medium(&hw->sli));
			rc = OCS_HW_RTN_ERROR;

		case OCS_HW_TOPOLOGY_AUTO:
			if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) {
				sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC);
				sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FCOE);
		case OCS_HW_TOPOLOGY_NPORT:
			sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC_DA);
		case OCS_HW_TOPOLOGY_LOOP:
			sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC_AL);
			ocs_log_test(hw->os, "unsupported topology %#x\n", value);
			rc = OCS_HW_RTN_ERROR;
		hw->config.topology = value;
	case OCS_HW_LINK_SPEED:
		if (sli_get_medium(&hw->sli) != SLI_LINK_MEDIUM_FC) {
			case 0:		/* Auto-speed negotiation */
			case 10000:	/* FCoE speed */
				hw->config.speed = FC_LINK_SPEED_10G;
				ocs_log_test(hw->os, "unsupported speed=%#x medium=%#x\n",
					     value, sli_get_medium(&hw->sli));
				rc = OCS_HW_RTN_ERROR;

		case 0:		/* Auto-speed negotiation */
			hw->config.speed = FC_LINK_SPEED_AUTO_16_8_4;
		case 2000:	/* FC speeds */
			hw->config.speed = FC_LINK_SPEED_2G;
			hw->config.speed = FC_LINK_SPEED_4G;
			hw->config.speed = FC_LINK_SPEED_8G;
			hw->config.speed = FC_LINK_SPEED_16G;
			hw->config.speed = FC_LINK_SPEED_32G;
			ocs_log_test(hw->os, "unsupported speed %d\n", value);
			rc = OCS_HW_RTN_ERROR;

	case OCS_HW_DIF_SEED:
		/* Set the DIF seed - only for lancer right now */
		if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
			ocs_log_test(hw->os, "DIF seed not supported for this device\n");
			rc = OCS_HW_RTN_ERROR;
			hw->config.dif_seed = value;

	case OCS_HW_DIF_MODE:
		case OCS_HW_DIF_MODE_INLINE:
			/*
			 * Make sure we support inline DIF.
			 *
			 * Note: Having both bits clear means that we have old
			 * FW that doesn't set the bits.
			 */
			if (sli_is_dif_inline_capable(&hw->sli)) {
				hw->config.dif_mode = value;
				ocs_log_test(hw->os, "chip does not support DIF inline\n");
				rc = OCS_HW_RTN_ERROR;

		case OCS_HW_DIF_MODE_SEPARATE:
			/* Make sure we support DIF separates. */
			if (sli_is_dif_separate_capable(&hw->sli)) {
				hw->config.dif_mode = value;
				ocs_log_test(hw->os, "chip does not support DIF separate\n");
				rc = OCS_HW_RTN_ERROR;
	case OCS_HW_RQ_PROCESS_LIMIT: {
		/* For each hw_rq object, set its parent CQ limit value */
		for (i = 0; i < hw->hw_rq_count; i++) {
			hw->cq[rq->cq->instance].proc_limit = value;

	case OCS_HW_RQ_DEFAULT_BUFFER_SIZE:
		hw->config.rq_default_buffer_size = value;
	case OCS_HW_AUTO_XFER_RDY_XRI_CNT:
		hw->config.auto_xfer_rdy_xri_cnt = value;
	case OCS_HW_AUTO_XFER_RDY_SIZE:
		hw->config.auto_xfer_rdy_size = value;
	case OCS_HW_AUTO_XFER_RDY_BLK_SIZE:
			hw->config.auto_xfer_rdy_blk_size_chip = 0;
			hw->config.auto_xfer_rdy_blk_size_chip = 1;
			hw->config.auto_xfer_rdy_blk_size_chip = 2;
			hw->config.auto_xfer_rdy_blk_size_chip = 3;
			hw->config.auto_xfer_rdy_blk_size_chip = 4;
			ocs_log_err(hw->os, "Invalid block size %d\n",
				    value);
			rc = OCS_HW_RTN_ERROR;
	case OCS_HW_AUTO_XFER_RDY_T10_ENABLE:
		hw->config.auto_xfer_rdy_t10_enable = value;
	case OCS_HW_AUTO_XFER_RDY_P_TYPE:
		hw->config.auto_xfer_rdy_p_type = value;
	case OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA:
		hw->config.auto_xfer_rdy_ref_tag_is_lba = value;
	case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID:
		hw->config.auto_xfer_rdy_app_tag_valid = value;
	case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE:
		hw->config.auto_xfer_rdy_app_tag_value = value;
		hw->config.esoc = value;
	case OCS_HW_HIGH_LOGIN_MODE:
		rc = sli_set_hlm(&hw->sli, value);
	case OCS_HW_PREREGISTER_SGL:
		rc = sli_set_sgl_preregister(&hw->sli, value);
	case OCS_HW_ETH_LICENSE:
		hw->eth_license = value;
	case OCS_HW_EMULATE_I_ONLY_AAB:
		hw->config.i_only_aab = value;
	case OCS_HW_EMULATE_TARGET_WQE_TIMEOUT:
		hw->config.emulate_tgt_wqe_timeout = value;
		hw->config.bounce = value;
	case OCS_HW_RQ_SELECTION_POLICY:
		hw->config.rq_selection_policy = value;
	case OCS_HW_RR_QUANTA:
		hw->config.rr_quanta = value;
		ocs_log_test(hw->os, "unsupported property %#x\n", prop);
		rc = OCS_HW_RTN_ERROR;
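/*
 * A sketch of typical ocs_hw_set() usage (illustrative only; the 2048-byte
 * value is a hypothetical example):
 *
 *	if (ocs_hw_set(hw, OCS_HW_RQ_DEFAULT_BUFFER_SIZE, 2048) !=
 *	    OCS_HW_RTN_SUCCESS)
 *		... the value was rejected ...
 */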
ocs_hw_set_ptr(ocs_hw_t *hw, ocs_hw_property_e prop, void *value)
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;

	case OCS_HW_WAR_VERSION:
		hw->hw_war_version = value;
	case OCS_HW_FILTER_DEF: {

		for (idx = 0; idx < ARRAY_SIZE(hw->config.filter_def); idx++) {
			hw->config.filter_def[idx] = 0;

		for (idx = 0; (idx < ARRAY_SIZE(hw->config.filter_def)) && (p != NULL) && *p; ) {
			hw->config.filter_def[idx++] = ocs_strtoul(p, 0, 0);
			p = ocs_strchr(p, ',');

		ocs_log_test(hw->os, "unsupported property %#x\n", prop);
		rc = OCS_HW_RTN_ERROR;
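/*
 * The OCS_HW_FILTER_DEF value parsed above is a comma-separated string of up
 * to ARRAY_SIZE(hw->config.filter_def) 32-bit words; for example, a
 * hypothetical "0x7f,0x01ff0008" would program filter_def[0] and
 * filter_def[1] and leave the remaining entries zero.
 */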
/**
 * @ingroup interrupt
 * @brief Check for the events associated with the interrupt vector.
 *
 * @param hw Hardware context.
 * @param vector Zero-based interrupt vector number.
 *
 * @return Returns 0 on success, or a non-zero value on failure.
 */
ocs_hw_event_check(ocs_hw_t *hw, uint32_t vector)
		ocs_log_err(NULL, "HW context NULL?!?\n");

	if (vector > hw->eq_count) {
		ocs_log_err(hw->os, "vector %d. max %d\n",
			    vector, hw->eq_count);

	/*
	 * The caller should disable interrupts if they wish to prevent us
	 * from processing during a shutdown. The following states are defined:
	 *   OCS_HW_STATE_UNINITIALIZED - No queues allocated
	 *   OCS_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset,
	 *                                   queues are cleared.
	 *   OCS_HW_STATE_ACTIVE - Chip and queues are operational
	 *   OCS_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions
	 *   OCS_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox
	 *                                       completions.
	 */
	if (hw->state != OCS_HW_STATE_UNINITIALIZED) {
		rc = sli_queue_is_empty(&hw->sli, &hw->eq[vector]);

		/* Re-arm queue if there are no entries */
			sli_queue_arm(&hw->sli, &hw->eq[vector], TRUE);
2103 ocs_hw_unsol_process_bounce(void *arg)
2105 ocs_hw_sequence_t *seq = arg;
2106 ocs_hw_t *hw = seq->hw;
2108 ocs_hw_assert(hw != NULL);
2109 ocs_hw_assert(hw->callback.unsolicited != NULL);
2111 hw->callback.unsolicited(hw->args.unsolicited, seq);
2115 ocs_hw_process(ocs_hw_t *hw, uint32_t vector, uint32_t max_isr_time_msec)
2123 * The caller should disable interrupts if they wish to prevent us
2124 * from processing during a shutdown. The following states are defined:
2125 * OCS_HW_STATE_UNINITIALIZED - No queues allocated
2126 * OCS_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset,
2127 * queues are cleared.
2128 * OCS_HW_STATE_ACTIVE - Chip and queues are operational
2129 * OCS_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions
2130 * OCS_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox
2133 if (hw->state == OCS_HW_STATE_UNINITIALIZED) {
2137 /* Get pointer to hw_eq_t */
2138 eq = hw->hw_eq[vector];
2140 OCS_STAT(eq->use_count++);
2142 rc = ocs_hw_eq_process(hw, eq, max_isr_time_msec);
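/*
 * Usage sketch (illustrative, not part of the driver): a minimal
 * interrupt handler built from ocs_hw_event_check() and
 * ocs_hw_process(), assuming, per the body of ocs_hw_event_check()
 * above, that a non-zero return indicates an empty (and re-armed) EQ.
 * The 100 msec budget is a hypothetical value chosen for the example.
 */
static void
example_hw_isr(ocs_hw_t *hw, uint32_t vector)
{
	if (ocs_hw_event_check(hw, vector)) {
		/* Nothing pending on this EQ; it has been re-armed */
		return;
	}

	/* Drain EQ/CQ completions, yielding after at most 100 msec */
	ocs_hw_process(hw, vector, 100);
}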
2148 * @ingroup interrupt
2149 * @brief Process events associated with an EQ.
2153 * @n @n Without a mechanism to terminate the completion processing loop, it
2154 * is possible under some workload conditions for the loop to never terminate,
2155 * or at least to run longer than the OS will tolerate an interrupt handler
2156 * or kernel thread context holding a CPU without yielding.
2157 * @n @n The approach taken here is to periodically check how much time
2158 * has been spent in this processing loop; if a predetermined limit
2159 * (multiple seconds) is exceeded, the loop is terminated and
2160 * ocs_hw_process() returns.
2162 * @param hw Hardware context.
2163 * @param eq Pointer to HW EQ object.
2164 * @param max_isr_time_msec Maximum time in msec to stay in this function.
2166 * @return Returns 0 on success, or a non-zero value on failure.
2169 ocs_hw_eq_process(ocs_hw_t *hw, hw_eq_t *eq, uint32_t max_isr_time_msec)
2171 uint8_t eqe[sizeof(sli4_eqe_t)] = { 0 };
2172 uint32_t done = FALSE;
2173 uint32_t tcheck_count;
2177 tcheck_count = OCS_HW_TIMECHECK_ITERATIONS;
2178 tstart = ocs_msectime();
2182 while (!done && !sli_queue_read(&hw->sli, eq->queue, eqe)) {
2186 rc = sli_eq_parse(&hw->sli, eqe, &cq_id);
2192 * Received a sentinel EQE indicating the EQ is full.
2195 for (i = 0; i < hw->cq_count; i++) {
2196 ocs_hw_cq_process(hw, hw->hw_cq[i]);
2203 int32_t index = ocs_hw_queue_hash_find(hw->cq_hash, cq_id);
2204 if (likely(index >= 0)) {
2205 ocs_hw_cq_process(hw, hw->hw_cq[index]);
2207 ocs_log_err(hw->os, "bad CQ_ID %#06x\n", cq_id);
2211 if (eq->queue->n_posted > (eq->queue->posted_limit)) {
2212 sli_queue_arm(&hw->sli, eq->queue, FALSE);
2215 if (tcheck_count && (--tcheck_count == 0)) {
2216 tcheck_count = OCS_HW_TIMECHECK_ITERATIONS;
2217 telapsed = ocs_msectime() - tstart;
2218 if (telapsed >= max_isr_time_msec) {
2223 sli_queue_eq_arm(&hw->sli, eq->queue, TRUE);
2229 * @brief Submit queued (pending) mailbox commands.
2232 * Submit queued mailbox commands.
2233 * --- Assumes that hw->cmd_lock is held ---
2235 * @param hw Hardware context.
2237 * @return Returns 0 on success, or a negative error code value on failure.
2240 ocs_hw_cmd_submit_pending(ocs_hw_t *hw)
2242 ocs_command_ctx_t *ctx;
2245 /* Assumes lock held */
2247 /* Only submit MQE if there's room */
2248 while (hw->cmd_head_count < (OCS_HW_MQ_DEPTH - 1)) {
2249 ctx = ocs_list_remove_head(&hw->cmd_pending);
2253 ocs_list_add_tail(&hw->cmd_head, ctx);
2254 hw->cmd_head_count++;
2255 if (sli_queue_write(&hw->sli, hw->mq, ctx->buf) < 0) {
2256 ocs_log_test(hw->os, "sli_queue_write failed: %d\n", rc);
2266 * @brief Issue a SLI command.
2269 * Send a mailbox command to the hardware, and either wait for a completion
2270 * (OCS_CMD_POLL) or get an optional asynchronous completion (OCS_CMD_NOWAIT).
2272 * @param hw Hardware context.
2273 * @param cmd Buffer containing a formatted command and results.
2274 * @param opts Command options:
2275 * - OCS_CMD_POLL - Command executes synchronously and busy-waits for the completion.
2276 * - OCS_CMD_NOWAIT - Command executes asynchronously. Uses callback.
2277 * @param cb Function callback used for asynchronous mode. May be NULL.
2278 * @n Prototype is <tt>(*cb)(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)</tt>.
2279 * @n @n @b Note: If the
2280 * callback function pointer is NULL, the results of the command are silently
2281 * discarded, allowing the command buffer to exist solely on the stack.
2282 * @param arg Argument passed to an asynchronous callback.
2284 * @return Returns 0 on success, or a non-zero value on failure.
2287 ocs_hw_command(ocs_hw_t *hw, uint8_t *cmd, uint32_t opts, void *cb, void *arg)
2289 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
2292 * If the chip is in an error state (UE'd) then reject this mailbox
2295 if (sli_fw_error_status(&hw->sli) > 0) {
2296 uint32_t err1 = sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR1);
2297 uint32_t err2 = sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR2);
2298 if (hw->expiration_logged == 0 && err1 == 0x2 && err2 == 0x10) {
2299 hw->expiration_logged = 1;
2300 ocs_log_crit(hw->os,"Emulex: Heartbeat expired after %d seconds\n",
2301 hw->watchdog_timeout);
2303 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2304 ocs_log_crit(hw->os, "status=%#x error1=%#x error2=%#x\n",
2305 sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_STATUS),
2308 return OCS_HW_RTN_ERROR;
2311 if (OCS_CMD_POLL == opts) {
2312 ocs_lock(&hw->cmd_lock);
2313 if (hw->mq->length && !sli_queue_is_empty(&hw->sli, hw->mq)) {
2315 * Can't issue Boot-strap mailbox command with other
2316 * mail-queue commands pending, as this interaction is
2317 * undefined.
2318 */
2319 rc = OCS_HW_RTN_ERROR;
2321 void *bmbx = hw->sli.bmbx.virt;
2323 ocs_memset(bmbx, 0, SLI4_BMBX_SIZE);
2324 ocs_memcpy(bmbx, cmd, SLI4_BMBX_SIZE);
2326 if (sli_bmbx_command(&hw->sli) == 0) {
2327 rc = OCS_HW_RTN_SUCCESS;
2328 ocs_memcpy(cmd, bmbx, SLI4_BMBX_SIZE);
2331 ocs_unlock(&hw->cmd_lock);
2332 } else if (OCS_CMD_NOWAIT == opts) {
2333 ocs_command_ctx_t *ctx = NULL;
2335 ctx = ocs_malloc(hw->os, sizeof(ocs_command_ctx_t), OCS_M_ZERO | OCS_M_NOWAIT);
2337 ocs_log_err(hw->os, "can't allocate command context\n");
2338 return OCS_HW_RTN_NO_RESOURCES;
2341 if (hw->state != OCS_HW_STATE_ACTIVE) {
2342 ocs_log_err(hw->os, "Can't send command, HW state=%d\n", hw->state);
2343 ocs_free(hw->os, ctx, sizeof(*ctx));
2344 return OCS_HW_RTN_ERROR;
2354 ocs_lock(&hw->cmd_lock);
2356 /* Add to pending list */
2357 ocs_list_add_tail(&hw->cmd_pending, ctx);
2359 /* Submit as much of the pending list as we can */
2360 if (ocs_hw_cmd_submit_pending(hw) == 0) {
2361 rc = OCS_HW_RTN_SUCCESS;
2364 ocs_unlock(&hw->cmd_lock);
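/*
 * Usage sketch (illustrative): issuing a mailbox command asynchronously.
 * The completion callback follows the same signature as
 * ocs_hw_cb_port_control() below; example_down_link_done() and
 * example_send_down_link() are hypothetical names.
 */
static int32_t
example_down_link_done(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
{
	/* The command buffer belongs to the caller; free it on completion */
	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
	return 0;
}

static ocs_hw_rtn_e
example_send_down_link(ocs_hw_t *hw)
{
	uint8_t *buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);

	if (buf == NULL) {
		return OCS_HW_RTN_NO_MEMORY;
	}

	if (sli_cmd_down_link(&hw->sli, buf, SLI4_BMBX_SIZE) &&
	    (ocs_hw_command(hw, buf, OCS_CMD_NOWAIT,
			    example_down_link_done, NULL) == OCS_HW_RTN_SUCCESS)) {
		/* Success: the buffer is freed in the callback */
		return OCS_HW_RTN_SUCCESS;
	}

	/* Failure: no callback is coming, so free the buffer here */
	ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
	return OCS_HW_RTN_ERROR;
}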
2371 * @ingroup devInitShutdown
2372 * @brief Register a callback for the given event.
2374 * @param hw Hardware context.
2375 * @param which Event of interest.
2376 * @param func Function to call when the event occurs.
2377 * @param arg Argument passed to the callback function.
2379 * @return Returns 0 on success, or a non-zero value on failure.
2382 ocs_hw_callback(ocs_hw_t *hw, ocs_hw_callback_e which, void *func, void *arg)
2385 if (!hw || !func || (which >= OCS_HW_CB_MAX)) {
2386 ocs_log_err(NULL, "bad parameter hw=%p which=%#x func=%p\n",
2388 return OCS_HW_RTN_ERROR;
2392 case OCS_HW_CB_DOMAIN:
2393 hw->callback.domain = func;
2394 hw->args.domain = arg;
2396 case OCS_HW_CB_PORT:
2397 hw->callback.port = func;
2398 hw->args.port = arg;
2400 case OCS_HW_CB_UNSOLICITED:
2401 hw->callback.unsolicited = func;
2402 hw->args.unsolicited = arg;
2404 case OCS_HW_CB_REMOTE_NODE:
2405 hw->callback.rnode = func;
2406 hw->args.rnode = arg;
2408 case OCS_HW_CB_BOUNCE:
2409 hw->callback.bounce = func;
2410 hw->args.bounce = arg;
2413 ocs_log_test(hw->os, "unknown callback %#x\n", which);
2414 return OCS_HW_RTN_ERROR;
2417 return OCS_HW_RTN_SUCCESS;
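/*
 * Usage sketch (illustrative): registering event handlers during
 * bring-up. The callback pointers are passed through as void *, matching
 * ocs_hw_callback()'s signature; their concrete types are defined by the
 * transport layer and are not shown here.
 */
static ocs_hw_rtn_e
example_register_callbacks(ocs_hw_t *hw, void *domain_cb, void *unsol_cb,
			   void *xport_ctx)
{
	if (ocs_hw_callback(hw, OCS_HW_CB_DOMAIN, domain_cb, xport_ctx)
	    != OCS_HW_RTN_SUCCESS) {
		return OCS_HW_RTN_ERROR;
	}
	return ocs_hw_callback(hw, OCS_HW_CB_UNSOLICITED, unsol_cb, xport_ctx);
}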
2422 * @brief Allocate a port object.
2425 * This function allocates a VPI object for the port and stores it in the
2426 * indicator field of the port object.
2428 * @param hw Hardware context.
2429 * @param sport SLI port object used to connect to the domain.
2430 * @param domain Domain object associated with this port (may be NULL).
2431 * @param wwpn Port's WWPN in big-endian order, or NULL to use default.
2433 * @return Returns 0 on success, or a non-zero value on failure.
2436 ocs_hw_port_alloc(ocs_hw_t *hw, ocs_sli_port_t *sport, ocs_domain_t *domain,
2439 uint8_t *cmd = NULL;
2440 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2443 sport->indicator = UINT32_MAX;
2445 sport->ctx.app = sport;
2446 sport->sm_free_req_pending = 0;
2449 * Check if the chip is in an error state (UE'd) before proceeding.
2451 if (sli_fw_error_status(&hw->sli) > 0) {
2452 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2453 return OCS_HW_RTN_ERROR;
2457 ocs_memcpy(&sport->sli_wwpn, wwpn, sizeof(sport->sli_wwpn));
2460 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_VPI, &sport->indicator, &index)) {
2461 ocs_log_err(hw->os, "FCOE_VPI allocation failure\n");
2462 return OCS_HW_RTN_ERROR;
2465 if (domain != NULL) {
2466 ocs_sm_function_t next = NULL;
2468 cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
2470 ocs_log_err(hw->os, "command memory allocation failed\n");
2471 rc = OCS_HW_RTN_NO_MEMORY;
2472 goto ocs_hw_port_alloc_out;
2475 /* If the WWPN is NULL, fetch the default WWPN and WWNN before
2476 * initializing the VPI
2479 next = __ocs_hw_port_alloc_read_sparm64;
2481 next = __ocs_hw_port_alloc_init_vpi;
2484 ocs_sm_transition(&sport->ctx, next, cmd);
2486 /* This is the convention for the HW, not SLI */
2487 ocs_log_test(hw->os, "need WWN for physical port\n");
2488 rc = OCS_HW_RTN_ERROR;
2490 /* domain NULL and wwpn non-NULL */
2491 ocs_sm_transition(&sport->ctx, __ocs_hw_port_alloc_init, NULL);
2494 ocs_hw_port_alloc_out:
2495 if (rc != OCS_HW_RTN_SUCCESS) {
2496 ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
2498 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
2506 * @brief Attach a physical/virtual SLI port to a domain.
2509 * This function registers a previously-allocated VPI with the
2510 * device.
2512 * @param hw Hardware context.
2513 * @param sport Pointer to the SLI port object.
2514 * @param fc_id Fibre Channel ID to associate with this port.
2516 * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code on failure.
2519 ocs_hw_port_attach(ocs_hw_t *hw, ocs_sli_port_t *sport, uint32_t fc_id)
2521 uint8_t *buf = NULL;
2522 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2524 if (!hw || !sport) {
2525 ocs_log_err(hw ? hw->os : NULL,
2526 "bad parameter(s) hw=%p sport=%p\n", hw,
2528 return OCS_HW_RTN_ERROR;
2532 * Check if the chip is in an error state (UE'd) before proceeding.
2534 if (sli_fw_error_status(&hw->sli) > 0) {
2535 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2536 return OCS_HW_RTN_ERROR;
2539 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2541 ocs_log_err(hw->os, "no buffer for command\n");
2542 return OCS_HW_RTN_NO_MEMORY;
2545 sport->fc_id = fc_id;
2546 ocs_sm_post_event(&sport->ctx, OCS_EVT_HW_PORT_REQ_ATTACH, buf);
2551 * @brief Called when the port control command completes.
2554 * We only need to free the mailbox command buffer.
2556 * @param hw Hardware context.
2557 * @param status Status field from the mbox completion.
2558 * @param mqe Mailbox response structure.
2559 * @param arg Pointer to a callback function that signals the caller that the command is done.
2561 * @return Returns 0.
2564 ocs_hw_cb_port_control(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
2566 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
2572 * @brief Control a port (initialize, shutdown, or set link configuration).
2575 * This function controls a port depending on the @c ctrl parameter:
2576 * - @b OCS_HW_PORT_INIT -
2577 * Issues the CONFIG_LINK and INIT_LINK commands for the specified port.
2578 * The HW generates an OCS_HW_DOMAIN_FOUND event when the link comes up.
2580 * - @b OCS_HW_PORT_SHUTDOWN -
2581 * Issues the DOWN_LINK command for the specified port.
2582 * The HW generates an OCS_HW_DOMAIN_LOST event when the link is down.
2584 * - @b OCS_HW_PORT_SET_LINK_CONFIG -
2585 * Sets the link configuration.
2587 * @param hw Hardware context.
2588 * @param ctrl Specifies the operation:
2589 * - OCS_HW_PORT_INIT
2590 * - OCS_HW_PORT_SHUTDOWN
2591 * - OCS_HW_PORT_SET_LINK_CONFIG
2593 * @param value Operation-specific value.
2594 * - OCS_HW_PORT_INIT - Selective reset AL_PA
2595 * - OCS_HW_PORT_SHUTDOWN - N/A
2596 * - OCS_HW_PORT_SET_LINK_CONFIG - An enum #ocs_hw_linkcfg_e value.
2598 * @param cb Callback function to invoke when the operation completes:
2599 * - OCS_HW_PORT_INIT/OCS_HW_PORT_SHUTDOWN - NULL (link events
2600 * are handled by the OCS_HW_CB_DOMAIN callbacks).
2601 * - OCS_HW_PORT_SET_LINK_CONFIG - Invoked after the linkcfg mailbox command
2602 * completes.
2604 * @param arg Argument passed to the callback after the command completes.
2605 * - OCS_HW_PORT_INIT/OCS_HW_PORT_SHUTDOWN - NULL (link events
2606 * are handled by the OCS_HW_CB_DOMAIN callbacks).
2607 * - OCS_HW_PORT_SET_LINK_CONFIG - Passed to the linkcfg callback
2608 * when it completes.
2610 * @return Returns 0 on success, or a non-zero value on failure.
2613 ocs_hw_port_control(ocs_hw_t *hw, ocs_hw_port_e ctrl, uintptr_t value, ocs_hw_port_control_cb_t cb, void *arg)
2615 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
2618 case OCS_HW_PORT_INIT:
2622 uint8_t reset_alpa = 0;
2624 if (SLI_LINK_MEDIUM_FC == sli_get_medium(&hw->sli)) {
2627 cfg_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2628 if (cfg_link == NULL) {
2629 ocs_log_err(hw->os, "no buffer for command\n");
2630 return OCS_HW_RTN_NO_MEMORY;
2633 if (sli_cmd_config_link(&hw->sli, cfg_link, SLI4_BMBX_SIZE)) {
2634 rc = ocs_hw_command(hw, cfg_link, OCS_CMD_NOWAIT,
2635 ocs_hw_cb_port_control, NULL);
2638 if (rc != OCS_HW_RTN_SUCCESS) {
2639 ocs_free(hw->os, cfg_link, SLI4_BMBX_SIZE);
2640 ocs_log_err(hw->os, "CONFIG_LINK failed\n");
2643 speed = hw->config.speed;
2644 reset_alpa = (uint8_t)(value & 0xff);
2646 speed = FC_LINK_SPEED_10G;
2650 * Bring the link up, unless the FW version is not supported
2652 if (hw->workaround.fw_version_too_low) {
2653 if (SLI4_IF_TYPE_LANCER_FC_ETH == hw->sli.if_type) {
2654 ocs_log_err(hw->os, "Cannot bring up link. Please update firmware to %s or later (current version is %s)\n",
2655 OCS_FW_VER_STR(OCS_MIN_FW_VER_LANCER), (char *) sli_get_fw_name(&hw->sli,0));
2657 ocs_log_err(hw->os, "Cannot bring up link. Please update firmware to %s or later (current version is %s)\n",
2658 OCS_FW_VER_STR(OCS_MIN_FW_VER_SKYHAWK), (char *) sli_get_fw_name(&hw->sli, 0));
2661 return OCS_HW_RTN_ERROR;
2664 rc = OCS_HW_RTN_ERROR;
2666 /* Allocate a new buffer for the init_link command */
2667 init_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2668 if (init_link == NULL) {
2669 ocs_log_err(hw->os, "no buffer for command\n");
2670 return OCS_HW_RTN_NO_MEMORY;
2673 if (sli_cmd_init_link(&hw->sli, init_link, SLI4_BMBX_SIZE, speed, reset_alpa)) {
2674 rc = ocs_hw_command(hw, init_link, OCS_CMD_NOWAIT,
2675 ocs_hw_cb_port_control, NULL);
2677 /* Free buffer on error, since no callback is coming */
2678 if (rc != OCS_HW_RTN_SUCCESS) {
2679 ocs_free(hw->os, init_link, SLI4_BMBX_SIZE);
2680 ocs_log_err(hw->os, "INIT_LINK failed\n");
2684 case OCS_HW_PORT_SHUTDOWN:
2688 down_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2689 if (down_link == NULL) {
2690 ocs_log_err(hw->os, "no buffer for command\n");
2691 return OCS_HW_RTN_NO_MEMORY;
2693 if (sli_cmd_down_link(&hw->sli, down_link, SLI4_BMBX_SIZE)) {
2694 rc = ocs_hw_command(hw, down_link, OCS_CMD_NOWAIT,
2695 ocs_hw_cb_port_control, NULL);
2697 /* Free buffer on error, since no callback is coming */
2698 if (rc != OCS_HW_RTN_SUCCESS) {
2699 ocs_free(hw->os, down_link, SLI4_BMBX_SIZE);
2700 ocs_log_err(hw->os, "DOWN_LINK failed\n");
2704 case OCS_HW_PORT_SET_LINK_CONFIG:
2705 rc = ocs_hw_set_linkcfg(hw, (ocs_hw_linkcfg_e)value, OCS_CMD_NOWAIT, cb, arg);
2708 ocs_log_test(hw->os, "unhandled control %#x\n", ctrl);
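/*
 * Usage sketch (illustrative): bringing the link up after initialization.
 * No completion callback is passed for OCS_HW_PORT_INIT; link state
 * changes are reported through the OCS_HW_CB_DOMAIN callbacks, as
 * described above. A value of 0 is used here, i.e. no selective-reset
 * AL_PA (per the low-byte extraction in the INIT case above).
 */
static ocs_hw_rtn_e
example_bring_link_up(ocs_hw_t *hw)
{
	return ocs_hw_port_control(hw, OCS_HW_PORT_INIT, 0, NULL, NULL);
}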
2717 * @brief Free port resources.
2720 * Issue the UNREG_VPI command to free the assigned VPI context.
2722 * @param hw Hardware context.
2723 * @param sport SLI port object used to connect to the domain.
2725 * @return Returns 0 on success, or a non-zero value on failure.
2728 ocs_hw_port_free(ocs_hw_t *hw, ocs_sli_port_t *sport)
2730 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2732 if (!hw || !sport) {
2733 ocs_log_err(hw ? hw->os : NULL,
2734 "bad parameter(s) hw=%p sport=%p\n", hw,
2736 return OCS_HW_RTN_ERROR;
2740 * Check if the chip is in an error state (UE'd) before proceeding.
2742 if (sli_fw_error_status(&hw->sli) > 0) {
2743 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2744 return OCS_HW_RTN_ERROR;
2747 ocs_sm_post_event(&sport->ctx, OCS_EVT_HW_PORT_REQ_FREE, NULL);
2753 * @brief Allocate a fabric domain object.
2756 * This function starts a series of commands needed to connect to the domain, including
2757 * - REG_FCFI
2758 * - INIT_VFI
2759 * - READ_SPARMS
2761 * @b Note: Not all SLI interface types use all of the above commands.
2762 * @n @n Upon successful allocation, the HW generates a OCS_HW_DOMAIN_ALLOC_OK
2763 * event. On failure, it generates a OCS_HW_DOMAIN_ALLOC_FAIL event.
2765 * @param hw Hardware context.
2766 * @param domain Pointer to the domain object.
2767 * @param fcf FCF index.
2768 * @param vlan VLAN ID.
2770 * @return Returns 0 on success, or a non-zero value on failure.
2773 ocs_hw_domain_alloc(ocs_hw_t *hw, ocs_domain_t *domain, uint32_t fcf, uint32_t vlan)
2775 uint8_t *cmd = NULL;
2778 if (!hw || !domain || !domain->sport) {
2779 ocs_log_err(NULL, "bad parameter(s) hw=%p domain=%p sport=%p\n",
2780 hw, domain, domain ? domain->sport : NULL);
2781 return OCS_HW_RTN_ERROR;
2785 * Check if the chip is in an error state (UE'd) before proceeding.
2787 if (sli_fw_error_status(&hw->sli) > 0) {
2788 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2789 return OCS_HW_RTN_ERROR;
2792 cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
2794 ocs_log_err(hw->os, "command memory allocation failed\n");
2795 return OCS_HW_RTN_NO_MEMORY;
2798 domain->dma = hw->domain_dmem;
2801 domain->sm.app = domain;
2803 domain->fcf_indicator = UINT32_MAX;
2804 domain->vlan_id = vlan;
2805 domain->indicator = UINT32_MAX;
2807 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_VFI, &domain->indicator, &index)) {
2808 ocs_log_err(hw->os, "FCOE_VFI allocation failure\n");
2810 ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
2812 return OCS_HW_RTN_ERROR;
2815 ocs_sm_transition(&domain->sm, __ocs_hw_domain_init, cmd);
2816 return OCS_HW_RTN_SUCCESS;
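/*
 * Usage sketch (illustrative): allocating a domain in response to an
 * OCS_HW_DOMAIN_FOUND event and attaching it once the fabric has
 * assigned an FC_ID. In the driver these two steps are sequenced by the
 * domain state machine (the attach normally follows the
 * OCS_HW_DOMAIN_ALLOC_OK event); they are shown back to back here only
 * for brevity.
 */
static ocs_hw_rtn_e
example_domain_bringup(ocs_hw_t *hw, ocs_domain_t *domain,
		       uint32_t fcf, uint32_t vlan, uint32_t fc_id)
{
	ocs_hw_rtn_e rc;

	rc = ocs_hw_domain_alloc(hw, domain, fcf, vlan);
	if (rc != OCS_HW_RTN_SUCCESS) {
		return rc;
	}
	return ocs_hw_domain_attach(hw, domain, fc_id);
}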
2821 * @brief Attach a SLI port to a domain.
2823 * @param hw Hardware context.
2824 * @param domain Pointer to the domain object.
2825 * @param fc_id Fibre Channel ID to associate with this port.
2827 * @return Returns 0 on success, or a non-zero value on failure.
2830 ocs_hw_domain_attach(ocs_hw_t *hw, ocs_domain_t *domain, uint32_t fc_id)
2832 uint8_t *buf = NULL;
2833 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2835 if (!hw || !domain) {
2836 ocs_log_err(hw ? hw->os : NULL,
2837 "bad parameter(s) hw=%p domain=%p\n",
2839 return OCS_HW_RTN_ERROR;
2843 * Check if the chip is in an error state (UE'd) before proceeding.
2845 if (sli_fw_error_status(&hw->sli) > 0) {
2846 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2847 return OCS_HW_RTN_ERROR;
2850 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2852 ocs_log_err(hw->os, "no buffer for command\n");
2853 return OCS_HW_RTN_NO_MEMORY;
2856 domain->sport->fc_id = fc_id;
2857 ocs_sm_post_event(&domain->sm, OCS_EVT_HW_DOMAIN_REQ_ATTACH, buf);
2863 * @brief Free a fabric domain object.
2866 * Free both the driver and SLI port resources associated with the domain.
2868 * @param hw Hardware context.
2869 * @param domain Pointer to the domain object.
2871 * @return Returns 0 on success, or a non-zero value on failure.
2874 ocs_hw_domain_free(ocs_hw_t *hw, ocs_domain_t *domain)
2876 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2878 if (!hw || !domain) {
2879 ocs_log_err(hw ? hw->os : NULL,
2880 "bad parameter(s) hw=%p domain=%p\n",
2882 return OCS_HW_RTN_ERROR;
2886 * Check if the chip is in an error state (UE'd) before proceeding.
2888 if (sli_fw_error_status(&hw->sli) > 0) {
2889 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2890 return OCS_HW_RTN_ERROR;
2893 ocs_sm_post_event(&domain->sm, OCS_EVT_HW_DOMAIN_REQ_FREE, NULL);
2899 * @brief Free a fabric domain object.
2902 * Free the driver resources associated with the domain. The difference between
2903 * this call and ocs_hw_domain_free() is that this call assumes resources no longer
2904 * exist on the SLI port, due to a reset or after some error conditions.
2906 * @param hw Hardware context.
2907 * @param domain Pointer to the domain object.
2909 * @return Returns 0 on success, or a non-zero value on failure.
2912 ocs_hw_domain_force_free(ocs_hw_t *hw, ocs_domain_t *domain)
2914 if (!hw || !domain) {
2915 ocs_log_err(NULL, "bad parameter(s) hw=%p domain=%p\n", hw, domain);
2916 return OCS_HW_RTN_ERROR;
2919 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator);
2921 return OCS_HW_RTN_SUCCESS;
2926 * @brief Allocate a remote node object.
2928 * @param hw Hardware context.
2929 * @param rnode Allocated remote node object to initialize.
2930 * @param fc_addr FC address of the remote node.
2931 * @param sport SLI port used to connect to remote node.
2933 * @return Returns 0 on success, or a non-zero value on failure.
2936 ocs_hw_node_alloc(ocs_hw_t *hw, ocs_remote_node_t *rnode, uint32_t fc_addr,
2937 ocs_sli_port_t *sport)
2939 /* A non-UINT32_MAX indicator means this RPI was already allocated */
2940 if (UINT32_MAX != rnode->indicator) {
2941 ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x rpi=%#x\n",
2942 fc_addr, rnode->indicator);
2943 return OCS_HW_RTN_ERROR;
2947 * Check if the chip is in an error state (UE'd) before proceeding.
2949 if (sli_fw_error_status(&hw->sli) > 0) {
2950 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2951 return OCS_HW_RTN_ERROR;
2954 /* NULL SLI port indicates an unallocated remote node */
2955 rnode->sport = NULL;
2957 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &rnode->indicator, &rnode->index)) {
2958 ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x\n",
2960 return OCS_HW_RTN_ERROR;
2963 rnode->fc_id = fc_addr;
2964 rnode->sport = sport;
2966 return OCS_HW_RTN_SUCCESS;
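/*
 * Usage sketch (illustrative): allocating an RPI for a newly discovered
 * remote port and registering it once its service parameters are in
 * hand. Completion of the attach is reported asynchronously through the
 * OCS_HW_CB_REMOTE_NODE callback, unless ocs_hw_node_attach() returns
 * OCS_HW_RTN_SUCCESS_SYNC.
 */
static ocs_hw_rtn_e
example_node_bringup(ocs_hw_t *hw, ocs_remote_node_t *rnode,
		     uint32_t fc_addr, ocs_sli_port_t *sport,
		     ocs_dma_t *sparms)
{
	ocs_hw_rtn_e rc;

	rc = ocs_hw_node_alloc(hw, rnode, fc_addr, sport);
	if (rc != OCS_HW_RTN_SUCCESS) {
		return rc;
	}
	return ocs_hw_node_attach(hw, rnode, sparms);
}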
2971 * @brief Update a remote node object with the remote port's service parameters.
2973 * @param hw Hardware context.
2974 * @param rnode Allocated remote node object to initialize.
2975 * @param sparms DMA buffer containing the remote port's service parameters.
2977 * @return Returns 0 on success, or a non-zero value on failure.
2980 ocs_hw_node_attach(ocs_hw_t *hw, ocs_remote_node_t *rnode, ocs_dma_t *sparms)
2982 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
2983 uint8_t *buf = NULL;
2986 if (!hw || !rnode || !sparms) {
2987 ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p sparms=%p\n",
2989 return OCS_HW_RTN_ERROR;
2993 * Check if the chip is in an error state (UE'd) before proceeding.
2995 if (sli_fw_error_status(&hw->sli) > 0) {
2996 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2997 return OCS_HW_RTN_ERROR;
3000 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
3002 ocs_log_err(hw->os, "no buffer for command\n");
3003 return OCS_HW_RTN_NO_MEMORY;
3007 * If the attach count is non-zero, this RPI has already been registered.
3008 * Otherwise, register the RPI
3010 if (rnode->index == UINT32_MAX) {
3011 ocs_log_err(NULL, "bad parameter rnode->index invalid\n");
3012 ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3013 return OCS_HW_RTN_ERROR;
3015 count = ocs_atomic_add_return(&hw->rpi_ref[rnode->index].rpi_count, 1);
3018 * Can't attach multiple FC_IDs to a node unless High Login
3019 * Mode is enabled.
3020 */
3021 if (sli_get_hlm(&hw->sli) == FALSE) {
3022 ocs_log_test(hw->os, "attach to already attached node HLM=%d count=%d\n",
3023 sli_get_hlm(&hw->sli), count);
3024 rc = OCS_HW_RTN_SUCCESS;
3026 rnode->node_group = TRUE;
3027 rnode->attached = ocs_atomic_read(&hw->rpi_ref[rnode->index].rpi_attached);
3028 rc = rnode->attached ? OCS_HW_RTN_SUCCESS_SYNC : OCS_HW_RTN_SUCCESS;
3031 rnode->node_group = FALSE;
3033 ocs_display_sparams("", "reg rpi", 0, NULL, sparms->virt);
3034 if (sli_cmd_reg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, rnode->fc_id,
3035 rnode->indicator, rnode->sport->indicator,
3036 sparms, 0, (hw->auto_xfer_rdy_enabled && hw->config.auto_xfer_rdy_t10_enable))) {
3037 rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT,
3038 ocs_hw_cb_node_attach, rnode);
3043 if (rc < OCS_HW_RTN_SUCCESS) {
3044 ocs_atomic_sub_return(&hw->rpi_ref[rnode->index].rpi_count, 1);
3045 ocs_log_err(hw->os, "%s error\n", count ? "HLM" : "REG_RPI");
3047 ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3055 * @brief Free a remote node resource.
3057 * @param hw Hardware context.
3058 * @param rnode Remote node object to free.
3060 * @return Returns 0 on success, or a non-zero value on failure.
3063 ocs_hw_node_free_resources(ocs_hw_t *hw, ocs_remote_node_t *rnode)
3065 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
3067 if (!hw || !rnode) {
3068 ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p\n",
3070 return OCS_HW_RTN_ERROR;
3074 if (!rnode->attached) {
3075 if (rnode->indicator != UINT32_MAX) {
3076 if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, rnode->indicator)) {
3077 ocs_log_err(hw->os, "FCOE_RPI free failure RPI %d addr=%#x\n",
3078 rnode->indicator, rnode->fc_id);
3079 rc = OCS_HW_RTN_ERROR;
3081 rnode->node_group = FALSE;
3082 rnode->indicator = UINT32_MAX;
3083 rnode->index = UINT32_MAX;
3084 rnode->free_group = FALSE;
3088 ocs_log_err(hw->os, "Error: rnode is still attached\n");
3089 rc = OCS_HW_RTN_ERROR;
3098 * @brief Free a remote node object.
3100 * @param hw Hardware context.
3101 * @param rnode Remote node object to free.
3103 * @return Returns 0 on success, or a non-zero value on failure.
3106 ocs_hw_node_detach(ocs_hw_t *hw, ocs_remote_node_t *rnode)
3108 uint8_t *buf = NULL;
3109 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS_SYNC;
3110 uint32_t index = UINT32_MAX;
3112 if (!hw || !rnode) {
3113 ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p\n",
3115 return OCS_HW_RTN_ERROR;
3119 * Check if the chip is in an error state (UE'd) before proceeding.
3121 if (sli_fw_error_status(&hw->sli) > 0) {
3122 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
3123 return OCS_HW_RTN_ERROR;
3126 index = rnode->index;
3132 if (!rnode->attached) {
3133 return OCS_HW_RTN_SUCCESS_SYNC;
3136 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
3138 ocs_log_err(hw->os, "no buffer for command\n");
3139 return OCS_HW_RTN_NO_MEMORY;
3142 count = ocs_atomic_sub_return(&hw->rpi_ref[index].rpi_count, 1);
3145 /* There are no other references to this RPI
3146 * so unregister it and free the resource. */
3148 rnode->node_group = FALSE;
3149 rnode->free_group = TRUE;
3151 if (sli_get_hlm(&hw->sli) == FALSE) {
3152 ocs_log_test(hw->os, "Invalid count with HLM disabled, count=%d\n",
3155 fc_id = rnode->fc_id & 0x00ffffff;
3158 rc = OCS_HW_RTN_ERROR;
3160 if (sli_cmd_unreg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, rnode->indicator,
3161 SLI_RSRC_FCOE_RPI, fc_id)) {
3162 rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_node_free, rnode);
3165 if (rc != OCS_HW_RTN_SUCCESS) {
3166 ocs_log_err(hw->os, "UNREG_RPI failed\n");
3167 ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3168 rc = OCS_HW_RTN_ERROR;
3177 * @brief Free all remote node objects.
3179 * @param hw Hardware context.
3181 * @return Returns 0 on success, or a non-zero value on failure.
3184 ocs_hw_node_free_all(ocs_hw_t *hw)
3186 uint8_t *buf = NULL;
3187 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
3190 ocs_log_err(NULL, "bad parameter hw=%p\n", hw);
3191 return OCS_HW_RTN_ERROR;
3195 * Check if the chip is in an error state (UE'd) before proceeding.
3197 if (sli_fw_error_status(&hw->sli) > 0) {
3198 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
3199 return OCS_HW_RTN_ERROR;
3202 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
3204 ocs_log_err(hw->os, "no buffer for command\n");
3205 return OCS_HW_RTN_NO_MEMORY;
3208 if (sli_cmd_unreg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, 0xffff,
3209 SLI_RSRC_FCOE_FCFI, UINT32_MAX)) {
3210 rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_node_free_all,
3214 if (rc != OCS_HW_RTN_SUCCESS) {
3215 ocs_log_err(hw->os, "UNREG_RPI failed\n");
3216 ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3217 rc = OCS_HW_RTN_ERROR;
3224 ocs_hw_node_group_alloc(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup)
3227 if (!hw || !ngroup) {
3228 ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p\n",
3230 return OCS_HW_RTN_ERROR;
3233 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &ngroup->indicator,
3235 ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x\n",
3237 return OCS_HW_RTN_ERROR;
3240 return OCS_HW_RTN_SUCCESS;
3244 ocs_hw_node_group_attach(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup, ocs_remote_node_t *rnode)
3247 if (!hw || !ngroup || !rnode) {
3248 ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p rnode=%p\n",
3250 return OCS_HW_RTN_ERROR;
3253 if (rnode->attached) {
3254 ocs_log_err(hw->os, "node already attached RPI=%#x addr=%#x\n",
3255 rnode->indicator, rnode->fc_id);
3256 return OCS_HW_RTN_ERROR;
3259 if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, rnode->indicator)) {
3260 ocs_log_err(hw->os, "FCOE_RPI free failure RPI=%#x\n",
3262 return OCS_HW_RTN_ERROR;
3265 rnode->indicator = ngroup->indicator;
3266 rnode->index = ngroup->index;
3268 return OCS_HW_RTN_SUCCESS;
3272 ocs_hw_node_group_free(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup)
3276 if (!hw || !ngroup) {
3277 ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p\n",
3279 return OCS_HW_RTN_ERROR;
3282 ref = ocs_atomic_read(&hw->rpi_ref[ngroup->index].rpi_count);
3284 /* Hmmm, the reference count is non-zero */
3285 ocs_log_debug(hw->os, "node group reference=%d (RPI=%#x)\n",
3286 ref, ngroup->indicator);
3288 if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, ngroup->indicator)) {
3289 ocs_log_err(hw->os, "FCOE_RPI free failure RPI=%#x\n",
3291 return OCS_HW_RTN_ERROR;
3294 ocs_atomic_set(&hw->rpi_ref[ngroup->index].rpi_count, 0);
3297 ngroup->indicator = UINT32_MAX;
3298 ngroup->index = UINT32_MAX;
3300 return OCS_HW_RTN_SUCCESS;
3304 * @brief Initialize IO fields on each free call.
3306 * @n @b Note: This is done on each free call (as opposed to each
3307 * alloc call) because port-owned XRIs are not
3308 * allocated with ocs_hw_io_alloc() but are freed with this
3309 * function.
3311 * @param io Pointer to HW IO.
3314 ocs_hw_init_free_io(ocs_hw_io_t *io)
3317 * Set io->done to NULL, to avoid any callbacks, should
3318 * a completion be received for one of these IOs
3321 io->abort_done = NULL;
3322 io->status_saved = 0;
3323 io->abort_in_progress = FALSE;
3324 io->port_owned_abort_count = 0;
3329 io->tgt_wqe_timeout = 0;
3334 * @brief Allocate a HW IO object without acquiring the IO lock.
3337 * Assume that hw->ocs_lock is held. This function is only used if
3338 * use_dif_sec_xri workaround is being used.
3340 * @param hw Hardware context.
3342 * @return Returns a pointer to an object on success, or NULL on failure.
3344 static inline ocs_hw_io_t *
3345 _ocs_hw_io_alloc(ocs_hw_t *hw)
3347 ocs_hw_io_t *io = NULL;
3349 if (NULL != (io = ocs_list_remove_head(&hw->io_free))) {
3350 ocs_list_add_tail(&hw->io_inuse, io);
3351 io->state = OCS_HW_IO_STATE_INUSE;
3352 io->quarantine = FALSE;
3353 io->quarantine_first_phase = TRUE;
3354 io->abort_reqtag = UINT32_MAX;
3355 ocs_ref_init(&io->ref, ocs_hw_io_free_internal, io);
3357 ocs_atomic_add_return(&hw->io_alloc_failed_count, 1);
3364 * @brief Allocate a HW IO object.
3367 * @n @b Note: This function applies to non-port owned XRIs
3370 * @param hw Hardware context.
3372 * @return Returns a pointer to an object on success, or NULL on failure.
3375 ocs_hw_io_alloc(ocs_hw_t *hw)
3377 ocs_hw_io_t *io = NULL;
3379 ocs_lock(&hw->io_lock);
3380 io = _ocs_hw_io_alloc(hw);
3381 ocs_unlock(&hw->io_lock);
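/*
 * Usage sketch (illustrative): the host-owned IO lifecycle. The alloc
 * takes the initial reference (see ocs_ref_init() in _ocs_hw_io_alloc()
 * above); ocs_hw_io_free() drops it, after which the IO migrates to the
 * free or wait_free list depending on whether the XRI is still busy.
 */
static void
example_io_lifecycle(ocs_hw_t *hw)
{
	ocs_hw_io_t *io = ocs_hw_io_alloc(hw);

	if (io == NULL) {
		/* Pool exhausted; tracked in hw->io_alloc_failed_count */
		return;
	}

	/* ... build SGEs and submit a WQE, e.g. via ocs_hw_io_send() ... */

	ocs_hw_io_free(hw, io);	/* put the allocation reference */
}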
3388 * @brief Allocate/Activate a port owned HW IO object.
3391 * This function is called by the transport layer when an XRI is
3392 * allocated by the SLI-Port. This will "activate" the HW IO
3393 * associated with the XRI received from the SLI-Port to mirror
3394 * the state of the XRI.
3395 * @n @n @b Note: This function applies to port owned XRIs only.
3397 * @param hw Hardware context.
3398 * @param io Pointer HW IO to activate/allocate.
3400 * @return Returns a pointer to an object on success, or NULL on failure.
3403 ocs_hw_io_activate_port_owned(ocs_hw_t *hw, ocs_hw_io_t *io)
3405 if (ocs_ref_read_count(&io->ref) > 0) {
3406 ocs_log_err(hw->os, "Bad parameter: refcount > 0\n");
3410 if (io->wq != NULL) {
3411 ocs_log_err(hw->os, "XRI %x already in use\n", io->indicator);
3415 ocs_ref_init(&io->ref, ocs_hw_io_free_port_owned, io);
3423 * @brief When an IO is freed, move it to the correct list depending on
3424 * the exchange busy flag and active workarounds.
3427 * @n @b Note: Assumes that the hw->io_lock is held and the item has been removed
3428 * from the busy or wait_free list.
3430 * @param hw Hardware context.
3431 * @param io Pointer to the IO object to move.
3434 ocs_hw_io_free_move_correct_list(ocs_hw_t *hw, ocs_hw_io_t *io)
3437 /* add to wait_free list and wait for XRI_ABORTED CQEs to clean up */
3438 ocs_list_add_tail(&hw->io_wait_free, io);
3439 io->state = OCS_HW_IO_STATE_WAIT_FREE;
3441 /* IO not busy, add to free list */
3442 ocs_list_add_tail(&hw->io_free, io);
3443 io->state = OCS_HW_IO_STATE_FREE;
3446 /* BZ 161832 workaround */
3447 if (hw->workaround.use_dif_sec_xri) {
3448 ocs_hw_check_sec_hio_list(hw);
3454 * @brief Free a HW IO object. Perform cleanup common to
3455 * port and host-owned IOs.
3457 * @param hw Hardware context.
3458 * @param io Pointer to the HW IO object.
3461 ocs_hw_io_free_common(ocs_hw_t *hw, ocs_hw_io_t *io)
3463 /* initialize IO fields */
3464 ocs_hw_init_free_io(io);
3466 /* Restore default SGL */
3467 ocs_hw_io_restore_sgl(hw, io);
3472 * @brief Free a HW IO object associated with a port-owned XRI.
3474 * @param arg Pointer to the HW IO object.
3477 ocs_hw_io_free_port_owned(void *arg)
3479 ocs_hw_io_t *io = (ocs_hw_io_t *)arg;
3480 ocs_hw_t *hw = io->hw;
3483 * For auto xfer rdy, if the dnrx bit is set, then add it to the list of XRIs
3484 * waiting for buffers.
3486 if (io->auto_xfer_rdy_dnrx) {
3487 ocs_lock(&hw->io_lock);
3488 /* take a reference count because we still own the IO until the buffer is posted */
3489 ocs_ref_init(&io->ref, ocs_hw_io_free_port_owned, io);
3490 ocs_list_add_tail(&hw->io_port_dnrx, io);
3491 ocs_unlock(&hw->io_lock);
3494 /* perform common cleanup */
3495 ocs_hw_io_free_common(hw, io);
3500 * @brief Free a previously-allocated HW IO object. Called when
3501 * IO refcount goes to zero (host-owned IOs only).
3503 * @param arg Pointer to the HW IO object.
3506 ocs_hw_io_free_internal(void *arg)
3508 ocs_hw_io_t *io = (ocs_hw_io_t *)arg;
3509 ocs_hw_t *hw = io->hw;
3511 /* perform common cleanup */
3512 ocs_hw_io_free_common(hw, io);
3514 ocs_lock(&hw->io_lock);
3515 /* remove from in-use list */
3516 ocs_list_remove(&hw->io_inuse, io);
3517 ocs_hw_io_free_move_correct_list(hw, io);
3518 ocs_unlock(&hw->io_lock);
3523 * @brief Free a previously-allocated HW IO object.
3526 * @n @b Note: This function applies to port and host owned XRIs.
3528 * @param hw Hardware context.
3529 * @param io Pointer to the HW IO object.
3531 * @return Returns a non-zero value if HW IO was freed, 0 if references
3532 * on the IO still exist, or a negative value if an error occurred.
3535 ocs_hw_io_free(ocs_hw_t *hw, ocs_hw_io_t *io)
3537 /* just put refcount */
3538 if (ocs_ref_read_count(&io->ref) <= 0) {
3539 ocs_log_err(hw->os, "Bad parameter: refcount <= 0 xri=%x tag=%x\n",
3540 io->indicator, io->reqtag);
3544 return ocs_ref_put(&io->ref); /* ocs_ref_get(): ocs_hw_io_alloc() */
3549 * @brief Check if given HW IO is in-use
3552 * This function returns TRUE if the given HW IO has been
3553 * allocated and is in-use, and FALSE otherwise. It applies to
3554 * port and host owned XRIs.
3556 * @param hw Hardware context.
3557 * @param io Pointer to the HW IO object.
3559 * @return TRUE if an IO is in use, or FALSE otherwise.
3562 ocs_hw_io_inuse(ocs_hw_t *hw, ocs_hw_io_t *io)
3564 return (ocs_ref_read_count(&io->ref) > 0);
3568 * @brief Write a HW IO to a work queue.
3571 * A HW IO is written to a work queue.
3573 * @param wq Pointer to work queue.
3574 * @param wqe Pointer to WQ entry.
3576 * @n @b Note: Assumes the SLI-4 queue lock is held.
3578 * @return Returns 0 on success, or a negative error code value on failure.
3581 _hw_wq_write(hw_wq_t *wq, ocs_hw_wqe_t *wqe)
3586 /* Every so often, set the wqec bit to generate consumed completions */
3587 if (wq->wqec_count) {
3588 wq->wqec_count--;
3589 }
3590 if (wq->wqec_count == 0) {
3591 sli4_generic_wqe_t *genwqe = (void*)wqe->wqebuf;
3592 genwqe->wqec = 1;
3593 wq->wqec_count = wq->wqec_set_count;
3596 /* Decrement WQ free count */
3597 wq->free_count--;
3599 queue_rc = _sli_queue_write(&wq->hw->sli, wq->queue, wqe->wqebuf);
3605 ocs_queue_history_wq(&wq->hw->q_hist, (void *) wqe->wqebuf, wq->queue->id, queue_rc);
3612 * @brief Write a HW IO to a work queue.
3615 * A HW IO is written to a work queue.
3617 * @param wq Pointer to work queue.
3618 * @param wqe Pointer to WQE entry.
3620 * @n @b Note: Takes the SLI-4 queue lock.
3622 * @return Returns 0 on success, or a negative error code value on failure.
3625 hw_wq_write(hw_wq_t *wq, ocs_hw_wqe_t *wqe)
3629 sli_queue_lock(wq->queue);
3630 if ( ! ocs_list_empty(&wq->pending_list)) {
3631 ocs_list_add_tail(&wq->pending_list, wqe);
3632 OCS_STAT(wq->wq_pending_count++;)
3633 while ((wq->free_count > 0) && ((wqe = ocs_list_remove_head(&wq->pending_list)) != NULL)) {
3634 rc = _hw_wq_write(wq, wqe);
3638 if (wqe->abort_wqe_submit_needed) {
3639 wqe->abort_wqe_submit_needed = 0;
3640 sli_abort_wqe(&wq->hw->sli, wqe->wqebuf, wq->hw->sli.config.wqe_size, SLI_ABORT_XRI,
3641 wqe->send_abts, wqe->id, 0, wqe->abort_reqtag, SLI4_CQ_DEFAULT );
3642 ocs_list_add_tail(&wq->pending_list, wqe);
3643 OCS_STAT(wq->wq_pending_count++;)
3647 if (wq->free_count > 0) {
3648 rc = _hw_wq_write(wq, wqe);
3650 ocs_list_add_tail(&wq->pending_list, wqe);
3651 OCS_STAT(wq->wq_pending_count++;)
3655 sli_queue_unlock(wq->queue);
3662 * @brief Update free count and submit any pending HW IOs
3665 * The WQ free count is updated, and any pending HW IOs are submitted that
3666 * will fit in the queue.
3668 * @param wq Pointer to work queue.
3669 * @param update_free_count Value added to WQs free count.
3674 hw_wq_submit_pending(hw_wq_t *wq, uint32_t update_free_count)
3678 sli_queue_lock(wq->queue);
3680 /* Update free count with value passed in */
3681 wq->free_count += update_free_count;
3683 while ((wq->free_count > 0) && ((wqe = ocs_list_remove_head(&wq->pending_list)) != NULL)) {
3684 _hw_wq_write(wq, wqe);
3686 if (wqe->abort_wqe_submit_needed) {
3687 wqe->abort_wqe_submit_needed = 0;
3688 sli_abort_wqe(&wq->hw->sli, wqe->wqebuf, wq->hw->sli.config.wqe_size, SLI_ABORT_XRI,
3689 wqe->send_abts, wqe->id, 0, wqe->abort_reqtag, SLI4_CQ_DEFAULT);
3690 ocs_list_add_tail(&wq->pending_list, wqe);
3691 OCS_STAT(wq->wq_pending_count++;)
3695 sli_queue_unlock(wq->queue);
3699 * @brief Check to see if there are any BZ 161832 workaround waiting IOs
3702 * Checks hw->sec_hio_wait_list, if an IO is waiting for a HW IO, then try
3703 * to allocate a secondary HW io, and dispatch it.
3705 * @n @b Note: hw->io_lock MUST be taken when called.
3707 * @param hw pointer to HW object
3712 ocs_hw_check_sec_hio_list(ocs_hw_t *hw)
3715 ocs_hw_io_t *sec_io;
3718 while (!ocs_list_empty(&hw->sec_hio_wait_list)) {
3721 sec_io = _ocs_hw_io_alloc(hw);
3722 if (sec_io == NULL) {
3726 io = ocs_list_remove_head(&hw->sec_hio_wait_list);
3727 ocs_list_add_tail(&hw->io_inuse, io);
3728 io->state = OCS_HW_IO_STATE_INUSE;
3729 io->sec_hio = sec_io;
3731 /* mark secondary XRI for second and subsequent data phase as quarantine */
3733 sec_io->quarantine = TRUE;
3736 flags = io->sec_iparam.fcp_tgt.flags;
3738 flags |= SLI4_IO_CONTINUATION;
3740 flags &= ~SLI4_IO_CONTINUATION;
3743 io->tgt_wqe_timeout = io->sec_iparam.fcp_tgt.timeout;
3745 /* Complete (continue) TRECV IO */
3747 if (sli_fcp_cont_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl,
3749 io->sec_iparam.fcp_tgt.offset, io->sec_len, io->indicator, io->sec_hio->indicator,
3750 io->reqtag, SLI4_CQ_DEFAULT,
3751 io->sec_iparam.fcp_tgt.ox_id, io->rnode->indicator, io->rnode,
3753 io->sec_iparam.fcp_tgt.dif_oper, io->sec_iparam.fcp_tgt.blk_size, io->sec_iparam.fcp_tgt.cs_ctl, io->sec_iparam.fcp_tgt.app_id)) {
3754 ocs_log_test(hw->os, "TRECEIVE WQE error\n");
3758 if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl,
3760 io->sec_iparam.fcp_tgt.offset, io->sec_len, io->indicator,
3761 io->reqtag, SLI4_CQ_DEFAULT,
3762 io->sec_iparam.fcp_tgt.ox_id, io->rnode->indicator, io->rnode,
3764 io->sec_iparam.fcp_tgt.dif_oper, io->sec_iparam.fcp_tgt.blk_size,
3765 io->sec_iparam.fcp_tgt.cs_ctl, io->sec_iparam.fcp_tgt.app_id)) {
3766 ocs_log_test(hw->os, "TRECEIVE WQE error\n");
3771 if (io->wq == NULL) {
3772 io->wq = ocs_hw_queue_next_wq(hw, io);
3773 ocs_hw_assert(io->wq != NULL);
3778 * Add IO to active io wqe list before submitting, in case the
3779 * wcqe processing preempts this thread.
3781 ocs_hw_add_io_timed_wqe(hw, io);
3782 rc = hw_wq_write(io->wq, &io->wqe);
3784 /* non-negative return is success */
3787 /* failed to write wqe, remove from active wqe list */
3788 ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
3790 ocs_hw_remove_io_timed_wqe(hw, io);
3797 * @brief Send a Single Request/Response Sequence (SRRS).
3800 * This routine supports communication sequences consisting of a single
3801 * request and single response between two endpoints. Examples include:
3802 * - Sending an ELS request.
3803 * - Sending an ELS response - To send an ELS response, the caller must provide
3804 * the OX_ID from the received request.
3805 * - Sending a FC Common Transport (FC-CT) request - To send a FC-CT request,
3806 * the caller must provide the R_CTL, TYPE, and DF_CTL
3807 * values to place in the FC frame header.
3809 * @n @b Note: The caller is expected to provide both send and receive
3810 * buffers for requests. In the case of sending a response, no receive buffer
3811 * is necessary and the caller may pass in a NULL pointer.
3813 * @param hw Hardware context.
3814 * @param type Type of sequence (ELS request/response, FC-CT).
3815 * @param io Previously-allocated HW IO object.
3816 * @param send DMA memory holding data to send (for example, ELS request, BLS response).
3817 * @param len Length, in bytes, of data to send.
3818 * @param receive Optional DMA memory to hold a response.
3819 * @param rnode Destination of data (that is, a remote node).
3820 * @param iparam IO parameters (ELS response and FC-CT).
3821 * @param cb Function call upon completion of sending the data (may be NULL).
3822 * @param arg Argument to pass to IO completion function.
3824 * @return Returns 0 on success, or a non-zero value on failure.
3827 ocs_hw_srrs_send(ocs_hw_t *hw, ocs_hw_io_type_e type, ocs_hw_io_t *io,
3828 ocs_dma_t *send, uint32_t len, ocs_dma_t *receive,
3829 ocs_remote_node_t *rnode, ocs_hw_io_param_t *iparam,
3830 ocs_hw_srrs_cb_t cb, void *arg)
3832 sli4_sge_t *sge = NULL;
3833 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
3834 uint16_t local_flags = 0;
3836 if (!hw || !io || !rnode || !iparam) {
3837 ocs_log_err(NULL, "bad parm hw=%p io=%p send=%p receive=%p rnode=%p iparam=%p\n",
3838 hw, io, send, receive, rnode, iparam);
3839 return OCS_HW_RTN_ERROR;
3842 if (hw->state != OCS_HW_STATE_ACTIVE) {
3843 ocs_log_test(hw->os, "cannot send SRRS, HW state=%d\n", hw->state);
3844 return OCS_HW_RTN_ERROR;
3847 if (ocs_hw_is_xri_port_owned(hw, io->indicator)) {
3848 /* We must set the XC bit for port owned XRIs */
3849 local_flags |= SLI4_IO_CONTINUATION;
3856 sge = io->sgl->virt;
3858 /* clear both SGE */
3859 ocs_memset(io->sgl->virt, 0, 2 * sizeof(sli4_sge_t));
3862 sge[0].buffer_address_high = ocs_addr32_hi(send->phys);
3863 sge[0].buffer_address_low = ocs_addr32_lo(send->phys);
3864 sge[0].sge_type = SLI4_SGE_TYPE_DATA;
3865 sge[0].buffer_length = len;
3868 if ((OCS_HW_ELS_REQ == type) || (OCS_HW_FC_CT == type)) {
3869 sge[1].buffer_address_high = ocs_addr32_hi(receive->phys);
3870 sge[1].buffer_address_low = ocs_addr32_lo(receive->phys);
3871 sge[1].sge_type = SLI4_SGE_TYPE_DATA;
3872 sge[1].buffer_length = receive->size;
3879 case OCS_HW_ELS_REQ:
3880 if ( (!send) || sli_els_request64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl,
3881 *((uint8_t *)(send->virt)), /* req_type */
3883 iparam->els.timeout, io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rnode)) {
3884 ocs_log_err(hw->os, "REQ WQE error\n");
3885 rc = OCS_HW_RTN_ERROR;
3888 case OCS_HW_ELS_RSP:
3889 if ( (!send) || sli_xmit_els_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len,
3890 io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
3892 rnode, local_flags, UINT32_MAX)) {
3893 ocs_log_err(hw->os, "RSP WQE error\n");
3894 rc = OCS_HW_RTN_ERROR;
3897 case OCS_HW_ELS_RSP_SID:
3898 if ( (!send) || sli_xmit_els_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len,
3899 io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
3900 iparam->els_sid.ox_id,
3901 rnode, local_flags, iparam->els_sid.s_id)) {
3902 ocs_log_err(hw->os, "RSP (SID) WQE error\n");
3903 rc = OCS_HW_RTN_ERROR;
3907 if ( (!send) || sli_gen_request64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl, len,
3908 receive->size, iparam->fc_ct.timeout, io->indicator,
3909 io->reqtag, SLI4_CQ_DEFAULT, rnode, iparam->fc_ct.r_ctl,
3910 iparam->fc_ct.type, iparam->fc_ct.df_ctl)) {
3911 ocs_log_err(hw->os, "GEN WQE error\n");
3912 rc = OCS_HW_RTN_ERROR;
3915 case OCS_HW_FC_CT_RSP:
3916 if ( (!send) || sli_xmit_sequence64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl, len,
3917 iparam->fc_ct_rsp.timeout, iparam->fc_ct_rsp.ox_id, io->indicator,
3918 io->reqtag, rnode, iparam->fc_ct_rsp.r_ctl,
3919 iparam->fc_ct_rsp.type, iparam->fc_ct_rsp.df_ctl)) {
3920 ocs_log_err(hw->os, "XMIT SEQ WQE error\n");
3921 rc = OCS_HW_RTN_ERROR;
3924 case OCS_HW_BLS_ACC:
3925 case OCS_HW_BLS_RJT:
3927 sli_bls_payload_t bls;
3929 if (OCS_HW_BLS_ACC == type) {
3930 bls.type = SLI_BLS_ACC;
3931 ocs_memcpy(&bls.u.acc, iparam->bls.payload, sizeof(bls.u.acc));
3933 bls.type = SLI_BLS_RJT;
3934 ocs_memcpy(&bls.u.rjt, iparam->bls.payload, sizeof(bls.u.rjt));
3937 bls.ox_id = iparam->bls.ox_id;
3938 bls.rx_id = iparam->bls.rx_id;
3940 if (sli_xmit_bls_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &bls,
3941 io->indicator, io->reqtag,
3943 rnode, UINT32_MAX)) {
3944 ocs_log_err(hw->os, "XMIT_BLS_RSP64 WQE error\n");
3945 rc = OCS_HW_RTN_ERROR;
3949 case OCS_HW_BLS_ACC_SID:
3951 sli_bls_payload_t bls;
3953 bls.type = SLI_BLS_ACC;
3954 ocs_memcpy(&bls.u.acc, iparam->bls_sid.payload, sizeof(bls.u.acc));
3956 bls.ox_id = iparam->bls_sid.ox_id;
3957 bls.rx_id = iparam->bls_sid.rx_id;
3959 if (sli_xmit_bls_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &bls,
3960 io->indicator, io->reqtag,
3962 rnode, iparam->bls_sid.s_id)) {
3963 ocs_log_err(hw->os, "XMIT_BLS_RSP64 WQE SID error\n");
3964 rc = OCS_HW_RTN_ERROR;
3969 if ( (!send) || sli_xmit_bcast64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len,
3970 iparam->bcast.timeout, io->indicator, io->reqtag,
3971 SLI4_CQ_DEFAULT, rnode,
3972 iparam->bcast.r_ctl, iparam->bcast.type, iparam->bcast.df_ctl)) {
3973 ocs_log_err(hw->os, "XMIT_BCAST64 WQE error\n");
3974 rc = OCS_HW_RTN_ERROR;
3978 ocs_log_err(hw->os, "bad SRRS type %#x\n", type);
3979 rc = OCS_HW_RTN_ERROR;
3982 if (OCS_HW_RTN_SUCCESS == rc) {
3983 if (io->wq == NULL) {
3984 io->wq = ocs_hw_queue_next_wq(hw, io);
3985 ocs_hw_assert(io->wq != NULL);
3990 * Add IO to active io wqe list before submitting, in case the
3991 * wcqe processing preempts this thread.
3993 OCS_STAT(io->wq->use_count++);
3994 ocs_hw_add_io_timed_wqe(hw, io);
3995 rc = hw_wq_write(io->wq, &io->wqe);
3997 /* non-negative return is success */
4000 /* failed to write wqe, remove from active wqe list */
4001 ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
4003 ocs_hw_remove_io_timed_wqe(hw, io);
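/*
 * Usage sketch (illustrative): sending an ELS request with
 * ocs_hw_srrs_send(). The request and response DMA buffers are set up
 * by the caller; a timeout of 0 is assumed here to select the default
 * ELS timeout (an assumption, not confirmed by this file).
 */
static ocs_hw_rtn_e
example_send_els(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_remote_node_t *rnode,
		 ocs_dma_t *req, uint32_t req_len, ocs_dma_t *rsp,
		 ocs_hw_srrs_cb_t cb, void *arg)
{
	ocs_hw_io_param_t iparam = { 0 };

	iparam.els.timeout = 0;	/* assumption: 0 = default timeout */
	return ocs_hw_srrs_send(hw, OCS_HW_ELS_REQ, io, req, req_len, rsp,
				rnode, &iparam, cb, arg);
}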
4012 * @brief Send a read, write, or response IO.
4015 * This routine supports sending a higher-level IO (for example, FCP) between two endpoints
4016 * as a target or initiator. Examples include:
4017 * - Sending read data and good response (target).
4018 * - Sending a response (target with no data or after receiving write data).
4020 * This routine assumes all IOs use the SGL associated with the HW IO. Prior to
4021 * calling this routine, the data should be loaded using ocs_hw_io_add_sge().
4023 * @param hw Hardware context.
4024 * @param type Type of IO (target read, target response, and so on).
4025 * @param io Previously-allocated HW IO object.
4026 * @param len Length, in bytes, of data to send.
4027 * @param iparam IO parameters.
4028 * @param rnode Destination of data (that is, a remote node).
4029 * @param cb Function call upon completion of sending data (may be NULL).
4030 * @param arg Argument to pass to IO completion function.
4032 * @return Returns 0 on success, or a non-zero value on failure.
4035 * - Support specifying relative offset.
4036 * - Use a WQ other than 0.
4039 ocs_hw_io_send(ocs_hw_t *hw, ocs_hw_io_type_e type, ocs_hw_io_t *io,
4040 uint32_t len, ocs_hw_io_param_t *iparam, ocs_remote_node_t *rnode,
4041 void *cb, void *arg)
4043 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
4045 uint8_t send_wqe = TRUE;
4049 if (!hw || !io || !rnode || !iparam) {
4050 ocs_log_err(NULL, "bad parm hw=%p io=%p iparam=%p rnode=%p\n",
4051 hw, io, iparam, rnode);
4052 return OCS_HW_RTN_ERROR;
4055 if (hw->state != OCS_HW_STATE_ACTIVE) {
4056 ocs_log_err(hw->os, "cannot send IO, HW state=%d\n", hw->state);
4057 return OCS_HW_RTN_ERROR;
4060 rpi = rnode->indicator;
4062 if (hw->workaround.use_unregistered_rpi && (rpi == UINT32_MAX)) {
4063 rpi = hw->workaround.unregistered_rid;
4064 ocs_log_test(hw->os, "using unregistered RPI: %d\n", rpi);
4068 * Save state needed during later stages
4076 * Format the work queue entry used to send the IO
4079 case OCS_HW_IO_INITIATOR_READ:
4081 * If the use_dif_quarantine workaround is in effect and the DIF mode is
4082 * separate, then mark the initiator read IO for quarantine.
4084 if (hw->workaround.use_dif_quarantine && (hw->config.dif_mode == OCS_HW_DIF_MODE_SEPARATE) &&
4085 (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) {
4086 io->quarantine = TRUE;
4089 ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
4090 iparam->fcp_ini.rsp);
4092 if (sli_fcp_iread64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge, len,
4093 io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rpi, rnode,
4094 iparam->fcp_ini.dif_oper, iparam->fcp_ini.blk_size,
4095 iparam->fcp_ini.timeout)) {
4096 ocs_log_err(hw->os, "IREAD WQE error\n");
4097 rc = OCS_HW_RTN_ERROR;
4100 case OCS_HW_IO_INITIATOR_WRITE:
4101 ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
4102 iparam->fcp_ini.rsp);
4104 if (sli_fcp_iwrite64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4105 len, iparam->fcp_ini.first_burst,
4106 io->indicator, io->reqtag,
4107 SLI4_CQ_DEFAULT, rpi, rnode,
4108 iparam->fcp_ini.dif_oper, iparam->fcp_ini.blk_size,
4109 iparam->fcp_ini.timeout)) {
4110 ocs_log_err(hw->os, "IWRITE WQE error\n");
4111 rc = OCS_HW_RTN_ERROR;
4114 case OCS_HW_IO_INITIATOR_NODATA:
4115 ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
4116 iparam->fcp_ini.rsp);
4118 if (sli_fcp_icmnd64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl,
4119 io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
4120 rpi, rnode, iparam->fcp_ini.timeout)) {
4121 ocs_log_err(hw->os, "ICMND WQE error\n");
4122 rc = OCS_HW_RTN_ERROR;
4125 case OCS_HW_IO_TARGET_WRITE: {
4126 uint16_t flags = iparam->fcp_tgt.flags;
4127 fcp_xfer_rdy_iu_t *xfer = io->xfer_rdy.virt;
4130 * Fill in the XFER_RDY for IF_TYPE 0 devices
4132 *((uint32_t *)xfer->fcp_data_ro) = ocs_htobe32(iparam->fcp_tgt.offset);
4133 *((uint32_t *)xfer->fcp_burst_len) = ocs_htobe32(len);
4134 *((uint32_t *)xfer->rsvd) = 0;
4137 flags |= SLI4_IO_CONTINUATION;
4139 flags &= ~SLI4_IO_CONTINUATION;
4142 io->tgt_wqe_timeout = iparam->fcp_tgt.timeout;
4145 * If the use_dif_quarantine workaround is in effect and this is a
4146 * DIF-enabled IO, then mark the target write IO for quarantine.
4148 if (hw->workaround.use_dif_quarantine && (hw->config.dif_mode == OCS_HW_DIF_MODE_SEPARATE) &&
4149 (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) {
4150 io->quarantine = TRUE;
4154 * BZ 161832 Workaround:
4155 * Check for the use_dif_sec_xri workaround. Note, even though the first data phase
4156 * doesn't really need a secondary XRI, we allocate one anyway, as this avoids the
4157 * potential for deadlock where all XRI's are allocated as primaries to IOs that
4158 * are on hw->sec_hio_wait_list. If this secondary XRI is not for the first
4159 * data phase, it is marked for quarantine.
4161 if (hw->workaround.use_dif_sec_xri && (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) {
4163 * If we have allocated a chained SGL for skyhawk, then
4164 * we can re-use this for the sec_hio.
4166 if (io->ovfl_io != NULL) {
4167 io->sec_hio = io->ovfl_io;
4168 io->sec_hio->quarantine = TRUE;
4170 io->sec_hio = ocs_hw_io_alloc(hw);
4172 if (io->sec_hio == NULL) {
4173 /* Failed to allocate, so save full request context and put
4174 * this IO on the wait list
4176 io->sec_iparam = *iparam;
4178 ocs_lock(&hw->io_lock);
4179 ocs_list_remove(&hw->io_inuse, io);
4180 ocs_list_add_tail(&hw->sec_hio_wait_list, io);
4181 io->state = OCS_HW_IO_STATE_WAIT_SEC_HIO;
4182 hw->sec_hio_wait_count++;
4183 ocs_unlock(&hw->io_lock);
4188 /* We quarantine the secondary IO if this is the second or subsequent data phase */
4190 io->sec_hio->quarantine = TRUE;
4195 * If not the first data phase, and io->sec_hio has been allocated, then issue
4196 * FCP_CONT_TRECEIVE64 WQE, otherwise use the usual FCP_TRECEIVE64 WQE
4198 if (io->xbusy && (io->sec_hio != NULL)) {
4199 if (sli_fcp_cont_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4200 iparam->fcp_tgt.offset, len, io->indicator, io->sec_hio->indicator,
4201 io->reqtag, SLI4_CQ_DEFAULT,
4202 iparam->fcp_tgt.ox_id, rpi, rnode,
4204 iparam->fcp_tgt.dif_oper, iparam->fcp_tgt.blk_size,
4205 iparam->fcp_tgt.cs_ctl, iparam->fcp_tgt.app_id)) {
4206 ocs_log_err(hw->os, "TRECEIVE WQE error\n");
4207 rc = OCS_HW_RTN_ERROR;
4210 if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4211 iparam->fcp_tgt.offset, len, io->indicator, io->reqtag,
4213 iparam->fcp_tgt.ox_id, rpi, rnode,
4215 iparam->fcp_tgt.dif_oper, iparam->fcp_tgt.blk_size,
4216 iparam->fcp_tgt.cs_ctl, iparam->fcp_tgt.app_id)) {
4217 ocs_log_err(hw->os, "TRECEIVE WQE error\n");
4218 rc = OCS_HW_RTN_ERROR;
4223 case OCS_HW_IO_TARGET_READ: {
4224 uint16_t flags = iparam->fcp_tgt.flags;
4227 flags |= SLI4_IO_CONTINUATION;
4229 flags &= ~SLI4_IO_CONTINUATION;
4232 io->tgt_wqe_timeout = iparam->fcp_tgt.timeout;
4233 if (sli_fcp_tsend64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4234 iparam->fcp_tgt.offset, len, io->indicator, io->reqtag,
4236 iparam->fcp_tgt.ox_id, rpi, rnode,
4238 iparam->fcp_tgt.dif_oper,
4239 iparam->fcp_tgt.blk_size,
4240 iparam->fcp_tgt.cs_ctl,
4241 iparam->fcp_tgt.app_id)) {
4242 ocs_log_err(hw->os, "TSEND WQE error\n");
4243 rc = OCS_HW_RTN_ERROR;
4244 } else if (hw->workaround.retain_tsend_io_length) {
4249 case OCS_HW_IO_TARGET_RSP: {
4250 uint16_t flags = iparam->fcp_tgt.flags;
4253 flags |= SLI4_IO_CONTINUATION;
4255 flags &= ~SLI4_IO_CONTINUATION;
4258 /* post a new auto xfer ready buffer */
4259 if (hw->auto_xfer_rdy_enabled && io->is_port_owned) {
4260 if ((io->auto_xfer_rdy_dnrx = ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 1))) {
4261 flags |= SLI4_IO_DNRX;
4265 io->tgt_wqe_timeout = iparam->fcp_tgt.timeout;
4266 if (sli_fcp_trsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size,
4269 io->indicator, io->reqtag,
4271 iparam->fcp_tgt.ox_id,
4273 flags, iparam->fcp_tgt.cs_ctl,
4275 iparam->fcp_tgt.app_id)) {
4276 ocs_log_err(hw->os, "TRSP WQE error\n");
4277 rc = OCS_HW_RTN_ERROR;
4283 ocs_log_err(hw->os, "unsupported IO type %#x\n", type);
4284 rc = OCS_HW_RTN_ERROR;
4287 if (send_wqe && (OCS_HW_RTN_SUCCESS == rc)) {
4288 if (io->wq == NULL) {
4289 io->wq = ocs_hw_queue_next_wq(hw, io);
4290 ocs_hw_assert(io->wq != NULL);
4296 * Add IO to active io wqe list before submitting, in case the
4297 * wcqe processing preempts this thread.
4299 OCS_STAT(hw->tcmd_wq_submit[io->wq->instance]++);
4300 OCS_STAT(io->wq->use_count++);
4301 ocs_hw_add_io_timed_wqe(hw, io);
4302 rc = hw_wq_write(io->wq, &io->wqe);
4304 /* non-negative return is success */
4307 /* failed to write wqe, remove from active wqe list */
4308 ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
4310 ocs_hw_remove_io_timed_wqe(hw, io);
4318 * @brief Send a raw frame
4321 * Sends a frame consisting of a header and payload, using the SEND_FRAME_WQE.
4323 * @param hw Pointer to HW object.
4324 * @param hdr Pointer to a little endian formatted FC header.
4325 * @param sof Value to use as the frame SOF.
4326 * @param eof Value to use as the frame EOF.
4327 * @param payload Pointer to payload DMA buffer.
4328 * @param ctx Pointer to caller provided send frame context.
4329 * @param callback Callback function.
4330 * @param arg Callback function argument.
4332 * @return Returns 0 on success, or a negative error code value on failure.
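 *
 * A minimal usage sketch (hypothetical caller: @c hw, @c hdr, and the
 * @c payload DMA buffer are assumed to be set up already, and FC_SOFI3 /
 * FC_EOFT are assumed constants for the usual SOFi3/EOFt code points).
 * Note that @c ctx must remain valid until the completion callback runs:
 *
 * @code
 * static void
 * my_send_frame_done(void *arg, uint8_t *cqe, int32_t status)
 * {
 *         if (status != 0) {
 *                 // the frame was not sent; arg carries caller context
 *         }
 * }
 *
 * ocs_hw_send_frame_context_t ctx = {0};
 *
 * if (ocs_hw_send_frame(hw, hdr, FC_SOFI3, FC_EOFT, payload, &ctx,
 *                       my_send_frame_done, NULL) != OCS_HW_RTN_SUCCESS) {
 *         ocs_log_err(hw->os, "send_frame failed to start\n");
 * }
 * @endcode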
4335 ocs_hw_send_frame(ocs_hw_t *hw, fc_header_le_t *hdr, uint8_t sof, uint8_t eof, ocs_dma_t *payload,
4336 ocs_hw_send_frame_context_t *ctx, void (*callback)(void *arg, uint8_t *cqe, int32_t status), void *arg)
4345 /* populate the callback object */
4348 /* Fetch and populate request tag */
4349 ctx->wqcb = ocs_hw_reqtag_alloc(hw, callback, arg);
4350 if (ctx->wqcb == NULL) {
4351 ocs_log_err(hw->os, "can't allocate request tag\n");
4352 return OCS_HW_RTN_NO_RESOURCES;
4355 /* Choose a work queue, first look for a class[1] wq, otherwise just use wq[0] */
4356 wq = ocs_varray_iter_next(hw->wq_class_array[1]);
4361 /* Set the XRI and RX_ID in the header based on which WQ and which send_frame_io we are using */
4362 xri = wq->send_frame_io->indicator;
4364 /* Build the send frame WQE */
4365 rc = sli_send_frame_wqe(&hw->sli, wqe->wqebuf, hw->sli.config.wqe_size, sof, eof, (uint32_t*) hdr, payload,
4366 payload->len, OCS_HW_SEND_FRAME_TIMEOUT, xri, ctx->wqcb->instance_index);
4368 ocs_log_err(hw->os, "sli_send_frame_wqe failed: %d\n", rc);
4369 return OCS_HW_RTN_ERROR;
4373 rc = hw_wq_write(wq, wqe);
4375 ocs_log_err(hw->os, "hw_wq_write failed: %d\n", rc);
4376 return OCS_HW_RTN_ERROR;
4379 OCS_STAT(wq->use_count++);
4381 return OCS_HW_RTN_SUCCESS;
4385 ocs_hw_io_register_sgl(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_dma_t *sgl, uint32_t sgl_count)
4387 if (sli_get_sgl_preregister(&hw->sli)) {
4388 ocs_log_err(hw->os, "can't use temporary SGL with pre-registered SGLs\n");
4389 return OCS_HW_RTN_ERROR;
4392 io->ovfl_sgl_count = sgl_count;
4395 return OCS_HW_RTN_SUCCESS;
4399 ocs_hw_io_restore_sgl(ocs_hw_t *hw, ocs_hw_io_t *io)
4401 /* Restore the default */
4402 io->sgl = &io->def_sgl;
4403 io->sgl_count = io->def_sgl_count;
4406 * For skyhawk, we need to free the IO allocated for the chained
4407 * SGL. For all devices, clear the overflow fields on the IO.
4409 * Note: For DIF IOs, we may be using the same XRI for the sec_hio and
4410 * the chained SGLs. If so, then we clear the ovfl_io field
4411 * when the sec_hio is freed.
4413 if (io->ovfl_io != NULL) {
4414 ocs_hw_io_free(hw, io->ovfl_io);
4418 /* Clear the overflow SGL */
4419 io->ovfl_sgl = NULL;
4420 io->ovfl_sgl_count = 0;
4421 io->ovfl_lsp = NULL;
4426 * @brief Initialize the scatter gather list entries of an IO.
4428 * @param hw Hardware context.
4429 * @param io Previously-allocated HW IO object.
4430 * @param type Type of IO (target read, target response, and so on).
4432 * @return Returns 0 on success, or a non-zero value on failure.
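 *
 * Typical call sequence (sketch; @c io is assumed to come from
 * ocs_hw_io_alloc(), and @c seg_phys / @c seg_len are hypothetical names
 * for one DMA-mapped data segment):
 *
 * @code
 * if (ocs_hw_io_init_sges(hw, io, OCS_HW_IO_TARGET_READ) != OCS_HW_RTN_SUCCESS) {
 *         return OCS_HW_RTN_ERROR;
 * }
 * // append one data SGE per mapped segment
 * if (ocs_hw_io_add_sge(hw, io, seg_phys, seg_len) != OCS_HW_RTN_SUCCESS) {
 *         return OCS_HW_RTN_ERROR;
 * }
 * @endcode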
4435 ocs_hw_io_init_sges(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_hw_io_type_e type)
4437 sli4_sge_t *data = NULL;
4442 ocs_log_err(hw ? hw->os : NULL, "bad parameter hw=%p io=%p\n",
4444 return OCS_HW_RTN_ERROR;
4447 /* Clear / reset the scatter-gather list */
4448 io->sgl = &io->def_sgl;
4449 io->sgl_count = io->def_sgl_count;
4450 io->first_data_sge = 0;
4452 ocs_memset(io->sgl->virt, 0, 2 * sizeof(sli4_sge_t));
4458 data = io->sgl->virt;
4461 * Some IO types have underlying hardware requirements on the order
4462 * of SGEs. Process all special entries here.
4465 case OCS_HW_IO_INITIATOR_READ:
4466 case OCS_HW_IO_INITIATOR_WRITE:
4467 case OCS_HW_IO_INITIATOR_NODATA:
4469 * No skip entries; two special SGEs (command and response) for initiator I/Os.
4470 * The addresses and lengths are written later
4472 /* setup command pointer */
4473 data->sge_type = SLI4_SGE_TYPE_DATA;
4476 /* setup response pointer */
4477 data->sge_type = SLI4_SGE_TYPE_DATA;
4479 if (OCS_HW_IO_INITIATOR_NODATA == type) {
4486 case OCS_HW_IO_TARGET_WRITE:
4487 #define OCS_TARGET_WRITE_SKIPS 2
4488 skips = OCS_TARGET_WRITE_SKIPS;
4490 /* populate host resident XFER_RDY buffer */
4491 data->sge_type = SLI4_SGE_TYPE_DATA;
4492 data->buffer_address_high = ocs_addr32_hi(io->xfer_rdy.phys);
4493 data->buffer_address_low = ocs_addr32_lo(io->xfer_rdy.phys);
4494 data->buffer_length = io->xfer_rdy.size;
4501 case OCS_HW_IO_TARGET_READ:
4503 * For FCP_TSEND64, the first 2 entries are SKIP SGEs
4505 #define OCS_TARGET_READ_SKIPS 2
4506 skips = OCS_TARGET_READ_SKIPS;
4508 case OCS_HW_IO_TARGET_RSP:
4510 * No skips, etc. for FCP_TRSP64
4514 ocs_log_err(hw->os, "unsupported IO type %#x\n", type);
4515 return OCS_HW_RTN_ERROR;
4519 * Write skip entries
4521 for (i = 0; i < skips; i++) {
4522 data->sge_type = SLI4_SGE_TYPE_SKIP;
4533 return OCS_HW_RTN_SUCCESS;
4538 * @brief Add a T10 PI seed scatter gather list entry.
4540 * @param hw Hardware context.
4541 * @param io Previously-allocated HW IO object.
4542 * @param dif_info Pointer to T10 DIF fields, or NULL if no DIF.
4544 * @return Returns 0 on success, or a non-zero value on failure.
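 *
 * Sketch of a caller filling a minimal DIF descriptor before adding the
 * seed SGE (the field values below are illustrative assumptions, not
 * recommended settings):
 *
 * @code
 * ocs_hw_dif_info_t dif;
 *
 * ocs_memset(&dif, 0, sizeof(dif));
 * dif.dif_oper = OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CRC;
 * dif.blk_size = OCS_HW_DIF_BK_SIZE_512;
 * dif.check_ref_tag = TRUE;
 * dif.check_guard = TRUE;
 *
 * if (ocs_hw_io_add_seed_sge(hw, io, &dif) != OCS_HW_RTN_SUCCESS) {
 *         return OCS_HW_RTN_ERROR;
 * }
 * @endcode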
4547 ocs_hw_io_add_seed_sge(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_hw_dif_info_t *dif_info)
4549 sli4_sge_t *data = NULL;
4550 sli4_diseed_sge_t *dif_seed;
4552 /* If no dif_info, or dif_oper is disabled, then just return success */
4553 if ((dif_info == NULL) || (dif_info->dif_oper == OCS_HW_DIF_OPER_DISABLED)) {
4554 return OCS_HW_RTN_SUCCESS;
4558 ocs_log_err(hw ? hw->os : NULL, "bad parameter hw=%p io=%p dif_info=%p\n",
4560 return OCS_HW_RTN_ERROR;
4563 data = io->sgl->virt;
4566 /* If we are doing T10 DIF add the DIF Seed SGE */
4567 ocs_memset(data, 0, sizeof(sli4_diseed_sge_t));
4568 dif_seed = (sli4_diseed_sge_t *)data;
4569 dif_seed->ref_tag_cmp = dif_info->ref_tag_cmp;
4570 dif_seed->ref_tag_repl = dif_info->ref_tag_repl;
4571 dif_seed->app_tag_repl = dif_info->app_tag_repl;
4572 dif_seed->repl_app_tag = dif_info->repl_app_tag;
4573 if (SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type) {
4574 dif_seed->atrt = dif_info->disable_app_ref_ffff;
4575 dif_seed->at = dif_info->disable_app_ffff;
4577 dif_seed->sge_type = SLI4_SGE_TYPE_DISEED;
4578 /* Workaround for SKH (BZ157233) */
4579 if (((io->type == OCS_HW_IO_TARGET_WRITE) || (io->type == OCS_HW_IO_INITIATOR_READ)) &&
4580 (SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type) && dif_info->dif_separate) {
4581 dif_seed->sge_type = SLI4_SGE_TYPE_SKIP;
4584 dif_seed->app_tag_cmp = dif_info->app_tag_cmp;
4585 dif_seed->dif_blk_size = dif_info->blk_size;
4586 dif_seed->auto_incr_ref_tag = dif_info->auto_incr_ref_tag;
4587 dif_seed->check_app_tag = dif_info->check_app_tag;
4588 dif_seed->check_ref_tag = dif_info->check_ref_tag;
4589 dif_seed->check_crc = dif_info->check_guard;
4590 dif_seed->new_ref_tag = dif_info->repl_ref_tag;
4592 switch(dif_info->dif_oper) {
4593 case OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CRC:
4594 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CRC;
4595 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CRC;
4597 case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_NODIF:
4598 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_NODIF;
4599 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_NODIF;
4601 case OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM:
4602 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM;
4603 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM;
4605 case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF:
4606 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF;
4607 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF;
4609 case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CRC:
4610 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CRC;
4611 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CRC;
4613 case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM:
4614 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM;
4615 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM;
4617 case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CHKSUM:
4618 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CHKSUM;
4619 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CHKSUM;
4621 case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CRC:
4622 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CRC;
4623 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CRC;
4625 case OCS_HW_SGE_DIF_OP_IN_RAW_OUT_RAW:
4626 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_RAW_OUT_RAW;
4627 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_RAW_OUT_RAW;
4630 ocs_log_err(hw->os, "unsupported DIF operation %#x\n",
4631 dif_info->dif_oper);
4632 return OCS_HW_RTN_ERROR;
4636 * Set last, clear previous last
4640 data[-1].last = FALSE;
4645 return OCS_HW_RTN_SUCCESS;
4649 ocs_hw_io_overflow_sgl(ocs_hw_t *hw, ocs_hw_io_t *io)
4651 sli4_lsp_sge_t *lsp;
4653 /* fail if we're already pointing to the overflow SGL */
4654 if (io->sgl == io->ovfl_sgl) {
4655 return OCS_HW_RTN_ERROR;
4659 * For skyhawk, we can use another SGL to extend the SGL list. The
4660 * Chained entry must not be in the first 4 entries.
4662 * Note: For DIF enabled IOs, we will use the ovfl_io for the sec_hio.
4664 if (sli_get_sgl_preregister(&hw->sli) &&
4665 io->def_sgl_count > 4 &&
4666 io->ovfl_io == NULL &&
4667 ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
4668 (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli)))) {
4669 io->ovfl_io = ocs_hw_io_alloc(hw);
4670 if (io->ovfl_io != NULL) {
4672 * Note: We can't call ocs_hw_io_register_sgl() here
4673 * because it checks that SGLs are not pre-registered
4674 * and for skyhawk, pre-registered SGLs are required.
4676 io->ovfl_sgl = &io->ovfl_io->def_sgl;
4677 io->ovfl_sgl_count = io->ovfl_io->def_sgl_count;
4681 /* fail if we don't have an overflow SGL registered */
4682 if (io->ovfl_io == NULL || io->ovfl_sgl == NULL) {
4683 return OCS_HW_RTN_ERROR;
4687 * Overflow, we need to put a link SGE in the last location of the current SGL, after
4688 * copying the last SGE to the overflow SGL
4691 ((sli4_sge_t*)io->ovfl_sgl->virt)[0] = ((sli4_sge_t*)io->sgl->virt)[io->n_sge - 1];
4693 lsp = &((sli4_lsp_sge_t*)io->sgl->virt)[io->n_sge - 1];
4694 ocs_memset(lsp, 0, sizeof(*lsp));
4696 if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
4697 (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) {
4698 sli_skh_chain_sge_build(&hw->sli,
4700 io->ovfl_io->indicator,
4704 lsp->buffer_address_high = ocs_addr32_hi(io->ovfl_sgl->phys);
4705 lsp->buffer_address_low = ocs_addr32_lo(io->ovfl_sgl->phys);
4706 lsp->sge_type = SLI4_SGE_TYPE_LSP;
4709 io->ovfl_lsp->segment_length = sizeof(sli4_sge_t);
4712 /* Update the current SGL pointer, and n_sgl */
4713 io->sgl = io->ovfl_sgl;
4714 io->sgl_count = io->ovfl_sgl_count;
4717 return OCS_HW_RTN_SUCCESS;
4722 * @brief Add a scatter gather list entry to an IO.
4724 * @param hw Hardware context.
4725 * @param io Previously-allocated HW IO object.
4726 * @param addr Physical address.
4727 * @param length Length of memory pointed to by @c addr.
4729 * @return Returns 0 on success, or a non-zero value on failure.
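 *
 * Sketch of building the data SGL from a caller-provided segment list
 * (@c segs / @c nsegs are hypothetical; overflow into a chained SGL is
 * handled internally when the default SGL fills up):
 *
 * @code
 * uint32_t i;
 *
 * for (i = 0; i < nsegs; i++) {
 *         if (ocs_hw_io_add_sge(hw, io, segs[i].phys, segs[i].len) !=
 *             OCS_HW_RTN_SUCCESS) {
 *                 return OCS_HW_RTN_ERROR;
 *         }
 * }
 * @endcode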
4732 ocs_hw_io_add_sge(ocs_hw_t *hw, ocs_hw_io_t *io, uintptr_t addr, uint32_t length)
4734 sli4_sge_t *data = NULL;
4736 if (!hw || !io || !addr || !length) {
4737 ocs_log_err(hw ? hw->os : NULL,
4738 "bad parameter hw=%p io=%p addr=%lx length=%u\n",
4739 hw, io, addr, length);
4740 return OCS_HW_RTN_ERROR;
4743 if ((length != 0) && (io->n_sge + 1) > io->sgl_count) {
4744 if (ocs_hw_io_overflow_sgl(hw, io) != OCS_HW_RTN_SUCCESS) {
4745 ocs_log_err(hw->os, "SGL full (%d)\n", io->n_sge);
4746 return OCS_HW_RTN_ERROR;
4750 if (length > sli_get_max_sge(&hw->sli)) {
4751 ocs_log_err(hw->os, "length of SGE %d bigger than allowed %d\n",
4752 length, sli_get_max_sge(&hw->sli));
4753 return OCS_HW_RTN_ERROR;
4756 data = io->sgl->virt;
4759 data->sge_type = SLI4_SGE_TYPE_DATA;
4760 data->buffer_address_high = ocs_addr32_hi(addr);
4761 data->buffer_address_low = ocs_addr32_lo(addr);
4762 data->buffer_length = length;
4763 data->data_offset = io->sge_offset;
4765 * Always assume this is the last entry and mark as such.
4766 * If this is not the first entry, unset the "last SGE"
4767 * indication for the previous entry
4771 data[-1].last = FALSE;
4774 /* Set first_data_bde if not previously set */
4775 if (io->first_data_sge == 0) {
4776 io->first_data_sge = io->n_sge;
4779 io->sge_offset += length;
4782 /* Update the linked segment length (only executed after overflow has begun) */
4783 if (io->ovfl_lsp != NULL) {
4784 io->ovfl_lsp->segment_length = io->n_sge * sizeof(sli4_sge_t);
4787 return OCS_HW_RTN_SUCCESS;
4792 * @brief Add a T10 DIF scatter gather list entry to an IO.
4794 * @param hw Hardware context.
4795 * @param io Previously-allocated HW IO object.
4796 * @param addr DIF physical address.
4798 * @return Returns 0 on success, or a non-zero value on failure.
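 *
 * With separate (split) DIF buffers the caller interleaves data and
 * protection SGEs; a sketch assuming one DIF block per data segment
 * (@c data_phys, @c data_len, and @c dif_phys are hypothetical):
 *
 * @code
 * if (ocs_hw_io_add_sge(hw, io, data_phys, data_len) != OCS_HW_RTN_SUCCESS ||
 *     ocs_hw_io_add_dif_sge(hw, io, dif_phys) != OCS_HW_RTN_SUCCESS) {
 *         return OCS_HW_RTN_ERROR;
 * }
 * @endcode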
4801 ocs_hw_io_add_dif_sge(ocs_hw_t *hw, ocs_hw_io_t *io, uintptr_t addr)
4803 sli4_dif_sge_t *data = NULL;
4805 if (!hw || !io || !addr) {
4806 ocs_log_err(hw ? hw->os : NULL,
4807 "bad parameter hw=%p io=%p addr=%lx\n",
4809 return OCS_HW_RTN_ERROR;
4812 if ((io->n_sge + 1) > hw->config.n_sgl) {
4813 if (ocs_hw_io_overflow_sgl(hw, io) != OCS_HW_RTN_SUCCESS) {
4814 ocs_log_err(hw->os, "SGL full (%d)\n", io->n_sge);
4815 return OCS_HW_RTN_ERROR;
4819 data = io->sgl->virt;
4822 data->sge_type = SLI4_SGE_TYPE_DIF;
4823 /* Workaround for SKH (BZ157233) */
4824 if (((io->type == OCS_HW_IO_TARGET_WRITE) || (io->type == OCS_HW_IO_INITIATOR_READ)) &&
4825 (SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type)) {
4826 data->sge_type = SLI4_SGE_TYPE_SKIP;
4829 data->buffer_address_high = ocs_addr32_hi(addr);
4830 data->buffer_address_low = ocs_addr32_lo(addr);
4833 * Always assume this is the last entry and mark as such.
4834 * If this is not the first entry, unset the "last SGE"
4835 * indication for the previous entry
4839 data[-1].last = FALSE;
4844 return OCS_HW_RTN_SUCCESS;
4849 * @brief Abort a previously-started IO.
4851 * @param hw Hardware context.
4852 * @param io_to_abort The IO to abort.
4853 * @param send_abts Boolean to have the hardware automatically generate an ABTS.
4855 * @param cb Function to call upon completion of the abort (may be NULL).
4856 * @param arg Argument to pass to abort completion function.
4858 * @return Returns 0 on success, or a non-zero value on failure.
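 *
 * Usage sketch (hypothetical; the callback is assumed to follow the
 * driver's ocs_hw_done_t convention):
 *
 * @code
 * static int32_t
 * abort_done(ocs_hw_io_t *hio, ocs_remote_node_t *rnode, uint32_t len,
 *            int32_t status, uint32_t ext, void *arg)
 * {
 *         // exchange-level cleanup happens here
 *         return 0;
 * }
 *
 * ocs_hw_rtn_e rc = ocs_hw_io_abort(hw, io, TRUE, abort_done, NULL);
 * if (rc == OCS_HW_RTN_IO_ABORT_IN_PROGRESS) {
 *         // an earlier abort already owns the abort_done callback
 * }
 * @endcode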
4861 ocs_hw_io_abort(ocs_hw_t *hw, ocs_hw_io_t *io_to_abort, uint32_t send_abts, void *cb, void *arg)
4863 sli4_abort_type_e atype = SLI_ABORT_MAX;
4864 uint32_t id = 0, mask = 0;
4865 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
4866 hw_wq_callback_t *wqcb;
4868 if (!hw || !io_to_abort) {
4869 ocs_log_err(hw ? hw->os : NULL,
4870 "bad parameter hw=%p io=%p\n",
4872 return OCS_HW_RTN_ERROR;
4875 if (hw->state != OCS_HW_STATE_ACTIVE) {
4876 ocs_log_err(hw->os, "cannot send IO abort, HW state=%d\n",
4878 return OCS_HW_RTN_ERROR;
4881 /* take a reference on IO being aborted */
4882 if (ocs_ref_get_unless_zero(&io_to_abort->ref) == 0) {
4883 /* command no longer active */
4884 ocs_log_test(hw ? hw->os : NULL,
4885 "io not active xri=0x%x tag=0x%x\n",
4886 io_to_abort->indicator, io_to_abort->reqtag);
4887 return OCS_HW_RTN_IO_NOT_ACTIVE;
4890 /* non-port owned XRI checks */
4891 /* Must have a valid WQ reference */
4892 if (io_to_abort->wq == NULL) {
4893 ocs_log_test(hw->os, "io_to_abort xri=0x%x not active on WQ\n",
4894 io_to_abort->indicator);
4895 ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */
4896 return OCS_HW_RTN_IO_NOT_ACTIVE;
4899 /* Validation checks complete; now check to see if already being aborted */
4900 ocs_lock(&hw->io_abort_lock);
4901 if (io_to_abort->abort_in_progress) {
4902 ocs_unlock(&hw->io_abort_lock);
4903 ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */
4904 ocs_log_debug(hw ? hw->os : NULL,
4905 "io already being aborted xri=0x%x tag=0x%x\n",
4906 io_to_abort->indicator, io_to_abort->reqtag);
4907 return OCS_HW_RTN_IO_ABORT_IN_PROGRESS;
4911 * This IO is not already being aborted. Set flag so we won't try to
4912 * abort it again. After all, we only have one abort_done callback.
4914 io_to_abort->abort_in_progress = 1;
4915 ocs_unlock(&hw->io_abort_lock);
4918 * If we got here, the possibilities are:
4919 * - host owned xri
4920 *   - io_to_abort->wq_index != UINT32_MAX
4921 *     - submit ABORT_WQE to same WQ
4922 * - port owned xri:
4923 *   - rxri: io_to_abort->wq_index == UINT32_MAX
4924 *     - submit ABORT_WQE to any WQ
4925 *   - non-rxri
4926 *     - io_to_abort->index != UINT32_MAX
4927 *       - submit ABORT_WQE to same WQ
4928 *     - io_to_abort->index == UINT32_MAX
4929 *       - submit ABORT_WQE to any WQ
4931 io_to_abort->abort_done = cb;
4932 io_to_abort->abort_arg = arg;
4934 atype = SLI_ABORT_XRI;
4935 id = io_to_abort->indicator;
4937 /* Allocate a request tag for the abort portion of this IO */
4938 wqcb = ocs_hw_reqtag_alloc(hw, ocs_hw_wq_process_abort, io_to_abort);
4940 ocs_log_err(hw->os, "can't allocate request tag\n");
4941 return OCS_HW_RTN_NO_RESOURCES;
4943 io_to_abort->abort_reqtag = wqcb->instance_index;
4946 * If the wqe is on the pending list, then set this wqe to be
4947 * aborted when the IO's wqe is removed from the list.
4949 if (io_to_abort->wq != NULL) {
4950 sli_queue_lock(io_to_abort->wq->queue);
4951 if (ocs_list_on_list(&io_to_abort->wqe.link)) {
4952 io_to_abort->wqe.abort_wqe_submit_needed = 1;
4953 io_to_abort->wqe.send_abts = send_abts;
4954 io_to_abort->wqe.id = id;
4955 io_to_abort->wqe.abort_reqtag = io_to_abort->abort_reqtag;
4956 sli_queue_unlock(io_to_abort->wq->queue);
4959 sli_queue_unlock(io_to_abort->wq->queue);
4962 if (sli_abort_wqe(&hw->sli, io_to_abort->wqe.wqebuf, hw->sli.config.wqe_size, atype, send_abts, id, mask,
4963 io_to_abort->abort_reqtag, SLI4_CQ_DEFAULT)) {
4964 ocs_log_err(hw->os, "ABORT WQE error\n");
4965 io_to_abort->abort_reqtag = UINT32_MAX;
4966 ocs_hw_reqtag_free(hw, wqcb);
4967 rc = OCS_HW_RTN_ERROR;
4970 if (OCS_HW_RTN_SUCCESS == rc) {
4971 if (io_to_abort->wq == NULL) {
4972 io_to_abort->wq = ocs_hw_queue_next_wq(hw, io_to_abort);
4973 ocs_hw_assert(io_to_abort->wq != NULL);
4975 /* ABORT_WQE does not actually utilize an XRI on the Port,
4976 * therefore, keep xbusy as-is to track the exchange's state,
4977 * not the ABORT_WQE's state
4979 rc = hw_wq_write(io_to_abort->wq, &io_to_abort->wqe);
4981 /* non-negative return is success */
4983 /* can't abort an abort so skip adding to timed wqe list */
4987 if (OCS_HW_RTN_SUCCESS != rc) {
4988 ocs_lock(&hw->io_abort_lock);
4989 io_to_abort->abort_in_progress = 0;
4990 ocs_unlock(&hw->io_abort_lock);
4991 ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */
4998 * @brief Return the OX_ID/RX_ID of the IO.
5000 * @param hw Hardware context.
5001 * @param io HW IO object.
5003 * @return Returns X_ID on success, or -1 on failure.
5006 ocs_hw_io_get_xid(ocs_hw_t *hw, ocs_hw_io_t *io)
5009 ocs_log_err(hw ? hw->os : NULL,
5010 "bad parameter hw=%p io=%p\n", hw, io);
5014 return io->indicator;
5017 typedef struct ocs_hw_fw_write_cb_arg {
5020 } ocs_hw_fw_write_cb_arg_t;
5022 typedef struct ocs_hw_sfp_cb_arg {
5026 } ocs_hw_sfp_cb_arg_t;
5028 typedef struct ocs_hw_temp_cb_arg {
5029 ocs_hw_temp_cb_t cb;
5031 } ocs_hw_temp_cb_arg_t;
5033 typedef struct ocs_hw_link_stat_cb_arg {
5034 ocs_hw_link_stat_cb_t cb;
5036 } ocs_hw_link_stat_cb_arg_t;
5038 typedef struct ocs_hw_host_stat_cb_arg {
5039 ocs_hw_host_stat_cb_t cb;
5041 } ocs_hw_host_stat_cb_arg_t;
5043 typedef struct ocs_hw_dump_get_cb_arg {
5044 ocs_hw_dump_get_cb_t cb;
5047 } ocs_hw_dump_get_cb_arg_t;
5049 typedef struct ocs_hw_dump_clear_cb_arg {
5050 ocs_hw_dump_clear_cb_t cb;
5053 } ocs_hw_dump_clear_cb_arg_t;
5056 * @brief Write a portion of a firmware image to the device.
5059 * Calls the correct firmware write function based on the device type.
5061 * @param hw Hardware context.
5062 * @param dma DMA structure containing the firmware image chunk.
5063 * @param size Size of the firmware image chunk.
5064 * @param offset Offset, in bytes, from the beginning of the firmware image.
5065 * @param last True if this is the last chunk of the image.
5066 * Causes the image to be committed to flash.
5067 * @param cb Pointer to a callback function that is called when the command completes.
5068 * The callback function prototype is
5069 * <tt>void cb(int32_t status, uint32_t bytes_written, void *arg)</tt>.
5070 * @param arg Pointer to be passed to the callback function.
5072 * @return Returns 0 on success, or a non-zero value on failure.
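 *
 * Chunked download sketch (hypothetical caller-side loop: @c img /
 * @c img_len describe the image, @c chunk is a pre-allocated ocs_dma_t of
 * @c CHUNK_SIZE bytes, and @c fw_write_done is a caller callback that the
 * loop is assumed to wait on between iterations):
 *
 * @code
 * uint32_t off = 0;
 *
 * while (off < img_len) {
 *         uint32_t n = (img_len - off > CHUNK_SIZE) ? CHUNK_SIZE : (img_len - off);
 *         int last = (off + n >= img_len);
 *
 *         ocs_memcpy(chunk.virt, img + off, n);
 *         if (ocs_hw_firmware_write(hw, &chunk, n, off, last,
 *                                   fw_write_done, NULL) != 0) {
 *                 break;
 *         }
 *         // wait here for fw_write_done before queuing the next chunk
 *         off += n;
 * }
 * @endcode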
5075 ocs_hw_firmware_write(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg)
5077 if (hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) {
5078 return ocs_hw_firmware_write_lancer(hw, dma, size, offset, last, cb, arg);
5080 /* firmware_write is not supported for BE3/Skyhawk */
5086 * @brief Write a portion of a firmware image to the Emulex XE201 ASIC (Lancer).
5089 * Creates a SLI_CONFIG mailbox command, fills it with the correct values to write a
5090 * firmware image chunk, and then sends the command with ocs_hw_command(). On completion,
5091 * the callback function ocs_hw_cb_fw_write() gets called to free the mailbox
5092 * and to signal the caller that the write has completed.
5094 * @param hw Hardware context.
5095 * @param dma DMA structure containing the firmware image chunk.
5096 * @param size Size of the firmware image chunk.
5097 * @param offset Offset, in bytes, from the beginning of the firmware image.
5098 * @param last True if this is the last chunk of the image. Causes the image to be committed to flash.
5099 * @param cb Pointer to a callback function that is called when the command completes.
5100 * The callback function prototype is
5101 * <tt>void cb(int32_t status, uint32_t bytes_written, void *arg)</tt>.
5102 * @param arg Pointer to be passed to the callback function.
5104 * @return Returns 0 on success, or a non-zero value on failure.
5107 ocs_hw_firmware_write_lancer(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg)
5109 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5111 ocs_hw_fw_write_cb_arg_t *cb_arg;
5112 int noc = 0; /* No Commit bit - set to 1 for testing */
5114 if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
5115 ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
5116 return OCS_HW_RTN_ERROR;
5119 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5120 if (mbxdata == NULL) {
5121 ocs_log_err(hw->os, "failed to malloc mbox\n");
5122 return OCS_HW_RTN_NO_MEMORY;
5125 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_fw_write_cb_arg_t), OCS_M_NOWAIT);
5126 if (cb_arg == NULL) {
5127 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
5128 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5129 return OCS_HW_RTN_NO_MEMORY;
5135 if (sli_cmd_common_write_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE, noc, last,
5136 size, offset, "/prg/", dma)) {
5137 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_fw_write, cb_arg);
5140 if (rc != OCS_HW_RTN_SUCCESS) {
5141 ocs_log_test(hw->os, "COMMON_WRITE_OBJECT failed\n");
5142 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5143 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t));
5151 * @brief Called when the WRITE OBJECT command completes.
5154 * Get the number of bytes actually written out of the response, free the mailbox
5155 * that was malloc'd by ocs_hw_firmware_write(),
5156 * then call the callback and pass the status and bytes written.
5158 * @param hw Hardware context.
5159 * @param status Status field from the mbox completion.
5160 * @param mqe Mailbox response structure.
5161 * @param arg Pointer to a callback function that signals the caller that the command is done.
5162 * The callback function prototype is <tt>void cb(int32_t status, uint32_t bytes_written)</tt>.
5164 * @return Returns 0.
5167 ocs_hw_cb_fw_write(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
5170 sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
5171 sli4_res_common_write_object_t* wr_obj_rsp = (sli4_res_common_write_object_t*) &(mbox_rsp->payload.embed);
5172 ocs_hw_fw_write_cb_arg_t *cb_arg = arg;
5173 uint32_t bytes_written;
5174 uint16_t mbox_status;
5175 uint32_t change_status;
5177 bytes_written = wr_obj_rsp->actual_write_length;
5178 mbox_status = mbox_rsp->hdr.status;
5179 change_status = wr_obj_rsp->change_status;
5181 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5185 if ((status == 0) && mbox_status) {
5186 status = mbox_status;
5188 cb_arg->cb(status, bytes_written, change_status, cb_arg->arg);
5191 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t));
5199 * @brief Called when the READ_TRANSCEIVER_DATA command completes.
5202 * Get the number of bytes read out of the response, free the mailbox that was malloc'd
5203 * by ocs_hw_get_sfp(), then call the callback and pass the status and bytes written.
5205 * @param hw Hardware context.
5206 * @param status Status field from the mbox completion.
5207 * @param mqe Mailbox response structure.
5208 * @param arg Pointer to a callback function that signals the caller that the command is done.
5209 * The callback function prototype is
5210 * <tt>void cb(int32_t status, uint32_t bytes_written, uint32_t *data, void *arg)</tt>.
5212 * @return Returns 0.
5215 ocs_hw_cb_sfp(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
5218 ocs_hw_sfp_cb_arg_t *cb_arg = arg;
5219 ocs_dma_t *payload = NULL;
5220 sli4_res_common_read_transceiver_data_t* mbox_rsp = NULL;
5221 uint32_t bytes_written;
5224 payload = &(cb_arg->payload);
5226 mbox_rsp = (sli4_res_common_read_transceiver_data_t*) payload->virt;
5227 bytes_written = mbox_rsp->hdr.response_length;
5228 if ((status == 0) && mbox_rsp->hdr.status) {
5229 status = mbox_rsp->hdr.status;
5231 cb_arg->cb(hw->os, status, bytes_written, mbox_rsp->page_data, cb_arg->arg);
5234 ocs_dma_free(hw->os, &cb_arg->payload);
5235 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t));
5238 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5244 * @brief Function to retrieve the SFP information.
5246 * @param hw Hardware context.
5247 * @param page The page of SFP data to retrieve (0xa0 or 0xa2).
5248 * @param cb Function to call upon completion of sending the data (may be NULL).
5249 * @param arg Argument to pass to IO completion function.
5251 * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
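 *
 * Usage sketch reading page 0xa0 (@c sfp_done is a caller-supplied
 * function assumed to match the ocs_hw_sfp_cb_t invocation in
 * ocs_hw_cb_sfp()):
 *
 * @code
 * static void
 * sfp_done(void *os, int32_t status, uint32_t bytes_read,
 *          uint32_t *page_data, void *arg)
 * {
 *         // decode the transceiver page here
 * }
 *
 * if (ocs_hw_get_sfp(hw, 0xa0, sfp_done, NULL) != OCS_HW_RTN_SUCCESS) {
 *         ocs_log_err(hw->os, "SFP read failed to start\n");
 * }
 * @endcode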
5254 ocs_hw_get_sfp(ocs_hw_t *hw, uint16_t page, ocs_hw_sfp_cb_t cb, void *arg)
5256 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5257 ocs_hw_sfp_cb_arg_t *cb_arg;
5260 /* mbxdata holds the header of the command */
5261 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5262 if (mbxdata == NULL) {
5263 ocs_log_err(hw->os, "failed to malloc mbox\n");
5264 return OCS_HW_RTN_NO_MEMORY;
5267 /* cb_arg holds the data that will be passed to the callback on completion */
5268 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_sfp_cb_arg_t), OCS_M_NOWAIT);
5269 if (cb_arg == NULL) {
5270 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
5271 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5272 return OCS_HW_RTN_NO_MEMORY;
5278 /* payload holds the non-embedded portion */
5279 if (ocs_dma_alloc(hw->os, &cb_arg->payload, sizeof(sli4_res_common_read_transceiver_data_t),
5280 OCS_MIN_DMA_ALIGNMENT)) {
5281 ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
5282 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t));
5283 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5284 return OCS_HW_RTN_NO_MEMORY;
5287 /* Send the HW command */
5288 if (sli_cmd_common_read_transceiver_data(&hw->sli, mbxdata, SLI4_BMBX_SIZE, page,
5289 &cb_arg->payload)) {
5290 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_sfp, cb_arg);
5293 if (rc != OCS_HW_RTN_SUCCESS) {
5294 ocs_log_test(hw->os, "READ_TRANSCEIVER_DATA failed with status %d\n",
5296 ocs_dma_free(hw->os, &cb_arg->payload);
5297 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t));
5298 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5305 * @brief Function to retrieve the temperature information.
5307 * @param hw Hardware context.
5308 * @param cb Function to call upon completion of sending the data (may be NULL).
5309 * @param arg Argument to pass to IO completion function.
5311 * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
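 *
 * Usage sketch (@c temp_done is assumed to match ocs_hw_temp_cb_t):
 *
 * @code
 * if (ocs_hw_get_temperature(hw, temp_done, NULL) != OCS_HW_RTN_SUCCESS) {
 *         ocs_log_err(hw->os, "temperature query failed to start\n");
 * }
 * @endcode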
5314 ocs_hw_get_temperature(ocs_hw_t *hw, ocs_hw_temp_cb_t cb, void *arg)
5316 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5317 ocs_hw_temp_cb_arg_t *cb_arg;
5320 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5321 if (mbxdata == NULL) {
5322 ocs_log_err(hw->os, "failed to malloc mbox");
5323 return OCS_HW_RTN_NO_MEMORY;
5326 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_temp_cb_arg_t), OCS_M_NOWAIT);
5327 if (cb_arg == NULL) {
5328 ocs_log_err(hw->os, "failed to malloc cb_arg");
5329 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5330 return OCS_HW_RTN_NO_MEMORY;
5336 if (sli_cmd_dump_type4(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
5337 SLI4_WKI_TAG_SAT_TEM)) {
5338 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_temp, cb_arg);
5341 if (rc != OCS_HW_RTN_SUCCESS) {
5342 ocs_log_test(hw->os, "DUMP_TYPE4 failed\n");
5343 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5344 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_temp_cb_arg_t));
5351 * @brief Called when the DUMP command completes.
5354 * Get the temperature data out of the response, free the mailbox that was malloc'd
5355 * by ocs_hw_get_temperature(), then call the callback and pass the status and data.
5357 * @param hw Hardware context.
5358 * @param status Status field from the mbox completion.
5359 * @param mqe Mailbox response structure.
5360 * @param arg Pointer to a callback function that signals the caller that the command is done.
5361 * The callback function prototype is defined by ocs_hw_temp_cb_t.
5363 * @return Returns 0.
5366 ocs_hw_cb_temp(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
5369 sli4_cmd_dump4_t* mbox_rsp = (sli4_cmd_dump4_t*) mqe;
5370 ocs_hw_temp_cb_arg_t *cb_arg = arg;
5371 uint32_t curr_temp = mbox_rsp->resp_data[0]; /* word 5 */
5372 uint32_t crit_temp_thrshld = mbox_rsp->resp_data[1]; /* word 6 */
5373 uint32_t warn_temp_thrshld = mbox_rsp->resp_data[2]; /* word 7 */
5374 uint32_t norm_temp_thrshld = mbox_rsp->resp_data[3]; /* word 8 */
5375 uint32_t fan_off_thrshld = mbox_rsp->resp_data[4]; /* word 9 */
5376 uint32_t fan_on_thrshld = mbox_rsp->resp_data[5]; /* word 10 */
5380 if ((status == 0) && mbox_rsp->hdr.status) {
5381 status = mbox_rsp->hdr.status;
5393 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_temp_cb_arg_t));
5395 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5401 * @brief Function to retrieve the link statistics.
5403 * @param hw Hardware context.
5404 * @param req_ext_counters If TRUE, then the extended counters will be requested.
5405 * @param clear_overflow_flags If TRUE, then overflow flags will be cleared.
5406 * @param clear_all_counters If TRUE, the counters will be cleared.
5407 * @param cb Function to call upon completion of sending the data (may be NULL).
5408 * @param arg Argument to pass to IO completion function.
5410 * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
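 *
 * Usage sketch requesting extended counters without clearing anything
 * (@c link_stats_done is assumed to match ocs_hw_link_stat_cb_t):
 *
 * @code
 * if (ocs_hw_get_link_stats(hw, TRUE, FALSE, FALSE,
 *                           link_stats_done, NULL) != OCS_HW_RTN_SUCCESS) {
 *         ocs_log_err(hw->os, "link stats query failed to start\n");
 * }
 * @endcode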
5413 ocs_hw_get_link_stats(ocs_hw_t *hw,
5414 uint8_t req_ext_counters,
5415 uint8_t clear_overflow_flags,
5416 uint8_t clear_all_counters,
5417 ocs_hw_link_stat_cb_t cb,
5420 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5421 ocs_hw_link_stat_cb_arg_t *cb_arg;
5424 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5425 if (mbxdata == NULL) {
5426 ocs_log_err(hw->os, "failed to malloc mbox");
5427 return OCS_HW_RTN_NO_MEMORY;
5430 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_link_stat_cb_arg_t), OCS_M_NOWAIT);
5431 if (cb_arg == NULL) {
5432 ocs_log_err(hw->os, "failed to malloc cb_arg");
5433 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5434 return OCS_HW_RTN_NO_MEMORY;
5440 if (sli_cmd_read_link_stats(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
5442 clear_overflow_flags,
5443 clear_all_counters)) {
5444 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_link_stat, cb_arg);
5447 if (rc != OCS_HW_RTN_SUCCESS) {
5448 ocs_log_test(hw->os, "READ_LINK_STATS failed\n");
5449 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5450 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_link_stat_cb_arg_t));
5457 * @brief Called when the READ_LINK_STAT command completes.
5460 * Get the counters out of the response, free the mailbox that was malloc'd
5461 * by ocs_hw_get_link_stats(), then call the callback and pass the status and data.
5463 * @param hw Hardware context.
5464 * @param status Status field from the mbox completion.
5465 * @param mqe Mailbox response structure.
5466 * @param arg Pointer to a callback function that signals the caller that the command is done.
5467 * The callback function prototype is defined by ocs_hw_link_stat_cb_t.
5469 * @return Returns 0.
5472 ocs_hw_cb_link_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
5475 sli4_cmd_read_link_stats_t* mbox_rsp = (sli4_cmd_read_link_stats_t*) mqe;
5476 ocs_hw_link_stat_cb_arg_t *cb_arg = arg;
5477 ocs_hw_link_stat_counts_t counts[OCS_HW_LINK_STAT_MAX];
5478 uint32_t num_counters = (mbox_rsp->gec ? 20 : 13);
5480 ocs_memset(counts, 0, sizeof(ocs_hw_link_stat_counts_t) *
5481 OCS_HW_LINK_STAT_MAX);
5483 counts[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].overflow = mbox_rsp->w02of;
5484 counts[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].overflow = mbox_rsp->w03of;
5485 counts[OCS_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].overflow = mbox_rsp->w04of;
5486 counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].overflow = mbox_rsp->w05of;
5487 counts[OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].overflow = mbox_rsp->w06of;
5488 counts[OCS_HW_LINK_STAT_CRC_COUNT].overflow = mbox_rsp->w07of;
5489 counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].overflow = mbox_rsp->w08of;
5490 counts[OCS_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].overflow = mbox_rsp->w09of;
5491 counts[OCS_HW_LINK_STAT_ARB_TIMEOUT_COUNT].overflow = mbox_rsp->w10of;
5492 counts[OCS_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].overflow = mbox_rsp->w11of;
5493 counts[OCS_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].overflow = mbox_rsp->w12of;
5494 counts[OCS_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].overflow = mbox_rsp->w13of;
5495 counts[OCS_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].overflow = mbox_rsp->w14of;
5496 counts[OCS_HW_LINK_STAT_RCV_EOFA_COUNT].overflow = mbox_rsp->w15of;
5497 counts[OCS_HW_LINK_STAT_RCV_EOFDTI_COUNT].overflow = mbox_rsp->w16of;
5498 counts[OCS_HW_LINK_STAT_RCV_EOFNI_COUNT].overflow = mbox_rsp->w17of;
5499 counts[OCS_HW_LINK_STAT_RCV_SOFF_COUNT].overflow = mbox_rsp->w18of;
5500 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].overflow = mbox_rsp->w19of;
5501 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].overflow = mbox_rsp->w20of;
5502 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].overflow = mbox_rsp->w21of;
5504 counts[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].counter = mbox_rsp->link_failure_error_count;
5505 counts[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter = mbox_rsp->loss_of_sync_error_count;
5506 counts[OCS_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].counter = mbox_rsp->loss_of_signal_error_count;
5507 counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter = mbox_rsp->primitive_sequence_error_count;
5508 counts[OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter = mbox_rsp->invalid_transmission_word_error_count;
5509 counts[OCS_HW_LINK_STAT_CRC_COUNT].counter = mbox_rsp->crc_error_count;
5510 counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].counter = mbox_rsp->primitive_sequence_event_timeout_count;
5511 counts[OCS_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].counter = mbox_rsp->elastic_buffer_overrun_error_count;
5512 counts[OCS_HW_LINK_STAT_ARB_TIMEOUT_COUNT].counter = mbox_rsp->arbitration_fc_al_timout_count;
5513 counts[OCS_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].counter = mbox_rsp->advertised_receive_bufftor_to_buffer_credit;
5514 counts[OCS_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].counter = mbox_rsp->current_receive_buffer_to_buffer_credit;
5515 counts[OCS_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].counter = mbox_rsp->advertised_transmit_buffer_to_buffer_credit;
5516 counts[OCS_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].counter = mbox_rsp->current_transmit_buffer_to_buffer_credit;
5517 counts[OCS_HW_LINK_STAT_RCV_EOFA_COUNT].counter = mbox_rsp->received_eofa_count;
5518 counts[OCS_HW_LINK_STAT_RCV_EOFDTI_COUNT].counter = mbox_rsp->received_eofdti_count;
5519 counts[OCS_HW_LINK_STAT_RCV_EOFNI_COUNT].counter = mbox_rsp->received_eofni_count;
5520 counts[OCS_HW_LINK_STAT_RCV_SOFF_COUNT].counter = mbox_rsp->received_soff_count;
5521 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].counter = mbox_rsp->received_dropped_no_aer_count;
5522 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].counter = mbox_rsp->received_dropped_no_available_rpi_resources_count;
5523 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].counter = mbox_rsp->received_dropped_no_available_xri_resources_count;
5527 if ((status == 0) && mbox_rsp->hdr.status) {
5528 status = mbox_rsp->hdr.status;
5536 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_link_stat_cb_arg_t));
5538 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5544 * @brief Function to retrieve the link and host statistics.
5546 * @param hw Hardware context.
5547 * @param cc Clear counters; if TRUE, all counters are cleared.
5548 * @param cb Function to call upon completion of receiving the data.
5549 * @param arg Argument to pass to the host statistics completion callback.
5551 * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
5554 ocs_hw_get_host_stats(ocs_hw_t *hw, uint8_t cc, ocs_hw_host_stat_cb_t cb, void *arg)
5556 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5557 ocs_hw_host_stat_cb_arg_t *cb_arg;
5560 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO);
5561 if (mbxdata == NULL) {
5562 ocs_log_err(hw->os, "failed to malloc mbox");
5563 return OCS_HW_RTN_NO_MEMORY;
5566 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_host_stat_cb_arg_t), 0);
5567 if (cb_arg == NULL) {
5568 ocs_log_err(hw->os, "failed to malloc cb_arg");
5569 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5570 return OCS_HW_RTN_NO_MEMORY;
5576 /* Send the HW command to get the host stats */
5577 if (sli_cmd_read_status(&hw->sli, mbxdata, SLI4_BMBX_SIZE, cc)) {
5578 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_host_stat, cb_arg);
5581 if (rc != OCS_HW_RTN_SUCCESS) {
5582 ocs_log_test(hw->os, "READ_HOST_STATS failed\n");
5583 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5584 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_host_stat_cb_arg_t));
5591 * @brief Called when the READ_STATUS command completes.
5594 * Get the counters out of the response, free the mailbox that was malloc'd
5595 * by ocs_hw_get_host_stats(), then call the callback and pass
5596 * the status and data.
5598 * @param hw Hardware context.
5599 * @param status Status field from the mbox completion.
5600 * @param mqe Mailbox response structure.
5601 * @param arg Pointer to a callback function that signals the caller that the command is done.
5602 * The callback function prototype is defined by
5603 * ocs_hw_host_stat_cb_t.
5605 * @return Returns 0.
5608 ocs_hw_cb_host_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
5611 sli4_cmd_read_status_t* mbox_rsp = (sli4_cmd_read_status_t*) mqe;
5612 ocs_hw_host_stat_cb_arg_t *cb_arg = arg;
5613 ocs_hw_host_stat_counts_t counts[OCS_HW_HOST_STAT_MAX];
5614 uint32_t num_counters = OCS_HW_HOST_STAT_MAX;
5616 ocs_memset(counts, 0, sizeof(ocs_hw_host_stat_counts_t) *
5617 OCS_HW_HOST_STAT_MAX);
5619 counts[OCS_HW_HOST_STAT_TX_KBYTE_COUNT].counter = mbox_rsp->transmit_kbyte_count;
5620 counts[OCS_HW_HOST_STAT_RX_KBYTE_COUNT].counter = mbox_rsp->receive_kbyte_count;
5621 counts[OCS_HW_HOST_STAT_TX_FRAME_COUNT].counter = mbox_rsp->transmit_frame_count;
5622 counts[OCS_HW_HOST_STAT_RX_FRAME_COUNT].counter = mbox_rsp->receive_frame_count;
5623 counts[OCS_HW_HOST_STAT_TX_SEQ_COUNT].counter = mbox_rsp->transmit_sequence_count;
5624 counts[OCS_HW_HOST_STAT_RX_SEQ_COUNT].counter = mbox_rsp->receive_sequence_count;
5625 counts[OCS_HW_HOST_STAT_TOTAL_EXCH_ORIG].counter = mbox_rsp->total_exchanges_originator;
5626 counts[OCS_HW_HOST_STAT_TOTAL_EXCH_RESP].counter = mbox_rsp->total_exchanges_responder;
5627 counts[OCS_HW_HOSY_STAT_RX_P_BSY_COUNT].counter = mbox_rsp->receive_p_bsy_count;
5628 counts[OCS_HW_HOST_STAT_RX_F_BSY_COUNT].counter = mbox_rsp->receive_f_bsy_count;
5629 counts[OCS_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT].counter = mbox_rsp->dropped_frames_due_to_no_rq_buffer_count;
5630 counts[OCS_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT].counter = mbox_rsp->empty_rq_timeout_count;
5631 counts[OCS_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT].counter = mbox_rsp->dropped_frames_due_to_no_xri_count;
5632 counts[OCS_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT].counter = mbox_rsp->empty_xri_pool_count;
5636 if ((status == 0) && mbox_rsp->hdr.status) {
5637 status = mbox_rsp->hdr.status;
5645 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_host_stat_cb_arg_t));
5647 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5653 * @brief HW link configuration enum to the CLP string value mapping.
5655 * This structure provides a mapping from the ocs_hw_linkcfg_e
5656 * enum (enum exposed for the OCS_HW_PORT_SET_LINK_CONFIG port
5657 * control) to the CLP string that is used
5658 * in the DMTF_CLP_CMD mailbox command.
5660 typedef struct ocs_hw_linkcfg_map_s {
5661 ocs_hw_linkcfg_e linkcfg;
5662 const char *clp_str;
5663 } ocs_hw_linkcfg_map_t;
5666 * @brief Mapping from the HW linkcfg enum to the CLP command value
5669 static ocs_hw_linkcfg_map_t linkcfg_map[] = {
5670 {OCS_HW_LINKCFG_4X10G, "ELX_4x10G"},
5671 {OCS_HW_LINKCFG_1X40G, "ELX_1x40G"},
5672 {OCS_HW_LINKCFG_2X16G, "ELX_2x16G"},
5673 {OCS_HW_LINKCFG_4X8G, "ELX_4x8G"},
5674 {OCS_HW_LINKCFG_4X1G, "ELX_4x1G"},
5675 {OCS_HW_LINKCFG_2X10G, "ELX_2x10G"},
5676 {OCS_HW_LINKCFG_2X10G_2X8G, "ELX_2x10G_2x8G"}};
5679 * @brief HW link configuration enum to Skyhawk link config ID mapping.
5681 * This structure provides a mapping from the ocs_hw_linkcfg_e
5682 * enum (enum exposed for the OCS_HW_PORT_SET_LINK_CONFIG port
5683 * control) to the link config ID numbers used by Skyhawk
5685 typedef struct ocs_hw_skyhawk_linkcfg_map_s {
5686 ocs_hw_linkcfg_e linkcfg;
5688 } ocs_hw_skyhawk_linkcfg_map_t;
5691 * @brief Mapping from the HW linkcfg enum to the Skyhawk link config IDs
5693 static ocs_hw_skyhawk_linkcfg_map_t skyhawk_linkcfg_map[] = {
5694 {OCS_HW_LINKCFG_4X10G, 0x0a},
5695 {OCS_HW_LINKCFG_1X40G, 0x09},
5699 * @brief Helper function for getting the HW linkcfg enum from the CLP string value.
5702 * @param clp_str CLP string value from OEMELX_LinkConfig.
5704 * @return Returns the HW linkcfg enum corresponding to clp_str.
5706 static ocs_hw_linkcfg_e
5707 ocs_hw_linkcfg_from_clp(const char *clp_str)
5710 for (i = 0; i < ARRAY_SIZE(linkcfg_map); i++) {
5711 if (ocs_strncmp(linkcfg_map[i].clp_str, clp_str, ocs_strlen(clp_str)) == 0) {
5712 return linkcfg_map[i].linkcfg;
5715 return OCS_HW_LINKCFG_NA;
5719 * @brief Helper function for getting the CLP string value from the HW linkcfg enum.
5722 * @param linkcfg HW linkcfg enum.
5724 * @return Returns the OEMELX_LinkConfig CLP string value corresponding to linkcfg.
5728 ocs_hw_clp_from_linkcfg(ocs_hw_linkcfg_e linkcfg)
5731 for (i = 0; i < ARRAY_SIZE(linkcfg_map); i++) {
5732 if (linkcfg_map[i].linkcfg == linkcfg) {
5733 return linkcfg_map[i].clp_str;
5740 * @brief Helper function for getting a Skyhawk link config ID from the HW linkcfg enum.
5743 * @param linkcfg HW linkcfg enum.
5745 * @return Returns the Skyhawk link config ID corresponding to linkcfg.
5749 ocs_hw_config_id_from_linkcfg(ocs_hw_linkcfg_e linkcfg)
5752 for (i = 0; i < ARRAY_SIZE(skyhawk_linkcfg_map); i++) {
5753 if (skyhawk_linkcfg_map[i].linkcfg == linkcfg) {
5754 return skyhawk_linkcfg_map[i].config_id;
5761 * @brief Helper function for getting the HW linkcfg enum from a
5762 * Skyhawk config ID.
5764 * @param config_id Skyhawk link config ID.
5766 * @return Returns the HW linkcfg enum corresponding to config_id.
5768 static ocs_hw_linkcfg_e
5769 ocs_hw_linkcfg_from_config_id(const uint32_t config_id)
5772 for (i = 0; i < ARRAY_SIZE(skyhawk_linkcfg_map); i++) {
5773 if (skyhawk_linkcfg_map[i].config_id == config_id) {
5774 return skyhawk_linkcfg_map[i].linkcfg;
5777 return OCS_HW_LINKCFG_NA;
5781 * @brief Link configuration callback argument.
5783 typedef struct ocs_hw_linkcfg_cb_arg_s {
5784 ocs_hw_port_control_cb_t cb;
5790 uint32_t result_len;
5791 } ocs_hw_linkcfg_cb_arg_t;
5794 * @brief Set link configuration.
5796 * @param hw Hardware context.
5797 * @param value Link configuration enum to which the link configuration is set.
5799 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
5800 * @param cb Callback function to invoke following mbx command.
5801 * @param arg Callback argument.
5803 * @return Returns OCS_HW_RTN_SUCCESS on success.
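 *
 * Usage sketch selecting the 2x16G configuration with polled completion
 * (@c linkcfg_done is assumed to match ocs_hw_port_control_cb_t):
 *
 * @code
 * if (ocs_hw_set_linkcfg(hw, OCS_HW_LINKCFG_2X16G, OCS_CMD_POLL,
 *                        linkcfg_done, NULL) != OCS_HW_RTN_SUCCESS) {
 *         ocs_log_err(hw->os, "set linkcfg failed\n");
 * }
 * @endcode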
5806 ocs_hw_set_linkcfg(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
5808 if (!sli_link_is_configurable(&hw->sli)) {
5809 ocs_log_debug(hw->os, "Function not supported\n");
5810 return OCS_HW_RTN_ERROR;
5813 if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
5814 return ocs_hw_set_linkcfg_lancer(hw, value, opts, cb, arg);
5815 } else if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
5816 (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) {
5817 return ocs_hw_set_linkcfg_skyhawk(hw, value, opts, cb, arg);
5819 ocs_log_test(hw->os, "Function not supported for this IF_TYPE\n");
5820 return OCS_HW_RTN_ERROR;
5825 * @brief Set link configuration for Lancer
5827 * @param hw Hardware context.
5828 * @param value Link configuration enum to which the link configuration is set.
5830 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
5831 * @param cb Callback function to invoke following mbx command.
5832 * @param arg Callback argument.
5834 * @return Returns OCS_HW_RTN_SUCCESS on success.
5837 ocs_hw_set_linkcfg_lancer(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
5839 char cmd[OCS_HW_DMTF_CLP_CMD_MAX];
5840 ocs_hw_linkcfg_cb_arg_t *cb_arg;
5841 const char *value_str = NULL;
5842 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
5844 /* translate ocs_hw_linkcfg_e to CLP string */
5845 value_str = ocs_hw_clp_from_linkcfg(value);
5847 /* allocate memory for callback argument */
5848 cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT);
5849 if (cb_arg == NULL) {
5850 ocs_log_err(hw->os, "failed to malloc cb_arg");
5851 return OCS_HW_RTN_NO_MEMORY;
5854 ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "set / OEMELX_LinkConfig=%s", value_str);
5855 /* allocate DMA for command */
5856 if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, ocs_strlen(cmd)+1, 4096)) {
5857 ocs_log_err(hw->os, "malloc failed\n");
5858 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5859 return OCS_HW_RTN_NO_MEMORY;
5861 ocs_memset(cb_arg->dma_cmd.virt, 0, ocs_strlen(cmd)+1);
5862 ocs_memcpy(cb_arg->dma_cmd.virt, cmd, ocs_strlen(cmd));
5864 /* allocate DMA for response */
5865 if (ocs_dma_alloc(hw->os, &cb_arg->dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) {
5866 ocs_log_err(hw->os, "malloc failed\n");
5867 ocs_dma_free(hw->os, &cb_arg->dma_cmd);
5868 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5869 return OCS_HW_RTN_NO_MEMORY;
5873 cb_arg->opts = opts;
5875 rc = ocs_hw_exec_dmtf_clp_cmd(hw, &cb_arg->dma_cmd, &cb_arg->dma_resp,
5876 opts, ocs_hw_linkcfg_dmtf_clp_cb, cb_arg);
5878 if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) {
5879 /* if failed, or polling, free memory here; if success and not
5880 * polling, will free in callback function
5883 ocs_log_test(hw->os, "CLP cmd=\"%s\" failed\n",
5884 (char *)cb_arg->dma_cmd.virt);
5886 ocs_dma_free(hw->os, &cb_arg->dma_cmd);
5887 ocs_dma_free(hw->os, &cb_arg->dma_resp);
5888 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5894 * @brief Callback for ocs_hw_set_linkcfg_skyhawk
5896 * @param hw Hardware context.
5897 * @param status Status from the RECONFIG_GET_LINK_INFO command.
5898 * @param mqe Mailbox response structure.
5899 * @param arg Pointer to a callback argument.
5904 ocs_hw_set_active_link_config_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
5906 ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;
5909 ocs_log_test(hw->os, "SET_RECONFIG_LINK_ID failed, status=%d\n", status);
5912 /* invoke callback */
5914 cb_arg->cb(status, 0, cb_arg->arg);
5917 /* if polling, will free memory in calling function */
5918 if (cb_arg->opts != OCS_CMD_POLL) {
5919 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5924 * @brief Set link configuration for a Skyhawk
5926 * @param hw Hardware context.
5927 * @param value Link configuration enum to which the link configuration is set.
5929 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
5930 * @param cb Callback function to invoke following mbx command.
5931 * @param arg Callback argument.
5933 * @return Returns OCS_HW_RTN_SUCCESS on success.
5936 ocs_hw_set_linkcfg_skyhawk(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
5939 ocs_hw_linkcfg_cb_arg_t *cb_arg;
5940 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
5943 config_id = ocs_hw_config_id_from_linkcfg(value);
5945 if (config_id == 0) {
5946 ocs_log_test(hw->os, "Link config %d not supported by Skyhawk\n", value);
5947 return OCS_HW_RTN_ERROR;
5950 /* mbxdata holds the header of the command */
5951 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5952 if (mbxdata == NULL) {
5953 ocs_log_err(hw->os, "failed to malloc mbox\n");
5954 return OCS_HW_RTN_NO_MEMORY;
5957 /* cb_arg holds the data that will be passed to the callback on completion */
5958 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_linkcfg_cb_arg_t), OCS_M_NOWAIT);
5959 if (cb_arg == NULL) {
5960 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
5961 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5962 return OCS_HW_RTN_NO_MEMORY;
5968 if (sli_cmd_common_set_reconfig_link_id(&hw->sli, mbxdata, SLI4_BMBX_SIZE, NULL, 0, config_id)) {
5969 rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_set_active_link_config_cb, cb_arg);
5972 if (rc != OCS_HW_RTN_SUCCESS) {
5973 ocs_log_err(hw->os, "SET_RECONFIG_LINK_ID failed\n");
5974 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5975 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
5976 } else if (opts == OCS_CMD_POLL) {
5977 /* if we're polling we have to call the callback here. */
5978 ocs_hw_set_active_link_config_cb(hw, 0, mbxdata, cb_arg);
5979 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5980 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
5982 /* We weren't polling, so the callback has already been called */
5983 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5990 * @brief Get link configuration.
5992 * @param hw Hardware context.
5993 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
5994 * @param cb Callback function to invoke following mbx command.
5995 * @param arg Callback argument.
5997 * @return Returns OCS_HW_RTN_SUCCESS on success.
6000 ocs_hw_get_linkcfg(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
6002 if (!sli_link_is_configurable(&hw->sli)) {
6003 ocs_log_debug(hw->os, "Function not supported\n");
6004 return OCS_HW_RTN_ERROR;
6007 if ((SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) ||
6008 (SLI4_IF_TYPE_LANCER_G7 == sli_get_if_type(&hw->sli))){
6009 return ocs_hw_get_linkcfg_lancer(hw, opts, cb, arg);
6010 } else if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
6011 (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) {
6012 return ocs_hw_get_linkcfg_skyhawk(hw, opts, cb, arg);
6014 ocs_log_test(hw->os, "Function not supported for this IF_TYPE\n");
6015 return OCS_HW_RTN_ERROR;
6020 * @brief Get link configuration for a Lancer
6022 * @param hw Hardware context.
6023 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
6024 * @param cb Callback function to invoke following mbx command.
6025 * @param arg Callback argument.
6027 * @return Returns OCS_HW_RTN_SUCCESS on success.
6030 ocs_hw_get_linkcfg_lancer(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
6032 char cmd[OCS_HW_DMTF_CLP_CMD_MAX];
6033 ocs_hw_linkcfg_cb_arg_t *cb_arg;
6034 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6036 /* allocate memory for callback argument */
6037 cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT);
6038 if (cb_arg == NULL) {
6039 ocs_log_err(hw->os, "failed to malloc cb_arg");
6040 return OCS_HW_RTN_NO_MEMORY;
6043 ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "show / OEMELX_LinkConfig");
6045 /* allocate DMA for command */
6046 if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, ocs_strlen(cmd)+1, 4096)) {
6047 ocs_log_err(hw->os, "malloc failed\n");
6048 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6049 return OCS_HW_RTN_NO_MEMORY;
6052 /* copy CLP command to DMA command */
6053 ocs_memset(cb_arg->dma_cmd.virt, 0, ocs_strlen(cmd)+1);
6054 ocs_memcpy(cb_arg->dma_cmd.virt, cmd, ocs_strlen(cmd));
6056 /* allocate DMA for response */
6057 if (ocs_dma_alloc(hw->os, &cb_arg->dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) {
6058 ocs_log_err(hw->os, "malloc failed\n");
6059 ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6060 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6061 return OCS_HW_RTN_NO_MEMORY;
6065 cb_arg->opts = opts;
6067 rc = ocs_hw_exec_dmtf_clp_cmd(hw, &cb_arg->dma_cmd, &cb_arg->dma_resp,
6068 opts, ocs_hw_linkcfg_dmtf_clp_cb, cb_arg);
6070 if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) {
6071 /* if failed or polling, free memory here; if not polling and success,
6072 * will free in callback function
6075 ocs_log_test(hw->os, "CLP cmd=\"%s\" failed\n",
6076 (char *)cb_arg->dma_cmd.virt);
6078 ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6079 ocs_dma_free(hw->os, &cb_arg->dma_resp);
6080 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6086 * @brief Get the link configuration callback.
6088 * @param hw Hardware context.
6089 * @param status Status from the RECONFIG_GET_LINK_INFO command.
6090 * @param mqe Mailbox response structure.
6091 * @param arg Pointer to a callback argument.
6096 ocs_hw_get_active_link_config_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
6098 ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;
6099 sli4_res_common_get_reconfig_link_info_t *rsp = cb_arg->dma_cmd.virt;
6100 ocs_hw_linkcfg_e value = OCS_HW_LINKCFG_NA;
6103 ocs_log_test(hw->os, "GET_RECONFIG_LINK_INFO failed, status=%d\n", status);
6105 /* Call was successful */
6106 value = ocs_hw_linkcfg_from_config_id(rsp->active_link_config_id);
6109 /* invoke callback */
6111 cb_arg->cb(status, value, cb_arg->arg);
6114 /* if polling, will free memory in calling function */
6115 if (cb_arg->opts != OCS_CMD_POLL) {
6116 ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6117 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6122 * @brief Get link configuration for a Skyhawk.
6124 * @param hw Hardware context.
6125 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
6126 * @param cb Callback function to invoke following mbx command.
6127 * @param arg Callback argument.
6129 * @return Returns OCS_HW_RTN_SUCCESS on success.
6132 ocs_hw_get_linkcfg_skyhawk(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
6135 ocs_hw_linkcfg_cb_arg_t *cb_arg;
6136 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6138 /* mbxdata holds the header of the command */
6139 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
6140 if (mbxdata == NULL) {
6141 ocs_log_err(hw->os, "failed to malloc mbox\n");
6142 return OCS_HW_RTN_NO_MEMORY;
6145 /* cb_arg holds the data that will be passed to the callback on completion */
6146 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_linkcfg_cb_arg_t), OCS_M_NOWAIT);
6147 if (cb_arg == NULL) {
6148 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
6149 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6150 return OCS_HW_RTN_NO_MEMORY;
6155 cb_arg->opts = opts;
6157 /* dma_mem holds the non-embedded portion */
6158 if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, sizeof(sli4_res_common_get_reconfig_link_info_t), 4)) {
6159 ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
6160 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6161 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
6162 return OCS_HW_RTN_NO_MEMORY;
6165 if (sli_cmd_common_get_reconfig_link_info(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->dma_cmd)) {
6166 rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_get_active_link_config_cb, cb_arg);
6169 if (rc != OCS_HW_RTN_SUCCESS) {
6170 ocs_log_err(hw->os, "GET_RECONFIG_LINK_INFO failed\n");
6171 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6172 ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6173 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
6174 } else if (opts == OCS_CMD_POLL) {
6175 /* if we're polling we have to call the callback here. */
6176 ocs_hw_get_active_link_config_cb(hw, 0, mbxdata, cb_arg);
6177 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6178 ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6179 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
6181 /* We were not polling, so the callback will be called on completion */
6182 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
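/*
 * Example (illustrative sketch, not part of the driver): how a caller might
 * retrieve the active Skyhawk link configuration. The callback name and the
 * assumed ocs_hw_port_control_cb_t signature (int32_t status, uintptr_t value,
 * void *arg) are assumptions for illustration; the OCS_CMD_NOWAIT option and
 * the OCS_HW_LINKCFG_* values come from this file.
 *
 *	static void
 *	example_linkcfg_cb(int32_t status, uintptr_t value, void *arg)
 *	{
 *		if (status == 0) {
 *			// on success, value carries an ocs_hw_linkcfg_e
 *			ocs_log_debug(NULL, "active linkcfg=%ld\n", (long)value);
 *		}
 *	}
 *
 *	// non-blocking: the callback runs from mailbox completion context
 *	ocs_hw_get_linkcfg_skyhawk(hw, OCS_CMD_NOWAIT, example_linkcfg_cb, NULL);
 */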
6189 * @brief Sets the DIF seed value.
6191 * @param hw Hardware context.
6193 * @return Returns OCS_HW_RTN_SUCCESS on success.
6196 ocs_hw_set_dif_seed(ocs_hw_t *hw)
6198 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6199 uint8_t buf[SLI4_BMBX_SIZE];
6200 sli4_req_common_set_features_dif_seed_t seed_param;
6202 ocs_memset(&seed_param, 0, sizeof(seed_param));
6203 seed_param.seed = hw->config.dif_seed;
6205 /* send set_features command */
6206 if (sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6207 SLI4_SET_FEATURES_DIF_SEED,
6209 (uint32_t*)&seed_param)) {
6210 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6212 ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
6214 ocs_log_debug(hw->os, "DIF seed set to 0x%x\n",
6215 hw->config.dif_seed);
6218 ocs_log_err(hw->os, "sli_cmd_common_set_features failed\n");
6219 rc = OCS_HW_RTN_ERROR;
6225 * @brief Sets the DIF mode value.
6227 * @param hw Hardware context.
6229 * @return Returns OCS_HW_RTN_SUCCESS on success.
6232 ocs_hw_set_dif_mode(ocs_hw_t *hw)
6234 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6235 uint8_t buf[SLI4_BMBX_SIZE];
6236 sli4_req_common_set_features_t10_pi_mem_model_t mode_param;
6238 ocs_memset(&mode_param, 0, sizeof(mode_param));
6239 mode_param.tmm = (hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE ? 0 : 1);
6241 /* send set_features command */
6242 if (sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6243 SLI4_SET_FEATURES_DIF_MEMORY_MODE,
6245 (uint32_t*)&mode_param)) {
6246 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6248 ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
6250 ocs_log_test(hw->os, "DIF mode set to %s\n",
6251 (hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE ? "inline" : "separate"));
6254 ocs_log_err(hw->os, "sli_cmd_common_set_features failed\n");
6255 rc = OCS_HW_RTN_ERROR;
6261 ocs_hw_watchdog_timer_cb(void *arg)
6263 ocs_hw_t *hw = (ocs_hw_t *)arg;
6265 ocs_hw_config_watchdog_timer(hw);
6270 ocs_hw_cb_cfg_watchdog(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
6272 uint16_t timeout = hw->watchdog_timeout;
6275 ocs_log_err(hw->os, "config watchdog timer failed, rc = %d\n", status);
6278 /* re-arm the timer 500 ms before the timeout expires so the heartbeat stays alive (e.g., a 30 s timeout re-arms after 29500 ms) */
6279 ocs_setup_timer(hw->os, &hw->watchdog_timer, ocs_hw_watchdog_timer_cb, hw, (timeout*1000 - 500) );
6281 ocs_del_timer(&hw->watchdog_timer);
6285 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
6290 * @brief Set configuration parameters for watchdog timer feature.
6292 * @param hw Hardware context. The timeout, in seconds, is taken
6293 * from hw->watchdog_timeout; a value of 0 disables the timer.
6295 * @return Returns OCS_HW_RTN_SUCCESS on success.
6298 ocs_hw_config_watchdog_timer(ocs_hw_t *hw)
6300 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6301 uint8_t *buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
6304 ocs_log_err(hw->os, "no buffer for command\n");
6305 return OCS_HW_RTN_NO_MEMORY;
6308 sli4_cmd_lowlevel_set_watchdog(&hw->sli, buf, SLI4_BMBX_SIZE, hw->watchdog_timeout);
6309 rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_cfg_watchdog, NULL);
6311 ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
6312 ocs_log_err(hw->os, "config watchdog timer failed, rc = %d\n", rc);
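/*
 * Example (illustrative sketch): arming the watchdog from initialization
 * code; the surrounding context is hypothetical. The timeout is taken from
 * hw->watchdog_timeout (seconds), and once armed, ocs_hw_cb_cfg_watchdog()
 * re-arms the host timer 500 ms before each expiry.
 *
 *	hw->watchdog_timeout = 30;	// seconds
 *	if (ocs_hw_config_watchdog_timer(hw) != OCS_HW_RTN_SUCCESS) {
 *		ocs_log_err(hw->os, "could not arm watchdog\n");
 *	}
 *
 *	// to disable later: set hw->watchdog_timeout = 0 and reissue the
 *	// command; the completion callback then deletes the host timer
 */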
6318 * @brief Set configuration parameters for auto-generate xfer_rdy T10 PI feature.
6320 * @param hw Hardware context.
6321 * @param buf Pointer to a mailbox buffer area.
6323 * @return Returns OCS_HW_RTN_SUCCESS on success.
6326 ocs_hw_config_auto_xfer_rdy_t10pi(ocs_hw_t *hw, uint8_t *buf)
6328 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6329 sli4_req_common_set_features_xfer_rdy_t10pi_t param;
6331 ocs_memset(&param, 0, sizeof(param));
6332 param.rtc = (hw->config.auto_xfer_rdy_ref_tag_is_lba ? 0 : 1);
6333 param.atv = (hw->config.auto_xfer_rdy_app_tag_valid ? 1 : 0);
6334 param.tmm = ((hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE) ? 0 : 1);
6335 param.app_tag = hw->config.auto_xfer_rdy_app_tag_value;
6336 param.blk_size = hw->config.auto_xfer_rdy_blk_size_chip;
6338 switch (hw->config.auto_xfer_rdy_p_type) {
6346 ocs_log_err(hw->os, "unsupported p_type %d\n",
6347 hw->config.auto_xfer_rdy_p_type);
6348 return OCS_HW_RTN_ERROR;
6351 /* build the set_features command */
6352 sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6353 SLI4_SET_FEATURES_SET_CONFIG_AUTO_XFER_RDY_T10PI,
6357 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6359 ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
6361 ocs_log_test(hw->os, "Auto XFER RDY T10 PI configured rtc:%d atv:%d p_type:%d app_tag:%x blk_size:%d\n",
6362 param.rtc, param.atv, param.p_type,
6363 param.app_tag, param.blk_size);
6370 * @brief Enable or query the SLI port health check feature.
6372 * @param hw Hardware context.
6374 * @param query If 1, query the current state of the health check feature.
6375 * @param enable If 1, enable the feature; if 0, disable it.
6378 * @return Returns OCS_HW_RTN_SUCCESS on success.
6381 ocs_hw_config_sli_port_health_check(ocs_hw_t *hw, uint8_t query, uint8_t enable)
6383 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6384 uint8_t buf[SLI4_BMBX_SIZE];
6385 sli4_req_common_set_features_health_check_t param;
6387 ocs_memset(&param, 0, sizeof(param));
6391 /* build the set_features command */
6392 sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6393 SLI4_SET_FEATURES_SLI_PORT_HEALTH_CHECK,
6397 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6399 ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
6401 ocs_log_test(hw->os, "SLI Port Health Check is enabled\n");
6408 * @brief Set the FDT transfer hint feature.
6410 * @param hw Hardware context.
6411 * @param fdt_xfer_hint Size in bytes at which read requests are segmented.
6413 * @return Returns OCS_HW_RTN_SUCCESS on success.
6416 ocs_hw_config_set_fdt_xfer_hint(ocs_hw_t *hw, uint32_t fdt_xfer_hint)
6418 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6419 uint8_t buf[SLI4_BMBX_SIZE];
6420 sli4_req_common_set_features_set_fdt_xfer_hint_t param;
6422 ocs_memset(&param, 0, sizeof(param));
6423 param.fdt_xfer_hint = fdt_xfer_hint;
6424 /* build the set_features command */
6425 sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6426 SLI4_SET_FEATURES_SET_FTD_XFER_HINT,
6430 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6432 ocs_log_warn(hw->os, "set FDT hint %d failed: %d\n", fdt_xfer_hint, rc);
6434 ocs_log_debug(hw->os, "Set FDT transfer hint to %d\n", param.fdt_xfer_hint);
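/*
 * Example (illustrative sketch): asking the port to segment large read
 * requests at a 2 MB boundary. The hint value is arbitrary, chosen only for
 * illustration; a failure here is non-fatal and the port keeps its default.
 *
 *	if (ocs_hw_config_set_fdt_xfer_hint(hw, 2 * 1024 * 1024) != OCS_HW_RTN_SUCCESS) {
 *		// the warning has already been logged; continue with defaults
 *	}
 */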
6441 * @brief Callback for the link configuration DMTF CLP command.
6443 * @param hw Hardware context.
6444 * @param status Status from the DMTF CLP command.
6445 * @param result_len Length, in bytes, of the DMTF CLP result.
6446 * @param arg Pointer to a callback argument.
6448 * @return Returns OCS_HW_RTN_SUCCESS on success.
6451 ocs_hw_linkcfg_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg)
6454 char retdata_str[64];
6455 ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;
6456 ocs_hw_linkcfg_e linkcfg = OCS_HW_LINKCFG_NA;
6459 ocs_log_test(hw->os, "CLP cmd failed, status=%d\n", status);
6461 /* parse CLP response to get return data */
6462 rval = ocs_hw_clp_resp_get_value(hw, "retdata", retdata_str,
6463 sizeof(retdata_str),
6464 cb_arg->dma_resp.virt,
6468 ocs_log_err(hw->os, "failed to get retdata %d\n", result_len);
6470 /* translate string into hw enum */
6471 linkcfg = ocs_hw_linkcfg_from_clp(retdata_str);
6475 /* invoke callback */
6477 cb_arg->cb(status, linkcfg, cb_arg->arg);
6480 /* if polling, will free memory in calling function */
6481 if (cb_arg->opts != OCS_CMD_POLL) {
6482 ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6483 ocs_dma_free(hw->os, &cb_arg->dma_resp);
6484 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6489 * @brief Set the Lancer dump location
6491 * This function tells a Lancer chip to use a specific DMA
6492 * buffer as a dump location rather than the internal flash.
6494 * @param hw Hardware context.
6495 * @param num_buffers The number of DMA buffers to hold the dump (1..n).
6496 * @param dump_buffers DMA buffers to hold the dump.
6498 * @return Returns OCS_HW_RTN_SUCCESS on success.
6501 ocs_hw_set_dump_location(ocs_hw_t *hw, uint32_t num_buffers, ocs_dma_t *dump_buffers, uint8_t fdb)
6503 uint8_t bus, dev, func;
6504 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6505 uint8_t buf[SLI4_BMBX_SIZE];
6508 * Make sure the FW is new enough to support this command. If the FW
6509 * is too old, the FW will UE.
6511 if (hw->workaround.disable_dump_loc) {
6512 ocs_log_test(hw->os, "FW version is too old for this feature\n");
6513 return OCS_HW_RTN_ERROR;
6516 /* This command is only valid for physical port 0 */
6517 ocs_get_bus_dev_func(hw->os, &bus, &dev, &func);
6518 if (fdb == 0 && func != 0) {
6519 ocs_log_test(hw->os, "function only valid for pci function 0, %d passed\n",
6521 return OCS_HW_RTN_ERROR;
6525 * If a single buffer is used, it may be passed as-is to the chip. For multiple buffers,
6526 * we must allocate an SGL list and pass the address of the list to the chip.
6528 if (num_buffers > 1) {
6529 uint32_t sge_size = num_buffers * sizeof(sli4_sge_t);
6533 if (hw->dump_sges.size < sge_size) {
6534 ocs_dma_free(hw->os, &hw->dump_sges);
6535 if (ocs_dma_alloc(hw->os, &hw->dump_sges, sge_size, OCS_MIN_DMA_ALIGNMENT)) {
6536 ocs_log_err(hw->os, "SGE DMA allocation failed\n");
6537 return OCS_HW_RTN_NO_MEMORY;
6540 /* build the SGE list */
6541 ocs_memset(hw->dump_sges.virt, 0, hw->dump_sges.size);
6542 hw->dump_sges.len = sge_size;
6543 sge = hw->dump_sges.virt;
6544 for (i = 0; i < num_buffers; i++) {
6545 sge[i].buffer_address_high = ocs_addr32_hi(dump_buffers[i].phys);
6546 sge[i].buffer_address_low = ocs_addr32_lo(dump_buffers[i].phys);
6547 sge[i].last = (i == num_buffers - 1 ? 1 : 0);
6548 sge[i].buffer_length = dump_buffers[i].size;
6550 rc = sli_cmd_common_set_dump_location(&hw->sli, (void *)buf,
6551 SLI4_BMBX_SIZE, FALSE, TRUE,
6552 &hw->dump_sges, fdb);
6554 dump_buffers->len = dump_buffers->size;
6555 rc = sli_cmd_common_set_dump_location(&hw->sli, (void *)buf,
6556 SLI4_BMBX_SIZE, FALSE, FALSE,
6561 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL,
6564 ocs_log_err(hw->os, "ocs_hw_command returns %d\n",
6569 "sli_cmd_common_set_dump_location failed\n");
6570 rc = OCS_HW_RTN_ERROR;
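/*
 * Example (illustrative sketch): registering a single host dump buffer at
 * init time. The 4 MB size is a placeholder; only the ocs_dma_alloc() and
 * ocs_hw_set_dump_location() usage reflects this file. With num_buffers == 1
 * the buffer is passed to the chip as-is; with more than one, the function
 * first builds an SGE list in hw->dump_sges.
 *
 *	ocs_dma_t dump_buf;
 *
 *	if (ocs_dma_alloc(hw->os, &dump_buf, 4 * 1024 * 1024, OCS_MIN_DMA_ALIGNMENT) == 0) {
 *		if (ocs_hw_set_dump_location(hw, 1, &dump_buf, 0) != OCS_HW_RTN_SUCCESS) {
 *			ocs_dma_free(hw->os, &dump_buf);
 *		}
 *	}
 */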
6577 * @brief Set the Ethernet license.
6580 * This function sends the appropriate mailbox command (DMTF
6581 * CLP) to set the Ethernet license to the given license value.
6582 * Since it is used during ocs_hw_init(), the mailbox
6583 * command is sent via polling (the BMBX route).
6585 * @param hw Hardware context.
6586 * @param license 32-bit license value.
6588 * @return Returns OCS_HW_RTN_SUCCESS on success.
6591 ocs_hw_set_eth_license(ocs_hw_t *hw, uint32_t license)
6593 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6594 char cmd[OCS_HW_DMTF_CLP_CMD_MAX];
6598 /* only for lancer right now */
6599 if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
6600 ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
6601 return OCS_HW_RTN_ERROR;
6604 ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "set / OEMELX_Ethernet_License=%X", license);
6605 /* allocate DMA for command */
6606 if (ocs_dma_alloc(hw->os, &dma_cmd, ocs_strlen(cmd)+1, 4096)) {
6607 ocs_log_err(hw->os, "malloc failed\n");
6608 return OCS_HW_RTN_NO_MEMORY;
6610 ocs_memset(dma_cmd.virt, 0, ocs_strlen(cmd)+1);
6611 ocs_memcpy(dma_cmd.virt, cmd, ocs_strlen(cmd));
6613 /* allocate DMA for response */
6614 if (ocs_dma_alloc(hw->os, &dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) {
6615 ocs_log_err(hw->os, "malloc failed\n");
6616 ocs_dma_free(hw->os, &dma_cmd);
6617 return OCS_HW_RTN_NO_MEMORY;
6620 /* send DMTF CLP command mbx and poll */
6621 if (ocs_hw_exec_dmtf_clp_cmd(hw, &dma_cmd, &dma_resp, OCS_CMD_POLL, NULL, NULL)) {
6622 ocs_log_err(hw->os, "CLP cmd=\"%s\" failed\n", (char *)dma_cmd.virt);
6623 rc = OCS_HW_RTN_ERROR;
6626 ocs_dma_free(hw->os, &dma_cmd);
6627 ocs_dma_free(hw->os, &dma_resp);
6632 * @brief Callback argument structure for the DMTF CLP commands.
6634 typedef struct ocs_hw_clp_cb_arg_s {
6635 ocs_hw_dmtf_clp_cb_t cb;
6636 ocs_dma_t *dma_resp;
6640 } ocs_hw_clp_cb_arg_t;
6643 * @brief Execute the DMTF CLP command.
6645 * @param hw Hardware context.
6646 * @param dma_cmd DMA buffer containing the CLP command.
6647 * @param dma_resp DMA buffer that will contain the response (if successful).
6648 * @param opts Mailbox command options (such as OCS_CMD_NOWAIT and POLL).
6649 * @param cb Callback function.
6650 * @param arg Callback argument.
6652 * @return Returns the number of bytes written to the response
6653 * buffer on success, or a negative value if failed.
6656 ocs_hw_exec_dmtf_clp_cmd(ocs_hw_t *hw, ocs_dma_t *dma_cmd, ocs_dma_t *dma_resp, uint32_t opts, ocs_hw_dmtf_clp_cb_t cb, void *arg)
6658 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
6659 ocs_hw_clp_cb_arg_t *cb_arg;
6662 /* allocate DMA for mailbox */
6663 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
6664 if (mbxdata == NULL) {
6665 ocs_log_err(hw->os, "failed to malloc mbox\n");
6666 return OCS_HW_RTN_NO_MEMORY;
6669 /* allocate memory for callback argument */
6670 cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT);
6671 if (cb_arg == NULL) {
6672 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
6673 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6674 return OCS_HW_RTN_NO_MEMORY;
6679 cb_arg->dma_resp = dma_resp;
6680 cb_arg->opts = opts;
6682 /* Send the HW command */
6683 if (sli_cmd_dmtf_exec_clp_cmd(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
6684 dma_cmd, dma_resp)) {
6685 rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_dmtf_clp_cb, cb_arg);
6687 if (opts == OCS_CMD_POLL && rc == OCS_HW_RTN_SUCCESS) {
6688 /* if we're polling, copy the response and invoke the callback to parse the result */
6690 ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE);
6691 ocs_hw_dmtf_clp_cb(hw, 0, mbxdata, cb_arg);
6693 /* set rc to resulting or "parsed" status */
6694 rc = cb_arg->status;
6697 /* if failed, or polling, free memory here */
6698 if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) {
6699 if (rc != OCS_HW_RTN_SUCCESS) {
6700 ocs_log_test(hw->os, "ocs_hw_command failed\n");
6702 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6703 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6706 ocs_log_test(hw->os, "sli_cmd_dmtf_exec_clp_cmd failed\n");
6707 rc = OCS_HW_RTN_ERROR;
6708 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6709 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6716 * @brief Called when the DMTF CLP command completes.
6718 * @param hw Hardware context.
6719 * @param status Status field from the mbox completion.
6720 * @param mqe Mailbox response structure.
6721 * @param arg Pointer to a callback argument.
6727 ocs_hw_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
6729 int32_t cb_status = 0;
6730 sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
6731 sli4_res_dmtf_exec_clp_cmd_t *clp_rsp = (sli4_res_dmtf_exec_clp_cmd_t *) mbox_rsp->payload.embed;
6732 ocs_hw_clp_cb_arg_t *cb_arg = arg;
6733 uint32_t result_len = 0;
6737 /* there are several status codes here, check them all and condense
6738 * into a single callback status
6740 if (status || mbox_rsp->hdr.status || clp_rsp->clp_status) {
6741 ocs_log_debug(hw->os, "status=x%x/x%x/x%x addl=x%x clp=x%x detail=x%x\n",
6743 mbox_rsp->hdr.status,
6744 clp_rsp->hdr.status,
6745 clp_rsp->hdr.additional_status,
6746 clp_rsp->clp_status,
6747 clp_rsp->clp_detailed_status);
6750 } else if (mbox_rsp->hdr.status) {
6751 cb_status = mbox_rsp->hdr.status;
6753 cb_status = clp_rsp->clp_status;
6756 result_len = clp_rsp->resp_length;
6760 goto ocs_hw_cb_dmtf_clp_done;
6763 if ((result_len == 0) || (cb_arg->dma_resp->size < result_len)) {
6764 ocs_log_test(hw->os, "Invalid response length: resp_len=%zu result len=%d\n",
6765 cb_arg->dma_resp->size, result_len);
6767 goto ocs_hw_cb_dmtf_clp_done;
6770 /* parse CLP response to get status */
6771 stat_len = ocs_hw_clp_resp_get_value(hw, "status", stat_str,
6773 cb_arg->dma_resp->virt,
6776 if (stat_len <= 0) {
6777 ocs_log_test(hw->os, "failed to get status %d\n", stat_len);
6779 goto ocs_hw_cb_dmtf_clp_done;
6782 if (ocs_strcmp(stat_str, "0") != 0) {
6783 ocs_log_test(hw->os, "CLP status indicates failure=%s\n", stat_str);
6785 goto ocs_hw_cb_dmtf_clp_done;
6788 ocs_hw_cb_dmtf_clp_done:
6790 /* save status in cb_arg for callers with NULL cb's + polling */
6791 cb_arg->status = cb_status;
6793 cb_arg->cb(hw, cb_status, result_len, cb_arg->arg);
6795 /* if polling, caller will free memory */
6796 if (cb_arg->opts != OCS_CMD_POLL) {
6797 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6798 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
6803 * @brief Parse the CLP result and get the value corresponding to the given
6806 * @param hw Hardware context.
6807 * @param keyword CLP keyword for which the value is returned.
6808 * @param value Location to which the resulting value is copied.
6809 * @param value_len Length of the value parameter.
6810 * @param resp Pointer to the response buffer that is searched
6811 * for the keyword and value.
6812 * @param resp_len Length of response buffer passed in.
6814 * @return Returns the number of bytes written to the value
6815 * buffer on success, or a negative value on failure.
6818 ocs_hw_clp_resp_get_value(ocs_hw_t *hw, const char *keyword, char *value, uint32_t value_len, const char *resp, uint32_t resp_len)
6823 /* look for specified keyword in string */
6824 start = ocs_strstr(resp, keyword);
6825 if (start == NULL) {
6826 ocs_log_test(hw->os, "could not find keyword=%s in CLP response\n",
6831 /* now look for '=' and go one past */
6832 start = ocs_strchr(start, '=');
6833 if (start == NULL) {
6834 ocs_log_test(hw->os, "could not find \'=\' in CLP response for keyword=%s\n",
6840 /* \r\n terminates value */
6841 end = ocs_strstr(start, "\r\n");
6843 ocs_log_test(hw->os, "could not find \\r\\n for keyword=%s in CLP response\n",
6848 /* make sure given result array is big enough */
6849 if ((end - start + 1) > value_len) {
6850 ocs_log_test(hw->os, "value len=%d not large enough for actual=%ld\n",
6851 value_len, (end - start + 1));
6855 ocs_strncpy(value, start, (end - start));
6856 value[end-start] = '\0';
6857 return (end-start+1);
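/*
 * Example (illustrative sketch): the text format the parser above operates
 * on. A CLP response is a series of keyword=value pairs, each terminated by
 * CR/LF, e.g. (contents hypothetical):
 *
 *	status=0\r\n
 *	retdata=...\r\n
 *
 * Extracting the status value:
 *
 *	char stat_str[8];
 *	int32_t n = ocs_hw_clp_resp_get_value(hw, "status", stat_str,
 *					      sizeof(stat_str),
 *					      dma_resp.virt, resp_len);
 *	if (n > 0 && ocs_strcmp(stat_str, "0") == 0) {
 *		// command succeeded; fetch "retdata" the same way
 *	}
 */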
6861 * @brief Cause chip to enter an unrecoverable error state.
6864 * Cause chip to enter an unrecoverable error state. This is
6865 * used when detecting unexpected FW behavior so that the FW can be
6866 * halted from the driver as soon as the error is detected.
6868 * @param hw Hardware context.
6869 * @param dump Generate dump as part of reset.
6871 * @return Returns 0 on success, or a non-zero value on failure.
6875 ocs_hw_raise_ue(ocs_hw_t *hw, uint8_t dump)
6877 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6879 if (sli_raise_ue(&hw->sli, dump) != 0) {
6880 rc = OCS_HW_RTN_ERROR;
6882 if (hw->state != OCS_HW_STATE_UNINITIALIZED) {
6883 hw->state = OCS_HW_STATE_QUEUES_ALLOCATED;
6891 * @brief Called when the OBJECT_GET command completes.
6894 * Get the number of bytes actually written out of the response, free the mailbox
6895 * that was malloc'd by ocs_hw_dump_get(), then call the callback
6896 * and pass the status and bytes read.
6898 * @param hw Hardware context.
6899 * @param status Status field from the mbox completion.
6900 * @param mqe Mailbox response structure.
6901 * @param arg Pointer to a callback function that signals the caller that the command is done.
6902 * The callback function prototype is <tt>void cb(int32_t status, uint32_t bytes_read)</tt>.
6904 * @return Returns 0.
6907 ocs_hw_cb_dump_get(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
6909 sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
6910 sli4_res_common_read_object_t* rd_obj_rsp = (sli4_res_common_read_object_t*) mbox_rsp->payload.embed;
6911 ocs_hw_dump_get_cb_arg_t *cb_arg = arg;
6912 uint32_t bytes_read;
6915 bytes_read = rd_obj_rsp->actual_read_length;
6916 eof = rd_obj_rsp->eof;
6920 if ((status == 0) && mbox_rsp->hdr.status) {
6921 status = mbox_rsp->hdr.status;
6923 cb_arg->cb(status, bytes_read, eof, cb_arg->arg);
6926 ocs_free(hw->os, cb_arg->mbox_cmd, SLI4_BMBX_SIZE);
6927 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_get_cb_arg_t));
6934 * @brief Read a dump image to the host.
6937 * Creates a SLI_CONFIG mailbox command, fills in the correct values to read a
6938 * dump image chunk, then sends the command with the ocs_hw_command(). On completion,
6939 * the callback function ocs_hw_cb_dump_get() gets called to free the mailbox
6940 * and signal the caller that the read has completed.
6942 * @param hw Hardware context.
6943 * @param dma DMA structure to transfer the dump chunk into.
6944 * @param size Size of the dump chunk.
6945 * @param offset Offset, in bytes, from the beginning of the dump.
6946 * @param cb Pointer to a callback function that is called when the command completes.
6947 * The callback function prototype is
6948 * <tt>void cb(int32_t status, uint32_t bytes_read, uint8_t eof, void *arg)</tt>.
6949 * @param arg Pointer to be passed to the callback function.
6951 * @return Returns 0 on success, or a non-zero value on failure.
6954 ocs_hw_dump_get(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, ocs_hw_dump_get_cb_t cb, void *arg)
6956 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
6958 ocs_hw_dump_get_cb_arg_t *cb_arg;
6959 uint32_t opts = (hw->state == OCS_HW_STATE_ACTIVE ? OCS_CMD_NOWAIT : OCS_CMD_POLL);
6961 if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
6962 ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
6963 return OCS_HW_RTN_ERROR;
6966 if (1 != sli_dump_is_present(&hw->sli)) {
6967 ocs_log_test(hw->os, "No dump is present\n");
6968 return OCS_HW_RTN_ERROR;
6971 if (1 == sli_reset_required(&hw->sli)) {
6972 ocs_log_test(hw->os, "device reset required\n");
6973 return OCS_HW_RTN_ERROR;
6976 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
6977 if (mbxdata == NULL) {
6978 ocs_log_err(hw->os, "failed to malloc mbox\n");
6979 return OCS_HW_RTN_NO_MEMORY;
6982 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_dump_get_cb_arg_t), OCS_M_NOWAIT);
6983 if (cb_arg == NULL) {
6984 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
6985 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6986 return OCS_HW_RTN_NO_MEMORY;
6991 cb_arg->mbox_cmd = mbxdata;
6993 if (sli_cmd_common_read_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
6994 size, offset, "/dbg/dump.bin", dma)) {
6995 rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_cb_dump_get, cb_arg);
6996 if (rc == 0 && opts == OCS_CMD_POLL) {
6997 ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE);
6998 rc = ocs_hw_cb_dump_get(hw, 0, mbxdata, cb_arg);
7002 if (rc != OCS_HW_RTN_SUCCESS) {
7003 ocs_log_test(hw->os, "COMMON_READ_OBJECT failed\n");
7004 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7005 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_get_cb_arg_t));
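/*
 * Example (illustrative sketch): draining a dump image in fixed-size chunks.
 * A real caller would keep the offset in its callback argument and reissue
 * ocs_hw_dump_get() until eof is reported; the callback and context names
 * are hypothetical.
 *
 *	static void
 *	example_dump_chunk_cb(int32_t status, uint32_t bytes_read, uint8_t eof, void *arg)
 *	{
 *		if (status == 0 && !eof) {
 *			// persist bytes_read bytes from the chunk DMA buffer,
 *			// advance the offset, then request the next chunk
 *		}
 *	}
 *
 *	ocs_hw_dump_get(hw, &chunk_dma, chunk_dma.size, 0, example_dump_chunk_cb, ctx);
 */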
7012 * @brief Called when the OBJECT_DELETE command completes.
7015 * Free the mailbox that was malloc'd
7016 * by ocs_hw_dump_clear(), then call the callback and pass the status.
7018 * @param hw Hardware context.
7019 * @param status Status field from the mbox completion.
7020 * @param mqe Mailbox response structure.
7021 * @param arg Pointer to a callback function that signals the caller that the command is done.
7022 * The callback function prototype is <tt>void cb(int32_t status, void *arg)</tt>.
7024 * @return Returns 0.
7027 ocs_hw_cb_dump_clear(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7029 ocs_hw_dump_clear_cb_arg_t *cb_arg = arg;
7030 sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
7034 if ((status == 0) && mbox_rsp->hdr.status) {
7035 status = mbox_rsp->hdr.status;
7037 cb_arg->cb(status, cb_arg->arg);
7040 ocs_free(hw->os, cb_arg->mbox_cmd, SLI4_BMBX_SIZE);
7041 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_clear_cb_arg_t));
7048 * @brief Clear a dump image from the device.
7051 * Creates a SLI_CONFIG mailbox command, fills it with the correct values to clear
7052 * the dump, then sends the command with ocs_hw_command(). On completion,
7053 * the callback function ocs_hw_cb_dump_clear() gets called to free the mailbox
7054 * and to signal the caller that the write has completed.
7056 * @param hw Hardware context.
7057 * @param cb Pointer to a callback function that is called when the command completes.
7058 * The callback function prototype is
7059 * <tt>void cb(int32_t status, void *arg)</tt>.
7060 * @param arg Pointer to be passed to the callback function.
7062 * @return Returns 0 on success, or a non-zero value on failure.
7065 ocs_hw_dump_clear(ocs_hw_t *hw, ocs_hw_dump_clear_cb_t cb, void *arg)
7067 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
7069 ocs_hw_dump_clear_cb_arg_t *cb_arg;
7070 uint32_t opts = (hw->state == OCS_HW_STATE_ACTIVE ? OCS_CMD_NOWAIT : OCS_CMD_POLL);
7072 if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
7073 ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
7074 return OCS_HW_RTN_ERROR;
7077 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7078 if (mbxdata == NULL) {
7079 ocs_log_err(hw->os, "failed to malloc mbox\n");
7080 return OCS_HW_RTN_NO_MEMORY;
7083 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_dump_clear_cb_arg_t), OCS_M_NOWAIT);
7084 if (cb_arg == NULL) {
7085 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7086 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7087 return OCS_HW_RTN_NO_MEMORY;
7092 cb_arg->mbox_cmd = mbxdata;
7094 if (sli_cmd_common_delete_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
7096 rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_cb_dump_clear, cb_arg);
7097 if (rc == 0 && opts == OCS_CMD_POLL) {
7098 ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE);
7099 rc = ocs_hw_cb_dump_clear(hw, 0, mbxdata, cb_arg);
7103 if (rc != OCS_HW_RTN_SUCCESS) {
7104 ocs_log_test(hw->os, "COMMON_DELETE_OBJECT failed\n");
7105 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7106 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_clear_cb_arg_t));
7112 typedef struct ocs_hw_get_port_protocol_cb_arg_s {
7113 ocs_get_port_protocol_cb_t cb;
7117 } ocs_hw_get_port_protocol_cb_arg_t;
7120 * @brief Called for the completion of get_port_profile for a user request.
7123 * @param hw Hardware context.
7124 * @param status The status from the MQE.
7125 * @param mqe Pointer to mailbox command buffer.
7126 * @param arg Pointer to a callback argument.
7128 * @return Returns 0 on success, or a non-zero value on failure.
7131 ocs_hw_get_port_protocol_cb(ocs_hw_t *hw, int32_t status,
7132 uint8_t *mqe, void *arg)
7134 ocs_hw_get_port_protocol_cb_arg_t *cb_arg = arg;
7135 ocs_dma_t *payload = &(cb_arg->payload);
7136 sli4_res_common_get_profile_config_t* response = (sli4_res_common_get_profile_config_t*) payload->virt;
7137 ocs_hw_port_protocol_e port_protocol;
7138 int num_descriptors;
7139 sli4_resource_descriptor_v1_t *desc_p;
7140 sli4_pcie_resource_descriptor_v1_t *pcie_desc_p;
7143 port_protocol = OCS_HW_PORT_PROTOCOL_OTHER;
7145 num_descriptors = response->desc_count;
7146 desc_p = (sli4_resource_descriptor_v1_t *)response->desc;
7147 for (i=0; i<num_descriptors; i++) {
7148 if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) {
7149 pcie_desc_p = (sli4_pcie_resource_descriptor_v1_t*) desc_p;
7150 if (pcie_desc_p->pf_number == cb_arg->pci_func) {
7151 switch(pcie_desc_p->pf_type) {
7153 port_protocol = OCS_HW_PORT_PROTOCOL_ISCSI;
7156 port_protocol = OCS_HW_PORT_PROTOCOL_FCOE;
7159 port_protocol = OCS_HW_PORT_PROTOCOL_FC;
7162 port_protocol = OCS_HW_PORT_PROTOCOL_OTHER;
7168 desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length);
7172 cb_arg->cb(status, port_protocol, cb_arg->arg);
7175 ocs_dma_free(hw->os, &cb_arg->payload);
7176 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t));
7177 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7184 * @brief Get the current port protocol.
7186 * Issues a SLI4 COMMON_GET_PROFILE_CONFIG mailbox. When the
7187 * command completes, the provided mgmt callback function is called.
7190 * @param hw Hardware context.
7191 * @param pci_func PCI function to query for current protocol.
7192 * @param cb Callback function to be called when the command completes.
7193 * @param ul_arg An argument that is passed to the callback function.
7196 * - OCS_HW_RTN_SUCCESS on success.
7197 * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7198 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command context.
7200 * - OCS_HW_RTN_ERROR on any other error.
7203 ocs_hw_get_port_protocol(ocs_hw_t *hw, uint32_t pci_func,
7204 ocs_get_port_protocol_cb_t cb, void* ul_arg)
7207 ocs_hw_get_port_protocol_cb_arg_t *cb_arg;
7208 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7210 /* Only supported on Skyhawk */
7211 if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
7212 return OCS_HW_RTN_ERROR;
7215 /* mbxdata holds the header of the command */
7216 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7217 if (mbxdata == NULL) {
7218 ocs_log_err(hw->os, "failed to malloc mbox\n");
7219 return OCS_HW_RTN_NO_MEMORY;
7222 /* cb_arg holds the data that will be passed to the callback on completion */
7223 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_port_protocol_cb_arg_t), OCS_M_NOWAIT);
7224 if (cb_arg == NULL) {
7225 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7226 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7227 return OCS_HW_RTN_NO_MEMORY;
7231 cb_arg->arg = ul_arg;
7232 cb_arg->pci_func = pci_func;
7234 /* dma_mem holds the non-embedded portion */
7235 if (ocs_dma_alloc(hw->os, &cb_arg->payload, 4096, 4)) {
7236 ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
7237 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7238 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t));
7239 return OCS_HW_RTN_NO_MEMORY;
7242 if (sli_cmd_common_get_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->payload)) {
7243 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_port_protocol_cb, cb_arg);
7246 if (rc != OCS_HW_RTN_SUCCESS) {
7247 ocs_log_test(hw->os, "GET_PROFILE_CONFIG failed\n");
7248 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7249 ocs_dma_free(hw->os, &cb_arg->payload);
7250 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t));
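/*
 * Example (illustrative sketch): querying the protocol personality of PCI
 * function 0 on a Skyhawk. The callback name and its assumed signature are
 * hypothetical; the OCS_HW_PORT_PROTOCOL_* values come from this driver.
 *
 *	static void
 *	example_port_protocol_cb(int32_t status, ocs_hw_port_protocol_e proto, void *arg)
 *	{
 *		if (status == 0 && proto == OCS_HW_PORT_PROTOCOL_FC) {
 *			// function is currently configured for native FC
 *		}
 *	}
 *
 *	ocs_hw_get_port_protocol(hw, 0, example_port_protocol_cb, NULL);
 */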
7257 typedef struct ocs_hw_set_port_protocol_cb_arg_s {
7258 ocs_set_port_protocol_cb_t cb;
7261 uint32_t new_protocol;
7263 } ocs_hw_set_port_protocol_cb_arg_t;
7266 * @brief Called for the completion of set_port_profile for a user request.
7270 * This is the second of two callbacks for the set_port_protocol
7271 * function. The set operation is a read-modify-write. This
7272 * callback is called when the write (SET_PROFILE_CONFIG)
7275 * @param hw Hardware context.
7276 * @param status The status from the MQE.
7277 * @param mqe Pointer to mailbox command buffer.
7278 * @param arg Pointer to a callback argument.
7280 * @return 0 on success, non-zero otherwise
7283 ocs_hw_set_port_protocol_cb2(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7285 ocs_hw_set_port_protocol_cb_arg_t *cb_arg = arg;
7288 cb_arg->cb( status, cb_arg->arg);
7291 ocs_dma_free(hw->os, &(cb_arg->payload));
7292 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7293 ocs_free(hw->os, arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7299 * @brief Called for the completion of set_port_profile for a user request.
7303 * This is the first of two callbacks for the set_port_protocol
7304 * function. The set operation is a read-modify-write. This
7305 * callback is called when the read completes
7306 * (GET_PROFILE_CONFIG). It updates the resource
7307 * descriptors, then queues the write (SET_PROFILE_CONFIG).
7309 * On entry there are three memory areas that were allocated by
7310 * ocs_hw_set_port_protocol. If a failure is detected in this
7311 * function those need to be freed. If this function succeeds
7312 * it allocates three more areas.
7314 * @param hw Hardware context.
7315 * @param status The status from the MQE
7316 * @param mqe Pointer to mailbox command buffer.
7317 * @param arg Pointer to a callback argument.
7319 * @return Returns 0 on success, or a non-zero value otherwise.
7322 ocs_hw_set_port_protocol_cb1(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7324 ocs_hw_set_port_protocol_cb_arg_t *cb_arg = arg;
7325 ocs_dma_t *payload = &(cb_arg->payload);
7326 sli4_res_common_get_profile_config_t* response = (sli4_res_common_get_profile_config_t*) payload->virt;
7327 int num_descriptors;
7328 sli4_resource_descriptor_v1_t *desc_p;
7329 sli4_pcie_resource_descriptor_v1_t *pcie_desc_p;
7331 ocs_hw_set_port_protocol_cb_arg_t *new_cb_arg;
7332 ocs_hw_port_protocol_e new_protocol;
7334 sli4_isap_resouce_descriptor_v1_t *isap_desc_p;
7336 int pci_descriptor_count;
7337 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7338 int num_fcoe_ports = 0;
7339 int num_iscsi_ports = 0;
7341 new_protocol = (ocs_hw_port_protocol_e)cb_arg->new_protocol;
7343 num_descriptors = response->desc_count;
7345 /* Count PCI descriptors */
7346 pci_descriptor_count = 0;
7347 desc_p = (sli4_resource_descriptor_v1_t *)response->desc;
7348 for (i=0; i<num_descriptors; i++) {
7349 if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) {
7350 ++pci_descriptor_count;
7352 desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length);
7355 /* mbxdata holds the header of the command */
7356 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7357 if (mbxdata == NULL) {
7358 ocs_log_err(hw->os, "failed to malloc mbox\n");
7359 return OCS_HW_RTN_NO_MEMORY;
7362 /* cb_arg holds the data that will be passed to the callback on completion */
7363 new_cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_port_protocol_cb_arg_t), OCS_M_NOWAIT);
7364 if (new_cb_arg == NULL) {
7365 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7366 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7367 return OCS_HW_RTN_NO_MEMORY;
7370 new_cb_arg->cb = cb_arg->cb;
7371 new_cb_arg->arg = cb_arg->arg;
7373 /* Allocate memory for the descriptors we're going to send. This is
7374 * one for each PCI descriptor plus one ISAP descriptor. */
7375 if (ocs_dma_alloc(hw->os, &new_cb_arg->payload, sizeof(sli4_req_common_set_profile_config_t) +
7376 (pci_descriptor_count * sizeof(sli4_pcie_resource_descriptor_v1_t)) +
7377 sizeof(sli4_isap_resouce_descriptor_v1_t), 4)) {
7378 ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
7379 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7380 ocs_free(hw->os, new_cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7381 return OCS_HW_RTN_NO_MEMORY;
7384 sli_cmd_common_set_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
7385 &new_cb_arg->payload,
7386 0, pci_descriptor_count+1, 1);
7388 /* Point dst to the first descriptor entry in the SET_PROFILE_CONFIG command */
7389 dst = (uint8_t *)&(((sli4_req_common_set_profile_config_t *) new_cb_arg->payload.virt)->desc);
7391 /* Loop over all descriptors. If the descriptor is a PCIe descriptor, copy it
7392 * to the SET_PROFILE_CONFIG command to be written back. If it is the descriptor
7393 * we are trying to change, also set its pf_type.
7395 desc_p = (sli4_resource_descriptor_v1_t *)response->desc;
7396 for (i=0; i<num_descriptors; i++) {
7397 if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) {
7398 pcie_desc_p = (sli4_pcie_resource_descriptor_v1_t*) desc_p;
7399 if (pcie_desc_p->pf_number == cb_arg->pci_func) {
7400 /* This is the PCIe descriptor for this OCS instance.
7401 * Update it with the new pf_type */
7402 switch(new_protocol) {
7403 case OCS_HW_PORT_PROTOCOL_FC:
7404 pcie_desc_p->pf_type = SLI4_PROTOCOL_FC;
7406 case OCS_HW_PORT_PROTOCOL_FCOE:
7407 pcie_desc_p->pf_type = SLI4_PROTOCOL_FCOE;
7409 case OCS_HW_PORT_PROTOCOL_ISCSI:
7410 pcie_desc_p->pf_type = SLI4_PROTOCOL_ISCSI;
7413 pcie_desc_p->pf_type = SLI4_PROTOCOL_DEFAULT;
7418 if (pcie_desc_p->pf_type == SLI4_PROTOCOL_FCOE) {
7421 if (pcie_desc_p->pf_type == SLI4_PROTOCOL_ISCSI) {
7424 ocs_memcpy(dst, pcie_desc_p, sizeof(sli4_pcie_resource_descriptor_v1_t));
7425 dst += sizeof(sli4_pcie_resource_descriptor_v1_t);
7428 desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length);
7431 /* Create an ISAP resource descriptor */
7432 isap_desc_p = (sli4_isap_resouce_descriptor_v1_t*)dst;
7433 isap_desc_p->descriptor_type = SLI4_RESOURCE_DESCRIPTOR_TYPE_ISAP;
7434 isap_desc_p->descriptor_length = sizeof(sli4_isap_resouce_descriptor_v1_t);
7435 if (num_iscsi_ports > 0) {
7436 isap_desc_p->iscsi_tgt = 1;
7437 isap_desc_p->iscsi_ini = 1;
7438 isap_desc_p->iscsi_dif = 1;
7440 if (num_fcoe_ports > 0) {
7441 isap_desc_p->fcoe_tgt = 1;
7442 isap_desc_p->fcoe_ini = 1;
7443 isap_desc_p->fcoe_dif = 1;
7446 /* At this point we're done with the memory allocated by ocs_hw_set_port_protocol */
7447 ocs_dma_free(hw->os, &cb_arg->payload);
7448 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7449 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7451 /* Send a SET_PROFILE_CONFIG mailbox command with the new descriptors */
7452 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_port_protocol_cb2, new_cb_arg);
7454 ocs_log_err(hw->os, "Error posting COMMON_SET_PROFILE_CONFIG\n");
7455 /* Call the upper level callback to report a failure */
7456 if (new_cb_arg->cb) {
7457 new_cb_arg->cb( rc, new_cb_arg->arg);
7460 /* Free the memory allocated by this function */
7461 ocs_dma_free(hw->os, &new_cb_arg->payload);
7462 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7463 ocs_free(hw->os, new_cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7471 * @brief Set the port protocol.
7473 * Setting the port protocol is a read-modify-write operation.
7474 * This function submits a GET_PROFILE_CONFIG command to read
7475 * the current settings. The callback function will modify the
7476 * settings and issue the write.
7478 * On successful completion this function will have allocated
7479 * two regular memory areas and one dma area which will need to
7480 * get freed later in the callbacks.
7482 * @param hw Hardware context.
7483 * @param new_protocol New protocol to use.
7484 * @param pci_func PCI function to configure.
7485 * @param cb Callback function to be called when the command completes.
7486 * @param ul_arg An argument that is passed to the callback function.
7489 * - OCS_HW_RTN_SUCCESS on success.
7490 * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7491 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command context.
7493 * - OCS_HW_RTN_ERROR on any other error.
7496 ocs_hw_set_port_protocol(ocs_hw_t *hw, ocs_hw_port_protocol_e new_protocol,
7497 uint32_t pci_func, ocs_set_port_protocol_cb_t cb, void *ul_arg)
7500 ocs_hw_set_port_protocol_cb_arg_t *cb_arg;
7501 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
7503 /* Only supported on Skyhawk */
7504 if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
7505 return OCS_HW_RTN_ERROR;
7508 /* mbxdata holds the header of the command */
7509 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7510 if (mbxdata == NULL) {
7511 ocs_log_err(hw->os, "failed to malloc mbox\n");
7512 return OCS_HW_RTN_NO_MEMORY;
7515 /* cb_arg holds the data that will be passed to the callback on completion */
7516 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_port_protocol_cb_arg_t), OCS_M_NOWAIT);
7517 if (cb_arg == NULL) {
7518 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7519 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7520 return OCS_HW_RTN_NO_MEMORY;
7524 cb_arg->arg = ul_arg;
7525 cb_arg->new_protocol = new_protocol;
7526 cb_arg->pci_func = pci_func;
7528 /* dma_mem holds the non-embedded portion */
7529 if (ocs_dma_alloc(hw->os, &cb_arg->payload, 4096, 4)) {
7530 ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
7531 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7532 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7533 return OCS_HW_RTN_NO_MEMORY;
7536 if (sli_cmd_common_get_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->payload)) {
7537 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_port_protocol_cb1, cb_arg);
7540 if (rc != OCS_HW_RTN_SUCCESS) {
7541 ocs_log_test(hw->os, "GET_PROFILE_CONFIG failed\n");
7542 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7543 ocs_dma_free(hw->os, &cb_arg->payload);
7544 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
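/*
 * Example (illustrative sketch): switching PCI function 0 to the FC
 * personality. Because the operation is a read-modify-write of the active
 * profile, the upper-level callback (name hypothetical) only runs once the
 * SET_PROFILE_CONFIG write completes; the new protocol typically takes
 * effect on the next firmware reset.
 *
 *	static void
 *	example_set_proto_cb(int32_t status, void *arg)
 *	{
 *		if (status != 0) {
 *			// read or write phase failed; the old protocol remains active
 *		}
 *	}
 *
 *	ocs_hw_set_port_protocol(hw, OCS_HW_PORT_PROTOCOL_FC, 0,
 *				 example_set_proto_cb, NULL);
 */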
7550 typedef struct ocs_hw_get_profile_list_cb_arg_s {
7551 ocs_get_profile_list_cb_t cb;
7554 } ocs_hw_get_profile_list_cb_arg_t;
7557 * @brief Called for the completion of get_profile_list for a user request.
7560 * This function is called when the COMMON_GET_PROFILE_LIST
7561 * mailbox completes. The response will be in
7562 * ctx->non_embedded_mem.virt. This function parses the
7563 * response and creates an ocs_hw_profile_list, then calls the
7564 * mgmt_cb callback function and passes that list to it.
7566 * @param hw Hardware context.
7567 * @param status The status from the MQE
7568 * @param mqe Pointer to mailbox command buffer.
7569 * @param arg Pointer to a callback argument.
7571 * @return Returns 0 on success, or a non-zero value on failure.
7574 ocs_hw_get_profile_list_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7576 ocs_hw_profile_list_t *list;
7577 ocs_hw_get_profile_list_cb_arg_t *cb_arg = arg;
7578 ocs_dma_t *payload = &(cb_arg->payload);
7579 sli4_res_common_get_profile_list_t *response = (sli4_res_common_get_profile_list_t *)payload->virt;
7581 int num_descriptors;
7583 list = ocs_malloc(hw->os, sizeof(ocs_hw_profile_list_t), OCS_M_ZERO);
	if (list == NULL) {
		/* no list to parse into; clean up and bail out */
		ocs_log_err(hw->os, "failed to malloc list\n");
		ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
		ocs_dma_free(hw->os, &cb_arg->payload);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
		return -1;
	}
7584 list->num_descriptors = response->profile_descriptor_count;
7586 num_descriptors = list->num_descriptors;
7587 if (num_descriptors > OCS_HW_MAX_PROFILES) {
7588 num_descriptors = OCS_HW_MAX_PROFILES;
7591 for (i=0; i<num_descriptors; i++) {
7592 list->descriptors[i].profile_id = response->profile_descriptor[i].profile_id;
7593 list->descriptors[i].profile_index = response->profile_descriptor[i].profile_index;
7594 ocs_strcpy(list->descriptors[i].profile_description, (char *)response->profile_descriptor[i].profile_description);
7598 cb_arg->cb(status, list, cb_arg->arg);
7600 ocs_free(hw->os, list, sizeof(*list));
7603 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7604 ocs_dma_free(hw->os, &cb_arg->payload);
7605 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
7612 * @brief Get a list of available profiles.
7614 * Issues a SLI-4 COMMON_GET_PROFILE_LIST mailbox. When the
7615 * command completes, the provided mgmt callback function is called.
7618 * @param hw Hardware context.
7619 * @param cb Callback function to be called when the
7620 * command completes.
7621 * @param ul_arg An argument that is passed to the callback
7625 * - OCS_HW_RTN_SUCCESS on success.
7626 * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7627 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command context.
7629 * - OCS_HW_RTN_ERROR on any other error.
7632 ocs_hw_get_profile_list(ocs_hw_t *hw, ocs_get_profile_list_cb_t cb, void* ul_arg)
7635 ocs_hw_get_profile_list_cb_arg_t *cb_arg;
7636 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7638 /* Only supported on Skyhawk */
7639 if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
7640 return OCS_HW_RTN_ERROR;
7643 /* mbxdata holds the header of the command */
7644 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7645 if (mbxdata == NULL) {
7646 ocs_log_err(hw->os, "failed to malloc mbox\n");
7647 return OCS_HW_RTN_NO_MEMORY;
7650 /* cb_arg holds the data that will be passed to the callback on completion */
7651 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_profile_list_cb_arg_t), OCS_M_NOWAIT);
7652 if (cb_arg == NULL) {
7653 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7654 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7655 return OCS_HW_RTN_NO_MEMORY;
7659 cb_arg->arg = ul_arg;
7661 /* dma_mem holds the non-embedded portion */
7662 if (ocs_dma_alloc(hw->os, &cb_arg->payload, sizeof(sli4_res_common_get_profile_list_t), 4)) {
7663 ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
7664 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7665 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
7666 return OCS_HW_RTN_NO_MEMORY;
7669 if (sli_cmd_common_get_profile_list(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 0, &cb_arg->payload)) {
7670 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_profile_list_cb, cb_arg);
7673 if (rc != OCS_HW_RTN_SUCCESS) {
7674 ocs_log_test(hw->os, "GET_PROFILE_LIST failed\n");
7675 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7676 ocs_dma_free(hw->os, &cb_arg->payload);
7677 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
7683 typedef struct ocs_hw_get_active_profile_cb_arg_s {
7684 ocs_get_active_profile_cb_t cb;
7686 } ocs_hw_get_active_profile_cb_arg_t;
7689 * @brief Called for the completion of get_active_profile for a user request.
7692 * @param hw Hardware context.
7693 * @param status The status from the MQE
7694 * @param mqe Pointer to mailbox command buffer.
7695 * @param arg Pointer to a callback argument.
7697 * @return Returns 0 on success, or a non-zero value on failure.
7700 ocs_hw_get_active_profile_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7702 ocs_hw_get_active_profile_cb_arg_t *cb_arg = arg;
7703 sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
7704 sli4_res_common_get_active_profile_t* response = (sli4_res_common_get_active_profile_t*) mbox_rsp->payload.embed;
7705 uint32_t active_profile;
7707 active_profile = response->active_profile_id;
7710 cb_arg->cb(status, active_profile, cb_arg->arg);
7713 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7714 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_active_profile_cb_arg_t));
7721 * @brief Get the currently active profile.
7723 * Issues a SLI-4 COMMON_GET_ACTIVE_PROFILE mailbox. When the
7724 * command completes, the provided mgmt callback function is called.
7727 * @param hw Hardware context.
7728 * @param cb Callback function to be called when the
7729 * command completes.
7730 * @param ul_arg An argument that is passed to the callback
7734 * - OCS_HW_RTN_SUCCESS on success.
7735 * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7736 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command context.
7738 * - OCS_HW_RTN_ERROR on any other error.
7741 ocs_hw_get_active_profile(ocs_hw_t *hw, ocs_get_active_profile_cb_t cb, void* ul_arg)
7744 ocs_hw_get_active_profile_cb_arg_t *cb_arg;
7745 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7747 /* Only supported on Skyhawk */
7748 if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
7749 return OCS_HW_RTN_ERROR;
7752 /* mbxdata holds the header of the command */
7753 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7754 if (mbxdata == NULL) {
7755 ocs_log_err(hw->os, "failed to malloc mbox\n");
7756 return OCS_HW_RTN_NO_MEMORY;
7759 /* cb_arg holds the data that will be passed to the callback on completion */
7760 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_active_profile_cb_arg_t), OCS_M_NOWAIT);
7761 if (cb_arg == NULL) {
7762 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7763 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7764 return OCS_HW_RTN_NO_MEMORY;
7768 cb_arg->arg = ul_arg;
7770 if (sli_cmd_common_get_active_profile(&hw->sli, mbxdata, SLI4_BMBX_SIZE)) {
7771 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_active_profile_cb, cb_arg);
7774 if (rc != OCS_HW_RTN_SUCCESS) {
7775 ocs_log_test(hw->os, "GET_ACTIVE_PROFILE failed\n");
7776 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7777 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_active_profile_cb_arg_t));
7783 typedef struct ocs_hw_get_nvparms_cb_arg_s {
7784 ocs_get_nvparms_cb_t cb;
7786 } ocs_hw_get_nvparms_cb_arg_t;
7789 * @brief Called for the completion of get_nvparms for a user request.
7792 * @param hw Hardware context.
7793 * @param status The status from the MQE.
7794 * @param mqe Pointer to mailbox command buffer.
7795 * @param arg Pointer to a callback argument.
7797 * @return 0 on success, non-zero otherwise
7800 ocs_hw_get_nvparms_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7802 ocs_hw_get_nvparms_cb_arg_t *cb_arg = arg;
7803 sli4_cmd_read_nvparms_t* mbox_rsp = (sli4_cmd_read_nvparms_t*) mqe;
7806 cb_arg->cb(status, mbox_rsp->wwpn, mbox_rsp->wwnn, mbox_rsp->hard_alpa,
7807 mbox_rsp->preferred_d_id, cb_arg->arg);
7810 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7811 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_nvparms_cb_arg_t));
7818 * @brief Read non-volatile parms.
7820 * Issues a SLI-4 READ_NVPARMS mailbox. When the
7821 * command completes, the provided mgmt callback function is called.
7824 * @param hw Hardware context.
7825 * @param cb Callback function to be called when the
7826 * command completes.
7827 * @param ul_arg An argument that is passed to the callback
7831 * - OCS_HW_RTN_SUCCESS on success.
7832 * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7833 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command context.
7835 * - OCS_HW_RTN_ERROR on any other error.
7838 ocs_hw_get_nvparms(ocs_hw_t *hw, ocs_get_nvparms_cb_t cb, void* ul_arg)
7841 ocs_hw_get_nvparms_cb_arg_t *cb_arg;
7842 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7844 /* mbxdata holds the header of the command */
7845 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7846 if (mbxdata == NULL) {
7847 ocs_log_err(hw->os, "failed to malloc mbox\n");
7848 return OCS_HW_RTN_NO_MEMORY;
7851 /* cb_arg holds the data that will be passed to the callback on completion */
7852 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_nvparms_cb_arg_t), OCS_M_NOWAIT);
7853 if (cb_arg == NULL) {
7854 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7855 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7856 return OCS_HW_RTN_NO_MEMORY;
7860 cb_arg->arg = ul_arg;
7862 if (sli_cmd_read_nvparms(&hw->sli, mbxdata, SLI4_BMBX_SIZE)) {
7863 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_nvparms_cb, cb_arg);
7866 if (rc != OCS_HW_RTN_SUCCESS) {
7867 ocs_log_test(hw->os, "READ_NVPARMS failed\n");
7868 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7869 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_nvparms_cb_arg_t));
7875 typedef struct ocs_hw_set_nvparms_cb_arg_s {
7876 ocs_set_nvparms_cb_t cb;
7878 } ocs_hw_set_nvparms_cb_arg_t;
7881 * @brief Called for the completion of set_nvparms for a user request.
7884 * @param hw Hardware context.
7885 * @param status The status from the MQE.
7886 * @param mqe Pointer to mailbox command buffer.
7887 * @param arg Pointer to a callback argument.
7889 * @return Returns 0 on success, or a non-zero value on failure.
7892 ocs_hw_set_nvparms_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7894 ocs_hw_set_nvparms_cb_arg_t *cb_arg = arg;
7897 cb_arg->cb(status, cb_arg->arg);
7900 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7901 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_nvparms_cb_arg_t));
7908 * @brief Write non-volatile parms.
7910 * Issues a SLI-4 WRITE_NVPARMS mailbox. When the
7911 * command completes, the provided mgmt callback function is called.
7914 * @param hw Hardware context.
7915 * @param cb Callback function to be called when the
7916 * command completes.
7917 * @param wwpn Port's WWPN in big-endian order, or NULL to use default.
7918 * @param wwnn Port's WWNN in big-endian order, or NULL to use default.
7919 * @param hard_alpa A hard AL_PA address setting used during loop
7920 * initialization. If no hard AL_PA is required, set to 0.
7921 * @param preferred_d_id A preferred D_ID address setting
7922 * that may be overridden with the CONFIG_LINK mailbox command.
7923 * If there is no preference, set to 0.
7924 * @param ul_arg An argument that is passed to the callback
7928 * - OCS_HW_RTN_SUCCESS on success.
7929 * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7930 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command context.
7932 * - OCS_HW_RTN_ERROR on any other error.
7935 ocs_hw_set_nvparms(ocs_hw_t *hw, ocs_set_nvparms_cb_t cb, uint8_t *wwpn,
7936 uint8_t *wwnn, uint8_t hard_alpa, uint32_t preferred_d_id, void* ul_arg)
7939 ocs_hw_set_nvparms_cb_arg_t *cb_arg;
7940 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7942 /* mbxdata holds the header of the command */
7943 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7944 if (mbxdata == NULL) {
7945 ocs_log_err(hw->os, "failed to malloc mbox\n");
7946 return OCS_HW_RTN_NO_MEMORY;
7949 /* cb_arg holds the data that will be passed to the callback on completion */
7950 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_nvparms_cb_arg_t), OCS_M_NOWAIT);
7951 if (cb_arg == NULL) {
7952 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7953 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7954 return OCS_HW_RTN_NO_MEMORY;
7958 cb_arg->arg = ul_arg;
7960 if (sli_cmd_write_nvparms(&hw->sli, mbxdata, SLI4_BMBX_SIZE, wwpn, wwnn, hard_alpa, preferred_d_id)) {
7961 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_nvparms_cb, cb_arg);
7964 if (rc != OCS_HW_RTN_SUCCESS) {
7965 ocs_log_test(hw->os, "SET_NVPARMS failed\n");
7966 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7967 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_nvparms_cb_arg_t));
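/*
 * Example (illustrative sketch): writing a new WWPN/WWNN pair while keeping
 * the default hard AL_PA and preferred D_ID behavior (both 0). The byte
 * arrays and the callback name are placeholders, not real addresses.
 *
 *	static void
 *	example_set_nvparms_cb(int32_t status, void *arg)
 *	{
 *		if (status != 0) {
 *			// the new WWNs were not committed
 *		}
 *	}
 *
 *	uint8_t wwpn[8] = { 0x10, 0x00, 0x00, 0x00, 0xc9, 0x00, 0x00, 0x01 };
 *	uint8_t wwnn[8] = { 0x20, 0x00, 0x00, 0x00, 0xc9, 0x00, 0x00, 0x01 };
 *
 *	ocs_hw_set_nvparms(hw, example_set_nvparms_cb, wwpn, wwnn, 0, 0, NULL);
 */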
7974 * @brief Called to obtain the count for the specified type.
7976 * @param hw Hardware context.
7977 * @param io_count_type IO count type (inuse, free, wait_free).
7979 * @return Returns the number of IOs on the specified list type.
7982 ocs_hw_io_get_count(ocs_hw_t *hw, ocs_hw_io_count_type_e io_count_type)
7984 ocs_hw_io_t *io = NULL;
7987 ocs_lock(&hw->io_lock);
7989 switch (io_count_type) {
7990 case OCS_HW_IO_INUSE_COUNT :
7991 ocs_list_foreach(&hw->io_inuse, io) {
7995 case OCS_HW_IO_FREE_COUNT :
7996 ocs_list_foreach(&hw->io_free, io) {
8000 case OCS_HW_IO_WAIT_FREE_COUNT :
8001 ocs_list_foreach(&hw->io_wait_free, io) {
8005 case OCS_HW_IO_PORT_OWNED_COUNT:
8006 ocs_list_foreach(&hw->io_port_owned, io) {
8010 case OCS_HW_IO_N_TOTAL_IO_COUNT :
8011 count = hw->config.n_io;
8015 ocs_unlock(&hw->io_lock);
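/*
 * Example (illustrative sketch): a quick occupancy check from a diagnostics
 * path. Each call walks one list under hw->io_lock, so every count is an
 * internally consistent snapshot of that list.
 *
 *	uint32_t inuse = ocs_hw_io_get_count(hw, OCS_HW_IO_INUSE_COUNT);
 *	uint32_t total = ocs_hw_io_get_count(hw, OCS_HW_IO_N_TOTAL_IO_COUNT);
 *
 *	ocs_log_debug(hw->os, "IO pool: %d of %d in use\n", inuse, total);
 */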
8021 * @brief Called to obtain the count of produced RQs.
8023 * @param hw Hardware context.
8025 * @return Returns the number of RQs produced.
8028 ocs_hw_get_rqes_produced_count(ocs_hw_t *hw)
8034 for (i = 0; i < hw->hw_rq_count; i++) {
8035 hw_rq_t *rq = hw->hw_rq[i];
8036 if (rq->rq_tracker != NULL) {
8037 for (j = 0; j < rq->entry_count; j++) {
8038 if (rq->rq_tracker[j] != NULL) {
8048 typedef struct ocs_hw_set_active_profile_cb_arg_s {
8049 ocs_set_active_profile_cb_t cb;
8051 } ocs_hw_set_active_profile_cb_arg_t;
8054 * @brief Called for the completion of set_active_profile for a user request.
8057 * @param hw Hardware context.
8058 * @param status The status from the MQE
8059 * @param mqe Pointer to mailbox command buffer.
8060 * @param arg Pointer to a callback argument.
8062 * @return Returns 0 on success, or a non-zero value on failure.
8065 ocs_hw_set_active_profile_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
8067 ocs_hw_set_active_profile_cb_arg_t *cb_arg = arg;
8070 cb_arg->cb(status, cb_arg->arg);
8073 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
8074 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_active_profile_cb_arg_t));
8081 * @brief Set the currently active profile.
8083 * Issues a SLI4 COMMON_SET_ACTIVE_PROFILE mailbox. When the
8084 * command completes, the provided mgmt callback function is called.
8087 * @param hw Hardware context.
8088 * @param profile_id Profile ID to activate.
8089 * @param cb Callback function to be called when the command completes.
8090 * @param ul_arg An argument that is passed to the callback function.
8093 * - OCS_HW_RTN_SUCCESS on success.
8094 * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
8095 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command context.
8097 * - OCS_HW_RTN_ERROR on any other error.
8100 ocs_hw_set_active_profile(ocs_hw_t *hw, ocs_set_active_profile_cb_t cb, uint32_t profile_id, void* ul_arg)
8103 ocs_hw_set_active_profile_cb_arg_t *cb_arg;
8104 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
8106 /* Only supported on Skyhawk */
8107 if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
8108 return OCS_HW_RTN_ERROR;
8111 /* mbxdata holds the header of the command */
8112 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
8113 if (mbxdata == NULL) {
8114 ocs_log_err(hw->os, "failed to malloc mbox\n");
8115 return OCS_HW_RTN_NO_MEMORY;
8118 /* cb_arg holds the data that will be passed to the callback on completion */
8119 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_active_profile_cb_arg_t), OCS_M_NOWAIT);
8120 if (cb_arg == NULL) {
8121 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
8122 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
8123 return OCS_HW_RTN_NO_MEMORY;
8127 cb_arg->arg = ul_arg;
8129 if (sli_cmd_common_set_active_profile(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 0, profile_id)) {
8130 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_active_profile_cb, cb_arg);
8133 if (rc != OCS_HW_RTN_SUCCESS) {
8134 ocs_log_test(hw->os, "SET_ACTIVE_PROFILE failed\n");
8135 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
8136 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_active_profile_cb_arg_t));
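/*
 * Usage sketch (editorial example, not driver code): activate profile 2 on a
 * Skyhawk adapter. The callback signature is inferred from
 * ocs_hw_set_active_profile_cb() above, which invokes
 * cb_arg->cb(status, cb_arg->arg); treat that signature as an assumption.
 *
 *     static void
 *     example_profile_done(int32_t status, void *arg)
 *     {
 *             ocs_hw_t *hw = arg;
 *
 *             ocs_log_debug(hw->os, "set_active_profile status=%d\n", status);
 *     }
 *
 *     if (ocs_hw_set_active_profile(hw, example_profile_done, 2, hw) != OCS_HW_RTN_SUCCESS) {
 *             ocs_log_err(hw->os, "set_active_profile submit failed\n");
 *     }
 */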
8147 * @brief Update the queue hash with the ID and index.
8149 * @param hash Pointer to hash table.
8150 * @param id ID that was created.
8151 * @param index The index into the hash object.
8154 ocs_hw_queue_hash_add(ocs_queue_hash_t *hash, uint16_t id, uint16_t index)
8156 uint32_t hash_index = id & (OCS_HW_Q_HASH_SIZE - 1);
	 * Since the hash is always bigger than the number of queues, we
	 * never have to worry about an infinite loop.
8162 while(hash[hash_index].in_use) {
8163 hash_index = (hash_index + 1) & (OCS_HW_Q_HASH_SIZE - 1);
8166 /* not used, claim the entry */
8167 hash[hash_index].id = id;
8168 hash[hash_index].in_use = 1;
8169 hash[hash_index].index = index;
8173 * @brief Find index given queue ID.
8175 * @param hash Pointer to hash table.
8176 * @param id ID to find.
8178 * @return Returns the index into the HW cq array or -1 if not found.
8181 ocs_hw_queue_hash_find(ocs_queue_hash_t *hash, uint16_t id)
8184 int32_t index = id & (OCS_HW_Q_HASH_SIZE - 1);
	 * Since the hash is always bigger than the maximum number of Qs, we
	 * never have to worry about an infinite loop. We will always find an
	 * unused entry.
8192 if (hash[index].in_use &&
8193 hash[index].id == id) {
8194 rc = hash[index].index;
8196 index = (index + 1) & (OCS_HW_Q_HASH_SIZE - 1);
8198 } while(rc == -1 && hash[index].in_use);
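/*
 * Worked example (editorial): both routines above rely on OCS_HW_Q_HASH_SIZE
 * being a power of two, so "id & (OCS_HW_Q_HASH_SIZE - 1)" is a cheap modulo.
 * Collisions are resolved by linear probing:
 *
 *     ocs_queue_hash_t hash[OCS_HW_Q_HASH_SIZE] = { { 0 } };
 *
 *     ocs_hw_queue_hash_add(hash, 5, 0);
 *     // ID 5 + OCS_HW_Q_HASH_SIZE masks to the same slot, so the add
 *     // probes forward and claims the next free entry.
 *     ocs_hw_queue_hash_add(hash, 5 + OCS_HW_Q_HASH_SIZE, 1);
 *
 *     // The find walks the same probe sequence and returns 1.
 *     int32_t idx = ocs_hw_queue_hash_find(hash, 5 + OCS_HW_Q_HASH_SIZE);
 */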
8204 ocs_hw_domain_add(ocs_hw_t *hw, ocs_domain_t *domain)
8206 int32_t rc = OCS_HW_RTN_ERROR;
8207 uint16_t fcfi = UINT16_MAX;
8209 if ((hw == NULL) || (domain == NULL)) {
8210 ocs_log_err(NULL, "bad parameter hw=%p domain=%p\n",
8212 return OCS_HW_RTN_ERROR;
8215 fcfi = domain->fcf_indicator;
8217 if (fcfi < SLI4_MAX_FCFI) {
8218 uint16_t fcf_index = UINT16_MAX;
8220 ocs_log_debug(hw->os, "adding domain %p @ %#x\n",
8222 hw->domains[fcfi] = domain;
8224 /* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
8225 if (hw->workaround.override_fcfi) {
8226 if (hw->first_domain_idx < 0) {
8227 hw->first_domain_idx = fcfi;
8231 fcf_index = domain->fcf;
8233 if (fcf_index < SLI4_MAX_FCF_INDEX) {
8234 ocs_log_debug(hw->os, "adding map of FCF index %d to FCFI %d\n",
8236 hw->fcf_index_fcfi[fcf_index] = fcfi;
8237 rc = OCS_HW_RTN_SUCCESS;
8239 ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n",
8240 fcf_index, SLI4_MAX_FCF_INDEX);
8241 hw->domains[fcfi] = NULL;
8244 ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n",
8245 fcfi, SLI4_MAX_FCFI);
8252 ocs_hw_domain_del(ocs_hw_t *hw, ocs_domain_t *domain)
8254 int32_t rc = OCS_HW_RTN_ERROR;
8255 uint16_t fcfi = UINT16_MAX;
8257 if ((hw == NULL) || (domain == NULL)) {
8258 ocs_log_err(NULL, "bad parameter hw=%p domain=%p\n",
8260 return OCS_HW_RTN_ERROR;
8263 fcfi = domain->fcf_indicator;
8265 if (fcfi < SLI4_MAX_FCFI) {
8266 uint16_t fcf_index = UINT16_MAX;
8268 ocs_log_debug(hw->os, "deleting domain %p @ %#x\n",
8271 if (domain != hw->domains[fcfi]) {
8272 ocs_log_test(hw->os, "provided domain %p does not match stored domain %p\n",
8273 domain, hw->domains[fcfi]);
8274 return OCS_HW_RTN_ERROR;
8277 hw->domains[fcfi] = NULL;
8279 /* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
8280 if (hw->workaround.override_fcfi) {
8281 if (hw->first_domain_idx == fcfi) {
8282 hw->first_domain_idx = -1;
8286 fcf_index = domain->fcf;
8288 if (fcf_index < SLI4_MAX_FCF_INDEX) {
8289 if (hw->fcf_index_fcfi[fcf_index] == fcfi) {
8290 hw->fcf_index_fcfi[fcf_index] = 0;
8291 rc = OCS_HW_RTN_SUCCESS;
8293 ocs_log_test(hw->os, "indexed FCFI %#x doesn't match provided %#x @ %d\n",
8294 hw->fcf_index_fcfi[fcf_index], fcfi, fcf_index);
8297 ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n",
8298 fcf_index, SLI4_MAX_FCF_INDEX);
8301 ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n",
8302 fcfi, SLI4_MAX_FCFI);
8309 ocs_hw_domain_get(ocs_hw_t *hw, uint16_t fcfi)
8313 ocs_log_err(NULL, "bad parameter hw=%p\n", hw);
8317 if (fcfi < SLI4_MAX_FCFI) {
8318 return hw->domains[fcfi];
8320 ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n",
8321 fcfi, SLI4_MAX_FCFI);
8326 static ocs_domain_t *
8327 ocs_hw_domain_get_indexed(ocs_hw_t *hw, uint16_t fcf_index)
8331 ocs_log_err(NULL, "bad parameter hw=%p\n", hw);
8335 if (fcf_index < SLI4_MAX_FCF_INDEX) {
8336 return ocs_hw_domain_get(hw, hw->fcf_index_fcfi[fcf_index]);
8338 ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n",
8339 fcf_index, SLI4_MAX_FCF_INDEX);
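/*
 * Editorial note: domain lookup is two-level. A FIP event carries an FCF
 * index; fcf_index_fcfi[] maps that index to an FCFI, which in turn indexes
 * domains[]. For example, ocs_hw_cb_fip() below resolves its event with:
 *
 *     domain = ocs_hw_domain_get_indexed(hw, event->index);
 */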
 * @brief Quarantine an IO by taking a reference count and adding it to the
 * quarantine list. When the IO is popped from the list, the
 * count is released and the IO MAY be freed, depending on whether
 * any other references to it are still held.
8350 * @n @b Note: BZ 160124 - If this is a target write or an initiator read using
8351 * DIF, then we must add the XRI to a quarantine list until we receive
8352 * 4 more completions of this same type.
8354 * @param hw Hardware context.
8355 * @param wq Pointer to the WQ associated with the IO object to quarantine.
8356 * @param io Pointer to the io object to quarantine.
8359 ocs_hw_io_quarantine(ocs_hw_t *hw, hw_wq_t *wq, ocs_hw_io_t *io)
8361 ocs_quarantine_info_t *q_info = &wq->quarantine_info;
8363 ocs_hw_io_t *free_io = NULL;
8365 /* return if the QX bit was clear */
8366 if (!io->quarantine) {
8370 /* increment the IO refcount to prevent it from being freed before the quarantine is over */
8371 if (ocs_ref_get_unless_zero(&io->ref) == 0) {
8372 /* command no longer active */
8373 ocs_log_debug(hw ? hw->os : NULL,
8374 "io not active xri=0x%x tag=0x%x\n",
8375 io->indicator, io->reqtag);
8379 sli_queue_lock(wq->queue);
8380 index = q_info->quarantine_index;
8381 free_io = q_info->quarantine_ios[index];
8382 q_info->quarantine_ios[index] = io;
8383 q_info->quarantine_index = (index + 1) % OCS_HW_QUARANTINE_QUEUE_DEPTH;
8384 sli_queue_unlock(wq->queue);
8386 if (free_io != NULL) {
8387 ocs_ref_put(&free_io->ref); /* ocs_ref_get(): same function */
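/*
 * Editorial sketch of the pattern used above: the quarantine list is a
 * fixed-depth ring, and storing a new entry hands back the entry it
 * displaces so the displaced IO's reference can be dropped. A minimal
 * standalone equivalent, with DEPTH standing in for
 * OCS_HW_QUARANTINE_QUEUE_DEPTH:
 *
 *     #define DEPTH 4
 *     static void *ring[DEPTH];
 *     static uint32_t slot;
 *
 *     static void *
 *     ring_push(void *item)
 *     {
 *             void *displaced = ring[slot];
 *
 *             ring[slot] = item;
 *             slot = (slot + 1) % DEPTH;
 *             return displaced;   // caller drops the displaced reference
 *     }
 */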
8392 * @brief Process entries on the given completion queue.
8394 * @param hw Hardware context.
8395 * @param cq Pointer to the HW completion queue object.
8400 ocs_hw_cq_process(ocs_hw_t *hw, hw_cq_t *cq)
8402 uint8_t cqe[sizeof(sli4_mcqe_t)];
8403 uint16_t rid = UINT16_MAX;
8404 sli4_qentry_e ctype; /* completion type */
8406 uint32_t n_processed = 0;
8410 tstart = ocs_msectime();
8412 while (!sli_queue_read(&hw->sli, cq->queue, cqe)) {
8413 status = sli_cq_parse(&hw->sli, cq->queue, cqe, &ctype, &rid);
8415 * The sign of status is significant. If status is:
8416 * == 0 : call completed correctly and the CQE indicated success
8417 * > 0 : call completed correctly and the CQE indicated an error
8418 * < 0 : call failed and no information is available about the CQE
8422 /* Notification that an entry was consumed, but not completed */
8430 case SLI_QENTRY_ASYNC:
8432 sli_cqe_async(&hw->sli, cqe);
8436 * Process MQ entry. Note there is no way to determine
8437 * the MQ_ID from the completion entry.
8440 ocs_hw_mq_process(hw, status, hw->mq);
8442 case SLI_QENTRY_OPT_WRITE_CMD:
8443 ocs_hw_rqpair_process_auto_xfr_rdy_cmd(hw, cq, cqe);
8445 case SLI_QENTRY_OPT_WRITE_DATA:
8446 ocs_hw_rqpair_process_auto_xfr_rdy_data(hw, cq, cqe);
8450 ocs_hw_wq_process(hw, cq, cqe, status, rid);
8452 case SLI_QENTRY_WQ_RELEASE: {
8453 uint32_t wq_id = rid;
8454 int32_t index = ocs_hw_queue_hash_find(hw->wq_hash, wq_id);
8456 if (unlikely(index < 0)) {
8457 ocs_log_err(hw->os, "unknown idx=%#x rid=%#x\n",
8462 hw_wq_t *wq = hw->hw_wq[index];
8464 /* Submit any HW IOs that are on the WQ pending list */
8465 hw_wq_submit_pending(wq, wq->wqec_set_count);
8472 ocs_hw_rqpair_process_rq(hw, cq, cqe);
8474 case SLI_QENTRY_XABT: {
8476 ocs_hw_xabt_process(hw, cq, cqe, rid);
8480 ocs_log_test(hw->os, "unhandled ctype=%#x rid=%#x\n", ctype, rid);
8485 if (n_processed == cq->queue->proc_limit) {
8489 if (cq->queue->n_posted >= (cq->queue->posted_limit)) {
8490 sli_queue_arm(&hw->sli, cq->queue, FALSE);
8494 sli_queue_arm(&hw->sli, cq->queue, TRUE);
8496 if (n_processed > cq->queue->max_num_processed) {
8497 cq->queue->max_num_processed = n_processed;
8499 telapsed = ocs_msectime() - tstart;
8500 if (telapsed > cq->queue->max_process_time) {
8501 cq->queue->max_process_time = telapsed;
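/*
 * Editorial example: the sign convention for the sli_cq_parse() status
 * consumed in the loop above.
 *
 *     int32_t status = sli_cq_parse(&hw->sli, cq->queue, cqe, &ctype, &rid);
 *
 *     if (status < 0) {
 *             // the call failed; nothing is known about the CQE
 *     } else if (status == 0) {
 *             // CQE consumed and it indicated success
 *     } else {
 *             // CQE consumed but it indicated an error
 *     }
 */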
8506 * @brief Process WQ completion queue entries.
8508 * @param hw Hardware context.
8509 * @param cq Pointer to the HW completion queue object.
 * @param cqe Pointer to the WQ completion queue entry.
8511 * @param status Completion status.
8512 * @param rid Resource ID (IO tag).
8517 ocs_hw_wq_process(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe, int32_t status, uint16_t rid)
8519 hw_wq_callback_t *wqcb;
8521 ocs_queue_history_cqe(&hw->q_hist, SLI_QENTRY_WQ, (void *)cqe, ((sli4_fc_wcqe_t *)cqe)->status, cq->queue->id,
8522 ((cq->queue->index - 1) & (cq->queue->length - 1)));
8524 if(rid == OCS_HW_REQUE_XRI_REGTAG) {
8526 ocs_log_err(hw->os, "reque xri failed, status = %d \n", status);
8531 wqcb = ocs_hw_reqtag_get_instance(hw, rid);
8533 ocs_log_err(hw->os, "invalid request tag: x%x\n", rid);
8537 if (wqcb->callback == NULL) {
8538 ocs_log_err(hw->os, "wqcb callback is NULL\n");
8542 (*wqcb->callback)(wqcb->arg, cqe, status);
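/*
 * Editorial note: the rid carried in a WQ completion is the request tag
 * allocated when the WQE was built. ocs_hw_setup_io() below pairs each tag
 * with its dispatch callback via
 * ocs_hw_reqtag_alloc(hw, ocs_hw_wq_process_io, io), which is how
 * ocs_hw_reqtag_get_instance() recovers the callback and argument here.
 */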
8546 * @brief Process WQ completions for IO requests
 * @param arg Generic callback argument.
 * @param cqe Pointer to the completion queue entry.
 * @param status Completion status.
8553 * @n @b Note: Regarding io->reqtag, the reqtag is assigned once when HW IOs are initialized
 * in ocs_hw_setup_io(), and does not need to be returned to the hw->wq_reqtag_pool.
8559 ocs_hw_wq_process_io(void *arg, uint8_t *cqe, int32_t status)
8561 ocs_hw_io_t *io = arg;
8562 ocs_hw_t *hw = io->hw;
8563 sli4_fc_wcqe_t *wcqe = (void *)cqe;
8566 uint8_t out_of_order_axr_cmd = 0;
8567 uint8_t out_of_order_axr_data = 0;
8568 uint8_t lock_taken = 0;
8569 #if defined(OCS_DISC_SPIN_DELAY)
8575 * For the primary IO, this will also be used for the
8576 * response. So it is important to only set/clear this
8577 * flag on the first data phase of the IO because
8578 * subsequent phases will be done on the secondary XRI.
8580 if (io->quarantine && io->quarantine_first_phase) {
8581 io->quarantine = (wcqe->qx == 1);
8582 ocs_hw_io_quarantine(hw, io->wq, io);
8584 io->quarantine_first_phase = FALSE;
8586 /* BZ 161832 - free secondary HW IO */
8587 if (io->sec_hio != NULL &&
8588 io->sec_hio->quarantine) {
8590 * If the quarantine flag is set on the
8591 * IO, then set it on the secondary IO
8592 * based on the quarantine XRI (QX) bit
8595 io->sec_hio->quarantine = (wcqe->qx == 1);
8596 /* use the primary io->wq because it is not set on the secondary IO. */
8597 ocs_hw_io_quarantine(hw, io->wq, io->sec_hio);
8600 ocs_hw_remove_io_timed_wqe(hw, io);
8602 /* clear xbusy flag if WCQE[XB] is clear */
8603 if (io->xbusy && wcqe->xb == 0) {
8607 /* get extended CQE status */
8609 case OCS_HW_BLS_ACC:
8610 case OCS_HW_BLS_ACC_SID:
8612 case OCS_HW_ELS_REQ:
8613 sli_fc_els_did(&hw->sli, cqe, &ext);
8614 len = sli_fc_response_length(&hw->sli, cqe);
8616 case OCS_HW_ELS_RSP:
8617 case OCS_HW_ELS_RSP_SID:
8618 case OCS_HW_FC_CT_RSP:
8621 len = sli_fc_response_length(&hw->sli, cqe);
8623 case OCS_HW_IO_TARGET_WRITE:
8624 len = sli_fc_io_length(&hw->sli, cqe);
8625 #if defined(OCS_DISC_SPIN_DELAY)
8626 if (ocs_get_property("disk_spin_delay", prop_buf, sizeof(prop_buf)) == 0) {
8627 delay = ocs_strtoul(prop_buf, 0, 0);
8632 case OCS_HW_IO_TARGET_READ:
8633 len = sli_fc_io_length(&hw->sli, cqe);
8635 * if_type == 2 seems to return 0 "total length placed" on
8636 * FCP_TSEND64_WQE completions. If this appears to happen,
8637 * use the CTIO data transfer length instead.
8639 if (hw->workaround.retain_tsend_io_length && !len && !status) {
8644 case OCS_HW_IO_TARGET_RSP:
8645 if(io->is_port_owned) {
8646 ocs_lock(&io->axr_lock);
8648 if(io->axr_buf->call_axr_cmd) {
8649 out_of_order_axr_cmd = 1;
8651 if(io->axr_buf->call_axr_data) {
8652 out_of_order_axr_data = 1;
8656 case OCS_HW_IO_INITIATOR_READ:
8657 len = sli_fc_io_length(&hw->sli, cqe);
8659 case OCS_HW_IO_INITIATOR_WRITE:
8660 len = sli_fc_io_length(&hw->sli, cqe);
8662 case OCS_HW_IO_INITIATOR_NODATA:
8664 case OCS_HW_IO_DNRX_REQUEUE:
8665 /* release the count for re-posting the buffer */
8666 //ocs_hw_io_free(hw, io);
8669 ocs_log_test(hw->os, "XXX unhandled io type %#x for XRI 0x%x\n",
8670 io->type, io->indicator);
8674 ext = sli_fc_ext_status(&hw->sli, cqe);
	/* Emulate IAAB=0 for initiator WQEs only; i.e. automatically
	 * abort the exchange if an error occurred and the exchange is still busy.
8678 if (hw->config.i_only_aab &&
8679 (ocs_hw_iotype_is_originator(io->type)) &&
8680 (ocs_hw_wcqe_abort_needed(status, ext, wcqe->xb))) {
8683 ocs_log_debug(hw->os, "aborting xri=%#x tag=%#x\n",
8684 io->indicator, io->reqtag);
		 * Because the initiator will not issue another IO phase, it is OK to issue the
		 * callback on the abort completion; but for consistency with the target path, wait for the
		 * XRI_ABORTED CQE to issue the IO callback.
8690 rc = ocs_hw_io_abort(hw, io, TRUE, NULL, NULL);
8692 if (rc == OCS_HW_RTN_SUCCESS) {
8693 /* latch status to return after abort is complete */
8694 io->status_saved = 1;
8695 io->saved_status = status;
8696 io->saved_ext = ext;
8697 io->saved_len = len;
8698 goto exit_ocs_hw_wq_process_io;
8699 } else if (rc == OCS_HW_RTN_IO_ABORT_IN_PROGRESS) {
8701 * Already being aborted by someone else (ABTS
8702 * perhaps). Just fall through and return original
8705 ocs_log_debug(hw->os, "abort in progress xri=%#x tag=%#x\n",
8706 io->indicator, io->reqtag);
8709 /* Failed to abort for some other reason, log error */
8710 ocs_log_test(hw->os, "Failed to abort xri=%#x tag=%#x rc=%d\n",
8711 io->indicator, io->reqtag, rc);
	 * If this is not an originator IO and XB is set, then issue an abort for the IO from within the HW
8718 if ( (! ocs_hw_iotype_is_originator(io->type)) && wcqe->xb) {
8721 ocs_log_debug(hw->os, "aborting xri=%#x tag=%#x\n", io->indicator, io->reqtag);
8724 * Because targets may send a response when the IO completes using the same XRI, we must
8725 * wait for the XRI_ABORTED CQE to issue the IO callback
8727 rc = ocs_hw_io_abort(hw, io, FALSE, NULL, NULL);
8728 if (rc == OCS_HW_RTN_SUCCESS) {
8729 /* latch status to return after abort is complete */
8730 io->status_saved = 1;
8731 io->saved_status = status;
8732 io->saved_ext = ext;
8733 io->saved_len = len;
8734 goto exit_ocs_hw_wq_process_io;
8735 } else if (rc == OCS_HW_RTN_IO_ABORT_IN_PROGRESS) {
8737 * Already being aborted by someone else (ABTS
8738 * perhaps). Just fall through and return original
8741 ocs_log_debug(hw->os, "abort in progress xri=%#x tag=%#x\n",
8742 io->indicator, io->reqtag);
8745 /* Failed to abort for some other reason, log error */
8746 ocs_log_test(hw->os, "Failed to abort xri=%#x tag=%#x rc=%d\n",
8747 io->indicator, io->reqtag, rc);
8751 /* BZ 161832 - free secondary HW IO */
8752 if (io->sec_hio != NULL) {
8753 ocs_hw_io_free(hw, io->sec_hio);
8757 if (io->done != NULL) {
8758 ocs_hw_done_t done = io->done;
8759 void *arg = io->arg;
8763 if (io->status_saved) {
			/* use the latched status if one exists */
8765 status = io->saved_status;
8766 len = io->saved_len;
8767 ext = io->saved_ext;
8768 io->status_saved = 0;
8771 /* Restore default SGL */
8772 ocs_hw_io_restore_sgl(hw, io);
8773 done(io, io->rnode, len, status, ext, arg);
8776 if(out_of_order_axr_cmd) {
8777 /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
8778 if (hw->config.bounce) {
8779 fc_header_t *hdr = io->axr_buf->cmd_seq->header->dma.virt;
8780 uint32_t s_id = fc_be24toh(hdr->s_id);
8781 uint32_t d_id = fc_be24toh(hdr->d_id);
8782 uint32_t ox_id = ocs_be16toh(hdr->ox_id);
8783 if (hw->callback.bounce != NULL) {
8784 (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, io->axr_buf->cmd_seq, s_id, d_id, ox_id);
8787 hw->callback.unsolicited(hw->args.unsolicited, io->axr_buf->cmd_seq);
8790 if(out_of_order_axr_data) {
8791 /* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
8792 if (hw->config.bounce) {
8793 fc_header_t *hdr = io->axr_buf->seq.header->dma.virt;
8794 uint32_t s_id = fc_be24toh(hdr->s_id);
8795 uint32_t d_id = fc_be24toh(hdr->d_id);
8796 uint32_t ox_id = ocs_be16toh(hdr->ox_id);
8797 if (hw->callback.bounce != NULL) {
8798 (*hw->callback.bounce)(ocs_hw_unsol_process_bounce, &io->axr_buf->seq, s_id, d_id, ox_id);
8801 hw->callback.unsolicited(hw->args.unsolicited, &io->axr_buf->seq);
8806 exit_ocs_hw_wq_process_io:
8808 ocs_unlock(&io->axr_lock);
8813 * @brief Process WQ completions for abort requests.
8815 * @param arg Generic callback argument.
8816 * @param cqe Pointer to completion queue entry.
8817 * @param status Completion status.
8822 ocs_hw_wq_process_abort(void *arg, uint8_t *cqe, int32_t status)
8824 ocs_hw_io_t *io = arg;
8825 ocs_hw_t *hw = io->hw;
8828 hw_wq_callback_t *wqcb;
8831 * For IOs that were aborted internally, we may need to issue the callback here depending
	 * on whether an XRI_ABORTED CQE is expected or not. If the status is Local Reject/No XRI, then
8833 * issue the callback now.
8835 ext = sli_fc_ext_status(&hw->sli, cqe);
8836 if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT &&
8837 ext == SLI4_FC_LOCAL_REJECT_NO_XRI &&
8839 ocs_hw_done_t done = io->done;
8840 void *arg = io->arg;
8845 * Use latched status as this is always saved for an internal abort
		 * Note: We won't have both a done and abort_done function, so don't worry about
8848 * clobbering the len, status and ext fields.
8850 status = io->saved_status;
8851 len = io->saved_len;
8852 ext = io->saved_ext;
8853 io->status_saved = 0;
8854 done(io, io->rnode, len, status, ext, arg);
8857 if (io->abort_done != NULL) {
8858 ocs_hw_done_t done = io->abort_done;
8859 void *arg = io->abort_arg;
8861 io->abort_done = NULL;
8863 done(io, io->rnode, len, status, ext, arg);
8865 ocs_lock(&hw->io_abort_lock);
8866 /* clear abort bit to indicate abort is complete */
8867 io->abort_in_progress = 0;
8868 ocs_unlock(&hw->io_abort_lock);
8870 /* Free the WQ callback */
8871 ocs_hw_assert(io->abort_reqtag != UINT32_MAX);
8872 wqcb = ocs_hw_reqtag_get_instance(hw, io->abort_reqtag);
8873 ocs_hw_reqtag_free(hw, wqcb);
8876 * Call ocs_hw_io_free() because this releases the WQ reservation as
8877 * well as doing the refcount put. Don't duplicate the code here.
8879 (void)ocs_hw_io_free(hw, io);
8883 * @brief Process XABT completions
8885 * @param hw Hardware context.
8886 * @param cq Pointer to the HW completion queue object.
 * @param cqe Pointer to the completion queue entry.
8888 * @param rid Resource ID (IO tag).
8894 ocs_hw_xabt_process(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe, uint16_t rid)
8896 /* search IOs wait free list */
8897 ocs_hw_io_t *io = NULL;
8899 io = ocs_hw_io_lookup(hw, rid);
8901 ocs_queue_history_cqe(&hw->q_hist, SLI_QENTRY_XABT, (void *)cqe, 0, cq->queue->id,
8902 ((cq->queue->index - 1) & (cq->queue->length - 1)));
8904 /* IO lookup failure should never happen */
8905 ocs_log_err(hw->os, "Error: xabt io lookup failed rid=%#x\n", rid);
8910 ocs_log_debug(hw->os, "xabt io not busy rid=%#x\n", rid);
8912 /* mark IO as no longer busy */
8916 if (io->is_port_owned) {
8917 ocs_lock(&hw->io_lock);
		/* Take a reference so that the callback below does not free the IO before it is requeued */
8919 ocs_ref_get(&io->ref);
8920 ocs_unlock(&hw->io_lock);
8923 /* For IOs that were aborted internally, we need to issue any pending callback here. */
8924 if (io->done != NULL) {
8925 ocs_hw_done_t done = io->done;
8926 void *arg = io->arg;
8928 /* Use latched status as this is always saved for an internal abort */
8929 int32_t status = io->saved_status;
8930 uint32_t len = io->saved_len;
8931 uint32_t ext = io->saved_ext;
8934 io->status_saved = 0;
8936 done(io, io->rnode, len, status, ext, arg);
8939 /* Check to see if this is a port owned XRI */
8940 if (io->is_port_owned) {
8941 ocs_lock(&hw->io_lock);
8942 ocs_hw_reque_xri(hw, io);
8943 ocs_unlock(&hw->io_lock);
		/* Not handling the reque_xri completion; free the IO */
8945 ocs_hw_io_free(hw, io);
8949 ocs_lock(&hw->io_lock);
8950 if ((io->state == OCS_HW_IO_STATE_INUSE) || (io->state == OCS_HW_IO_STATE_WAIT_FREE)) {
8951 /* if on wait_free list, caller has already freed IO;
8952 * remove from wait_free list and add to free list.
8953 * if on in-use list, already marked as no longer busy;
8954 * just leave there and wait for caller to free.
8956 if (io->state == OCS_HW_IO_STATE_WAIT_FREE) {
8957 io->state = OCS_HW_IO_STATE_FREE;
8958 ocs_list_remove(&hw->io_wait_free, io);
8959 ocs_hw_io_free_move_correct_list(hw, io);
8962 ocs_unlock(&hw->io_lock);
8966 * @brief Adjust the number of WQs and CQs within the HW.
8969 * Calculates the number of WQs and associated CQs needed in the HW based on
8970 * the number of IOs. Calculates the starting CQ index for each WQ, RQ and
8973 * @param hw Hardware context allocated by the caller.
8976 ocs_hw_adjust_wqs(ocs_hw_t *hw)
8978 uint32_t max_wq_num = sli_get_max_queue(&hw->sli, SLI_QTYPE_WQ);
8979 uint32_t max_wq_entries = hw->num_qentries[SLI_QTYPE_WQ];
8980 uint32_t max_cq_entries = hw->num_qentries[SLI_QTYPE_CQ];
	 * Possibly adjust the size of the WQs so that the CQ is twice as
8984 * big as the WQ to allow for 2 completions per IO. This allows us to
8985 * handle multi-phase as well as aborts.
8987 if (max_cq_entries < max_wq_entries * 2) {
8988 max_wq_entries = hw->num_qentries[SLI_QTYPE_WQ] = max_cq_entries / 2;
	 * Calculate the number of WQs to use based on the number of IOs.
	 *
	 * Note: We need to reserve room for aborts, which must be sent down
	 *       the same WQ as the IO. So we allocate enough WQ space to
	 *       handle 2 times the number of IOs. Half of the space will be
	 *       used for normal IOs and the other half is reserved for aborts.
8999 hw->config.n_wq = ((hw->config.n_io * 2) + (max_wq_entries - 1)) / max_wq_entries;
	 * For performance reasons, it is best to use a minimum of 4 WQs
9003 * for BE3 and Skyhawk.
9005 if (hw->config.n_wq < 4 &&
9006 SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) {
9007 hw->config.n_wq = 4;
9011 * For dual-chute support, we need to have at least one WQ per chute.
9013 if (hw->config.n_wq < 2 &&
9014 ocs_hw_get_num_chutes(hw) > 1) {
9015 hw->config.n_wq = 2;
9018 /* make sure we haven't exceeded the max supported in the HW */
9019 if (hw->config.n_wq > OCS_HW_MAX_NUM_WQ) {
9020 hw->config.n_wq = OCS_HW_MAX_NUM_WQ;
9023 /* make sure we haven't exceeded the chip maximum */
9024 if (hw->config.n_wq > max_wq_num) {
9025 hw->config.n_wq = max_wq_num;
	 * Using the Queue Topology string, we divide by the number of chutes.
9031 hw->config.n_wq /= ocs_hw_get_num_chutes(hw);
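/*
 * Worked example (editorial): with n_io = 1024 and max_wq_entries = 256,
 * reserving 2x WQ space for aborts gives
 *
 *     n_wq = ((1024 * 2) + (256 - 1)) / 256 = 2303 / 256 = 8
 *
 * i.e. a ceiling division of 2 * n_io by the WQ depth. If the queue
 * topology then reports two chutes, the final division above yields
 * 8 / 2 = 4 WQs per chute.
 */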
9035 ocs_hw_command_process(ocs_hw_t *hw, int32_t status, uint8_t *mqe, size_t size)
9037 ocs_command_ctx_t *ctx = NULL;
9039 ocs_lock(&hw->cmd_lock);
9040 if (NULL == (ctx = ocs_list_remove_head(&hw->cmd_head))) {
9041 ocs_log_err(hw->os, "XXX no command context?!?\n");
9042 ocs_unlock(&hw->cmd_lock);
9046 hw->cmd_head_count--;
9048 /* Post any pending requests */
9049 ocs_hw_cmd_submit_pending(hw);
9051 ocs_unlock(&hw->cmd_lock);
9055 ocs_memcpy(ctx->buf, mqe, size);
9057 ctx->cb(hw, status, ctx->buf, ctx->arg);
9060 ocs_memset(ctx, 0, sizeof(ocs_command_ctx_t));
9061 ocs_free(hw->os, ctx, sizeof(ocs_command_ctx_t));
9067 * @brief Process entries on the given mailbox queue.
9069 * @param hw Hardware context.
9070 * @param status CQE status.
9071 * @param mq Pointer to the mailbox queue object.
9073 * @return Returns 0 on success, or a non-zero value on failure.
9076 ocs_hw_mq_process(ocs_hw_t *hw, int32_t status, sli4_queue_t *mq)
9078 uint8_t mqe[SLI4_BMBX_SIZE];
9080 if (!sli_queue_read(&hw->sli, mq, mqe)) {
9081 ocs_hw_command_process(hw, status, mqe, mq->size);
9088 * @brief Read a FCF table entry.
9090 * @param hw Hardware context.
9091 * @param index Table index to read. Use SLI4_FCOE_FCF_TABLE_FIRST for the first
9092 * read and the next_index field from the FCOE_READ_FCF_TABLE command
9093 * for subsequent reads.
9095 * @return Returns 0 on success, or a non-zero value on failure.
9098 ocs_hw_read_fcf(ocs_hw_t *hw, uint32_t index)
9100 uint8_t *buf = NULL;
9101 int32_t rc = OCS_HW_RTN_ERROR;
9103 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
9105 ocs_log_err(hw->os, "no buffer for command\n");
9106 return OCS_HW_RTN_NO_MEMORY;
9109 if (sli_cmd_fcoe_read_fcf_table(&hw->sli, buf, SLI4_BMBX_SIZE, &hw->fcf_dmem,
9111 rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_read_fcf, &hw->fcf_dmem);
9114 if (rc != OCS_HW_RTN_SUCCESS) {
9115 ocs_log_test(hw->os, "FCOE_READ_FCF_TABLE failed\n");
9116 ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
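/*
 * Usage pattern (editorial): the FCF table is walked by seeding the first
 * read and then chaining on next_index from the completion callback,
 * exactly as ocs_hw_cb_read_fcf() below does:
 *
 *     ocs_hw_read_fcf(hw, SLI4_FCOE_FCF_TABLE_FIRST);
 *
 *     // ... then, inside the completion callback:
 *     if (SLI4_FCOE_FCF_TABLE_LAST != read_fcf->next_index) {
 *             ocs_hw_read_fcf(hw, read_fcf->next_index);
 *     }
 */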
9123 * @brief Callback function for the FCOE_READ_FCF_TABLE command.
9126 * Note that the caller has allocated:
9127 * - DMA memory to hold the table contents
9128 * - DMA memory structure
9129 * - Command/results buffer
 * The command/results buffer is freed here; the DMA memory is retained
 * because it is reused for subsequent FCF table reads.
9133 * @param hw Hardware context.
9134 * @param status Hardware status.
9135 * @param mqe Pointer to the mailbox command/results buffer.
9136 * @param arg Pointer to the DMA memory structure.
9138 * @return Returns 0 on success, or a non-zero value on failure.
9141 ocs_hw_cb_read_fcf(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9143 ocs_dma_t *dma = arg;
9144 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
9146 if (status || hdr->status) {
9147 ocs_log_test(hw->os, "bad status cqe=%#x mqe=%#x\n",
9148 status, hdr->status);
9149 } else if (dma->virt) {
9150 sli4_res_fcoe_read_fcf_table_t *read_fcf = dma->virt;
9152 /* if FC or FCOE and FCF entry valid, process it */
9153 if (read_fcf->fcf_entry.fc ||
9154 (read_fcf->fcf_entry.val && !read_fcf->fcf_entry.sol)) {
9155 if (hw->callback.domain != NULL) {
9156 ocs_domain_record_t drec = {0};
9158 if (read_fcf->fcf_entry.fc) {
9160 * This is a pseudo FCF entry. Create a domain
9161 * record based on the read topology information
9163 drec.speed = hw->link.speed;
9164 drec.fc_id = hw->link.fc_id;
9166 if (SLI_LINK_TOPO_LOOP == hw->link.topology) {
9167 drec.is_loop = TRUE;
9168 ocs_memcpy(drec.map.loop, hw->link.loop_map,
9169 sizeof(drec.map.loop));
9170 } else if (SLI_LINK_TOPO_NPORT == hw->link.topology) {
9171 drec.is_nport = TRUE;
9174 drec.index = read_fcf->fcf_entry.fcf_index;
9175 drec.priority = read_fcf->fcf_entry.fip_priority;
9177 /* copy address, wwn and vlan_bitmap */
9178 ocs_memcpy(drec.address, read_fcf->fcf_entry.fcf_mac_address,
9179 sizeof(drec.address));
9180 ocs_memcpy(drec.wwn, read_fcf->fcf_entry.fabric_name_id,
9182 ocs_memcpy(drec.map.vlan, read_fcf->fcf_entry.vlan_bitmap,
9183 sizeof(drec.map.vlan));
9185 drec.is_ethernet = TRUE;
9186 drec.is_nport = TRUE;
9189 hw->callback.domain(hw->args.domain,
9190 OCS_HW_DOMAIN_FOUND,
9194 /* if FCOE and FCF is not valid, ignore it */
9195 ocs_log_test(hw->os, "ignore invalid FCF entry\n");
9198 if (SLI4_FCOE_FCF_TABLE_LAST != read_fcf->next_index) {
9199 ocs_hw_read_fcf(hw, read_fcf->next_index);
9203 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9204 //ocs_dma_free(hw->os, dma);
9205 //ocs_free(hw->os, dma, sizeof(ocs_dma_t));
9211 * @brief Callback function for the SLI link events.
9214 * This function allocates memory which must be freed in its callback.
9216 * @param ctx Hardware context pointer (that is, ocs_hw_t *).
9217 * @param e Event structure pointer (that is, sli4_link_event_t *).
9219 * @return Returns 0 on success, or a non-zero value on failure.
9222 ocs_hw_cb_link(void *ctx, void *e)
9225 sli4_link_event_t *event = e;
9226 ocs_domain_t *d = NULL;
9228 int32_t rc = OCS_HW_RTN_ERROR;
9229 ocs_t *ocs = hw->os;
9231 ocs_hw_link_event_init(hw);
9233 switch (event->status) {
9234 case SLI_LINK_STATUS_UP:
9238 if (SLI_LINK_TOPO_NPORT == event->topology) {
9239 device_printf(ocs->dev, "Link Up, NPORT, speed is %d\n", event->speed);
9240 ocs_hw_read_fcf(hw, SLI4_FCOE_FCF_TABLE_FIRST);
9241 } else if (SLI_LINK_TOPO_LOOP == event->topology) {
9242 uint8_t *buf = NULL;
9243 device_printf(ocs->dev, "Link Up, LOOP, speed is %d\n", event->speed);
9245 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
9247 ocs_log_err(hw->os, "no buffer for command\n");
9251 if (sli_cmd_read_topology(&hw->sli, buf, SLI4_BMBX_SIZE, &hw->loop_map)) {
9252 rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, __ocs_read_topology_cb, NULL);
9255 if (rc != OCS_HW_RTN_SUCCESS) {
9256 ocs_log_test(hw->os, "READ_TOPOLOGY failed\n");
9257 ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
9260 device_printf(ocs->dev, "Link Up, unsupported topology (%#x), speed is %d\n",
9261 event->topology, event->speed);
9264 case SLI_LINK_STATUS_DOWN:
9265 device_printf(ocs->dev, "Link Down\n");
9267 hw->link.status = event->status;
9269 for (i = 0; i < SLI4_MAX_FCFI; i++) {
9272 hw->callback.domain != NULL) {
9273 hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, d);
9278 ocs_log_test(hw->os, "unhandled link status %#x\n", event->status);
9286 ocs_hw_cb_fip(void *ctx, void *e)
9289 ocs_domain_t *domain = NULL;
9290 sli4_fip_event_t *event = e;
9292 ocs_hw_assert(event);
9295 /* Find the associated domain object */
9296 if (event->type == SLI4_FCOE_FIP_FCF_CLEAR_VLINK) {
9297 ocs_domain_t *d = NULL;
9300 /* Clear VLINK is different from the other FIP events as it passes back
		 * a VPI instead of an FCF index. Check all attached SLI ports for a
		 * matching indicator.
9303 for (i = 0; i < SLI4_MAX_FCFI; i++) {
9306 ocs_sport_t *sport = NULL;
9308 ocs_list_foreach(&d->sport_list, sport) {
9309 if (sport->indicator == event->index) {
9315 if (domain != NULL) {
9321 domain = ocs_hw_domain_get_indexed(hw, event->index);
9324 switch (event->type) {
9325 case SLI4_FCOE_FIP_FCF_DISCOVERED:
9326 ocs_hw_read_fcf(hw, event->index);
9328 case SLI4_FCOE_FIP_FCF_DEAD:
9329 if (domain != NULL &&
9330 hw->callback.domain != NULL) {
9331 hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain);
9334 case SLI4_FCOE_FIP_FCF_CLEAR_VLINK:
9335 if (domain != NULL &&
9336 hw->callback.domain != NULL) {
			 * We will want to issue a rediscover FCF when this domain is freed, in order
			 * to invalidate the FCF table.
9341 domain->req_rediscover_fcf = TRUE;
9342 hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain);
9345 case SLI4_FCOE_FIP_FCF_MODIFIED:
9346 if (domain != NULL &&
9347 hw->callback.domain != NULL) {
9348 hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain);
9351 ocs_hw_read_fcf(hw, event->index);
9354 ocs_log_test(hw->os, "unsupported event %#x\n", event->type);
9361 ocs_hw_cb_node_attach(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9363 ocs_remote_node_t *rnode = arg;
9364 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
9365 ocs_hw_remote_node_event_e evt = 0;
9367 if (status || hdr->status) {
9368 ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
9370 ocs_atomic_sub_return(&hw->rpi_ref[rnode->index].rpi_count, 1);
9371 rnode->attached = FALSE;
9372 ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 0);
9373 evt = OCS_HW_NODE_ATTACH_FAIL;
9375 rnode->attached = TRUE;
9376 ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 1);
9377 evt = OCS_HW_NODE_ATTACH_OK;
9380 if (hw->callback.rnode != NULL) {
9381 hw->callback.rnode(hw->args.rnode, evt, rnode);
9383 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9389 ocs_hw_cb_node_free(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9391 ocs_remote_node_t *rnode = arg;
9392 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
9393 ocs_hw_remote_node_event_e evt = OCS_HW_NODE_FREE_FAIL;
9396 if (status || hdr->status) {
9397 ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
9401 * In certain cases, a non-zero MQE status is OK (all must be true):
9402 * - node is attached
9403 * - if High Login Mode is enabled, node is part of a node group
9404 * - status is 0x1400
9406 if (!rnode->attached || ((sli_get_hlm(&hw->sli) == TRUE) && !rnode->node_group) ||
9407 (hdr->status != SLI4_MBOX_STATUS_RPI_NOT_REG)) {
9413 rnode->node_group = FALSE;
9414 rnode->attached = FALSE;
9416 if (ocs_atomic_read(&hw->rpi_ref[rnode->index].rpi_count) == 0) {
9417 ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 0);
9420 evt = OCS_HW_NODE_FREE_OK;
9423 if (hw->callback.rnode != NULL) {
9424 hw->callback.rnode(hw->args.rnode, evt, rnode);
9427 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9433 ocs_hw_cb_node_free_all(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9435 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
9436 ocs_hw_remote_node_event_e evt = OCS_HW_NODE_FREE_FAIL;
9440 if (status || hdr->status) {
9441 ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
9444 evt = OCS_HW_NODE_FREE_ALL_OK;
9447 if (evt == OCS_HW_NODE_FREE_ALL_OK) {
9448 for (i = 0; i < sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI); i++) {
9449 ocs_atomic_set(&hw->rpi_ref[i].rpi_count, 0);
9452 if (sli_resource_reset(&hw->sli, SLI_RSRC_FCOE_RPI)) {
9453 ocs_log_test(hw->os, "FCOE_RPI free all failure\n");
9458 if (hw->callback.rnode != NULL) {
9459 hw->callback.rnode(hw->args.rnode, evt, NULL);
9462 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9468 * @brief Initialize the pool of HW IO objects.
9470 * @param hw Hardware context.
9472 * @return Returns 0 on success, or a non-zero value on failure.
9475 ocs_hw_setup_io(ocs_hw_t *hw)
9478 ocs_hw_io_t *io = NULL;
9479 uintptr_t xfer_virt = 0;
9480 uintptr_t xfer_phys = 0;
9482 uint8_t new_alloc = TRUE;
9484 if (NULL == hw->io) {
9485 hw->io = ocs_malloc(hw->os, hw->config.n_io * sizeof(ocs_hw_io_t *), OCS_M_ZERO | OCS_M_NOWAIT);
9487 if (NULL == hw->io) {
			ocs_log_err(hw->os, "IO pointer memory allocation failed, %d IOs at size %zu\n",
9490 sizeof(ocs_hw_io_t *));
9491 return OCS_HW_RTN_NO_MEMORY;
9493 for (i = 0; i < hw->config.n_io; i++) {
9494 hw->io[i] = ocs_malloc(hw->os, sizeof(ocs_hw_io_t),
9495 OCS_M_ZERO | OCS_M_NOWAIT);
9496 if (hw->io[i] == NULL) {
9497 ocs_log_err(hw->os, "IO(%d) memory allocation failed\n", i);
		/* Create WQE buffers for the IOs */
9503 hw->wqe_buffs = ocs_malloc(hw->os, hw->config.n_io * hw->sli.config.wqe_size,
9504 OCS_M_ZERO | OCS_M_NOWAIT);
9505 if (NULL == hw->wqe_buffs) {
			ocs_free(hw->os, hw->io, hw->config.n_io * sizeof(ocs_hw_io_t *));
9507 ocs_log_err(hw->os, "%s: IO WQE buff allocation failed, %d Ios at size %zu\n",
9508 __func__, hw->config.n_io, hw->sli.config.wqe_size);
9509 return OCS_HW_RTN_NO_MEMORY;
9513 /* re-use existing IOs, including SGLs */
9518 if (ocs_dma_alloc(hw->os, &hw->xfer_rdy,
9519 sizeof(fcp_xfer_rdy_iu_t) * hw->config.n_io,
9520 4/*XXX what does this need to be? */)) {
9521 ocs_log_err(hw->os, "XFER_RDY buffer allocation failed\n");
9522 return OCS_HW_RTN_NO_MEMORY;
9525 xfer_virt = (uintptr_t)hw->xfer_rdy.virt;
9526 xfer_phys = hw->xfer_rdy.phys;
9528 for (i = 0; i < hw->config.n_io; i++) {
9529 hw_wq_callback_t *wqcb;
9533 /* initialize IO fields */
9536 /* Assign a WQE buff */
9537 io->wqe.wqebuf = &hw->wqe_buffs[i * hw->sli.config.wqe_size];
9539 /* Allocate the request tag for this IO */
9540 wqcb = ocs_hw_reqtag_alloc(hw, ocs_hw_wq_process_io, io);
9542 ocs_log_err(hw->os, "can't allocate request tag\n");
9543 return OCS_HW_RTN_NO_RESOURCES;
9545 io->reqtag = wqcb->instance_index;
9547 /* Now for the fields that are initialized on each free */
9548 ocs_hw_init_free_io(io);
9550 /* The XB flag isn't cleared on IO free, so initialize it to zero here */
9553 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_XRI, &io->indicator, &index)) {
9554 ocs_log_err(hw->os, "sli_resource_alloc failed @ %d\n", i);
9555 return OCS_HW_RTN_NO_MEMORY;
9558 if (new_alloc && ocs_dma_alloc(hw->os, &io->def_sgl, hw->config.n_sgl * sizeof(sli4_sge_t), 64)) {
9559 ocs_log_err(hw->os, "ocs_dma_alloc failed @ %d\n", i);
9560 ocs_memset(&io->def_sgl, 0, sizeof(ocs_dma_t));
9561 return OCS_HW_RTN_NO_MEMORY;
9563 io->def_sgl_count = hw->config.n_sgl;
9564 io->sgl = &io->def_sgl;
9565 io->sgl_count = io->def_sgl_count;
9567 if (hw->xfer_rdy.size) {
9568 io->xfer_rdy.virt = (void *)xfer_virt;
9569 io->xfer_rdy.phys = xfer_phys;
9570 io->xfer_rdy.size = sizeof(fcp_xfer_rdy_iu_t);
9572 xfer_virt += sizeof(fcp_xfer_rdy_iu_t);
9573 xfer_phys += sizeof(fcp_xfer_rdy_iu_t);
9577 return OCS_HW_RTN_SUCCESS;
9579 for (i = 0; i < hw->config.n_io && hw->io[i]; i++) {
9580 ocs_free(hw->os, hw->io[i], sizeof(ocs_hw_io_t));
9584 return OCS_HW_RTN_NO_MEMORY;
9588 ocs_hw_init_io(ocs_hw_t *hw)
9590 uint32_t i = 0, io_index = 0;
9591 uint32_t prereg = 0;
9592 ocs_hw_io_t *io = NULL;
9593 uint8_t cmd[SLI4_BMBX_SIZE];
9594 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
9595 uint32_t nremaining;
9597 uint32_t sgls_per_request = 256;
9598 ocs_dma_t **sgls = NULL;
9599 ocs_dma_t reqbuf = { 0 };
9601 prereg = sli_get_sgl_preregister(&hw->sli);
9604 sgls = ocs_malloc(hw->os, sizeof(*sgls) * sgls_per_request, OCS_M_NOWAIT);
9606 ocs_log_err(hw->os, "ocs_malloc sgls failed\n");
9607 return OCS_HW_RTN_NO_MEMORY;
9610 rc = ocs_dma_alloc(hw->os, &reqbuf, 32 + sgls_per_request*16, OCS_MIN_DMA_ALIGNMENT);
9612 ocs_log_err(hw->os, "ocs_dma_alloc reqbuf failed\n");
9613 ocs_free(hw->os, sgls, sizeof(*sgls) * sgls_per_request);
9614 return OCS_HW_RTN_NO_MEMORY;
9618 io = hw->io[io_index];
9619 for (nremaining = hw->config.n_io; nremaining; nremaining -= n) {
		/* Copy the addresses of the SGLs into the local sgls[] array; break out if the XRI
		 * values are not contiguous.
9624 for (n = 0; n < MIN(sgls_per_request, nremaining); n++) {
9625 /* Check that we have contiguous xri values */
9627 if (hw->io[io_index + n]->indicator != (hw->io[io_index + n-1]->indicator+1)) {
9631 sgls[n] = hw->io[io_index + n]->sgl;
9634 if (sli_cmd_fcoe_post_sgl_pages(&hw->sli, cmd, sizeof(cmd),
9635 io->indicator, n, sgls, NULL, &reqbuf)) {
9636 if (ocs_hw_command(hw, cmd, OCS_CMD_POLL, NULL, NULL)) {
9637 rc = OCS_HW_RTN_ERROR;
9638 ocs_log_err(hw->os, "SGL post failed\n");
9646 /* Add to tail if successful */
9647 for (i = 0; i < n; i ++) {
9648 io->is_port_owned = 0;
9649 io->state = OCS_HW_IO_STATE_FREE;
9650 ocs_list_add_tail(&hw->io_free, io);
9651 io = hw->io[io_index+1];
9657 ocs_dma_free(hw->os, &reqbuf);
9658 ocs_free(hw->os, sgls, sizeof(*sgls) * sgls_per_request);
9665 ocs_hw_flush(ocs_hw_t *hw)
9669 /* Process any remaining completions */
9670 for (i = 0; i < hw->eq_count; i++) {
9671 ocs_hw_process(hw, i, ~0);
9678 ocs_hw_command_cancel(ocs_hw_t *hw)
9681 ocs_lock(&hw->cmd_lock);
9684 * Manually clean up remaining commands. Note: since this calls
9685 * ocs_hw_command_process(), we'll also process the cmd_pending
9686 * list, so no need to manually clean that out.
9688 while (!ocs_list_empty(&hw->cmd_head)) {
9689 uint8_t mqe[SLI4_BMBX_SIZE] = { 0 };
9690 ocs_command_ctx_t *ctx = ocs_list_get_head(&hw->cmd_head);
9692 ocs_log_test(hw->os, "hung command %08x\n",
9693 NULL == ctx ? UINT32_MAX :
9694 (NULL == ctx->buf ? UINT32_MAX : *((uint32_t *)ctx->buf)));
9695 ocs_unlock(&hw->cmd_lock);
9696 ocs_hw_command_process(hw, -1/*Bad status*/, mqe, SLI4_BMBX_SIZE);
9697 ocs_lock(&hw->cmd_lock);
9700 ocs_unlock(&hw->cmd_lock);
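/*
 * Editorial note: the lock is dropped around the ocs_hw_command_process()
 * call above because that routine acquires hw->cmd_lock itself; holding
 * the lock across the call would self-deadlock on a non-recursive lock.
 */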
9706 * @brief Find IO given indicator (xri).
 * @param hw Hardware context.
 * @param xri Indicator (XRI) to look for.
 *
 * @return Returns the IO if found, or NULL otherwise.
9714 ocs_hw_io_lookup(ocs_hw_t *hw, uint32_t xri)
9717 ioindex = xri - hw->sli.config.extent[SLI_RSRC_FCOE_XRI].base[0];
9718 return hw->io[ioindex];
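/*
 * Editorial example: XRIs are allocated from a contiguous extent, so the
 * lookup above is plain arithmetic. With an XRI extent base of 0x100, an
 * XRI of 0x105 maps to hw->io[5].
 */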
 * @brief Issue any pending callbacks for an IO and remove it from the timer and pending lists.
 *
 * @param hw Hardware context.
 * @param io Pointer to the IO to clean up.
9728 ocs_hw_io_cancel_cleanup(ocs_hw_t *hw, ocs_hw_io_t *io)
9730 ocs_hw_done_t done = io->done;
9731 ocs_hw_done_t abort_done = io->abort_done;
9733 /* first check active_wqe list and remove if there */
9734 if (ocs_list_on_list(&io->wqe_link)) {
9735 ocs_list_remove(&hw->io_timed_wqe, io);
9738 /* Remove from WQ pending list */
9739 if ((io->wq != NULL) && ocs_list_on_list(&io->wq->pending_list)) {
9740 ocs_list_remove(&io->wq->pending_list, io);
9744 void *arg = io->arg;
9747 ocs_unlock(&hw->io_lock);
9748 done(io, io->rnode, 0, SLI4_FC_WCQE_STATUS_SHUTDOWN, 0, arg);
9749 ocs_lock(&hw->io_lock);
9752 if (io->abort_done != NULL) {
9753 void *abort_arg = io->abort_arg;
9755 io->abort_done = NULL;
9756 ocs_unlock(&hw->io_lock);
9757 abort_done(io, io->rnode, 0, SLI4_FC_WCQE_STATUS_SHUTDOWN, 0, abort_arg);
9758 ocs_lock(&hw->io_lock);
9763 ocs_hw_io_cancel(ocs_hw_t *hw)
9765 ocs_hw_io_t *io = NULL;
9766 ocs_hw_io_t *tmp_io = NULL;
9767 uint32_t iters = 100; /* One second limit */
9770 * Manually clean up outstanding IO.
	 * Only walk through the list once: the backend will clean up any IOs when done/abort_done is called.
9773 ocs_lock(&hw->io_lock);
9774 ocs_list_foreach_safe(&hw->io_inuse, io, tmp_io) {
9775 ocs_hw_done_t done = io->done;
9776 ocs_hw_done_t abort_done = io->abort_done;
9778 ocs_hw_io_cancel_cleanup(hw, io);
9781 * Since this is called in a reset/shutdown
		 * case, if there is no callback, then just
9785 * Note: A port owned XRI cannot be on
9786 * the in use list. We cannot call
9787 * ocs_hw_io_free() because we already
9791 abort_done == NULL) {
9793 * Since this is called in a reset/shutdown
			 * case, if there is no callback, then just
9797 ocs_hw_io_free_common(hw, io);
9798 ocs_list_remove(&hw->io_inuse, io);
9799 ocs_hw_io_free_move_correct_list(hw, io);
9804 * For port owned XRIs, they are not on the in use list, so
	 * walk through the XRIs and issue any callbacks.
9807 ocs_list_foreach_safe(&hw->io_port_owned, io, tmp_io) {
9808 /* check list and remove if there */
9809 if (ocs_list_on_list(&io->dnrx_link)) {
9810 ocs_list_remove(&hw->io_port_dnrx, io);
9811 ocs_ref_put(&io->ref); /* ocs_ref_get(): same function */
9813 ocs_hw_io_cancel_cleanup(hw, io);
9814 ocs_list_remove(&hw->io_port_owned, io);
9815 ocs_hw_io_free_common(hw, io);
9817 ocs_unlock(&hw->io_lock);
9819 /* Give time for the callbacks to complete */
9823 } while (!ocs_list_empty(&hw->io_inuse) && iters);
9825 /* Leave a breadcrumb that cleanup is not yet complete. */
9826 if (!ocs_list_empty(&hw->io_inuse)) {
9827 ocs_log_test(hw->os, "io_inuse list is not empty\n");
9834 ocs_hw_io_ini_sge(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_dma_t *cmnd, uint32_t cmnd_size,
9837 sli4_sge_t *data = NULL;
9840 ocs_log_err(NULL, "bad parm hw=%p io=%p\n", hw, io);
9841 return OCS_HW_RTN_ERROR;
9844 data = io->def_sgl.virt;
9846 /* setup command pointer */
9847 data->buffer_address_high = ocs_addr32_hi(cmnd->phys);
9848 data->buffer_address_low = ocs_addr32_lo(cmnd->phys);
9849 data->buffer_length = cmnd_size;
9852 /* setup response pointer */
9853 data->buffer_address_high = ocs_addr32_hi(rsp->phys);
9854 data->buffer_address_low = ocs_addr32_lo(rsp->phys);
9855 data->buffer_length = rsp->size;
9861 __ocs_read_topology_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9863 sli4_cmd_read_topology_t *read_topo = (sli4_cmd_read_topology_t *)mqe;
9865 if (status || read_topo->hdr.status) {
9866 ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n",
9867 status, read_topo->hdr.status);
9868 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9872 switch (read_topo->attention_type) {
9873 case SLI4_READ_TOPOLOGY_LINK_UP:
9874 hw->link.status = SLI_LINK_STATUS_UP;
9876 case SLI4_READ_TOPOLOGY_LINK_DOWN:
9877 hw->link.status = SLI_LINK_STATUS_DOWN;
9879 case SLI4_READ_TOPOLOGY_LINK_NO_ALPA:
9880 hw->link.status = SLI_LINK_STATUS_NO_ALPA;
9883 hw->link.status = SLI_LINK_STATUS_MAX;
9887 switch (read_topo->topology) {
9888 case SLI4_READ_TOPOLOGY_NPORT:
9889 hw->link.topology = SLI_LINK_TOPO_NPORT;
9891 case SLI4_READ_TOPOLOGY_FC_AL:
9892 hw->link.topology = SLI_LINK_TOPO_LOOP;
9893 if (SLI_LINK_STATUS_UP == hw->link.status) {
9894 hw->link.loop_map = hw->loop_map.virt;
9896 hw->link.fc_id = read_topo->acquired_al_pa;
9899 hw->link.topology = SLI_LINK_TOPO_MAX;
9903 hw->link.medium = SLI_LINK_MEDIUM_FC;
9905 switch (read_topo->link_current.link_speed) {
9906 case SLI4_READ_TOPOLOGY_SPEED_1G:
9907 hw->link.speed = 1 * 1000;
9909 case SLI4_READ_TOPOLOGY_SPEED_2G:
9910 hw->link.speed = 2 * 1000;
9912 case SLI4_READ_TOPOLOGY_SPEED_4G:
9913 hw->link.speed = 4 * 1000;
9915 case SLI4_READ_TOPOLOGY_SPEED_8G:
9916 hw->link.speed = 8 * 1000;
9918 case SLI4_READ_TOPOLOGY_SPEED_16G:
9919 hw->link.speed = 16 * 1000;
9920 hw->link.loop_map = NULL;
9922 case SLI4_READ_TOPOLOGY_SPEED_32G:
9923 hw->link.speed = 32 * 1000;
9924 hw->link.loop_map = NULL;
9928 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9930 ocs_hw_read_fcf(hw, SLI4_FCOE_FCF_TABLE_FIRST);
9936 __ocs_hw_port_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
9938 ocs_sli_port_t *sport = ctx->app;
9939 ocs_hw_t *hw = sport->hw;
9948 case OCS_EVT_HW_PORT_REQ_FREE:
9949 case OCS_EVT_HW_PORT_REQ_ATTACH:
9951 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
9955 ocs_log_test(hw->os, "%s %-20s not handled\n", funcname, ocs_sm_event_name(evt));
9963 __ocs_hw_port_free_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
9965 ocs_sli_port_t *sport = ctx->app;
9966 ocs_hw_t *hw = sport->hw;
9973 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
9975 if (hw->callback.port != NULL) {
9976 hw->callback.port(hw->args.port,
9977 OCS_HW_PORT_FREE_FAIL, sport);
9988 __ocs_hw_port_freed(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
9990 ocs_sli_port_t *sport = ctx->app;
9991 ocs_hw_t *hw = sport->hw;
9997 /* free SLI resource */
9998 if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator)) {
9999 ocs_log_err(hw->os, "FCOE_VPI free failure addr=%#x\n", sport->fc_id);
10002 /* free mailbox buffer */
10003 if (data != NULL) {
10004 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10006 if (hw->callback.port != NULL) {
10007 hw->callback.port(hw->args.port,
10008 OCS_HW_PORT_FREE_OK, sport);
10019 __ocs_hw_port_attach_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10021 ocs_sli_port_t *sport = ctx->app;
10022 ocs_hw_t *hw = sport->hw;
10027 case OCS_EVT_ENTER:
10028 /* free SLI resource */
10029 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
10031 /* free mailbox buffer */
10032 if (data != NULL) {
10033 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10036 if (hw->callback.port != NULL) {
10037 hw->callback.port(hw->args.port,
10038 OCS_HW_PORT_ATTACH_FAIL, sport);
10040 if (sport->sm_free_req_pending) {
10041 ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10045 __ocs_hw_port_common(__func__, ctx, evt, data);
10053 __ocs_hw_port_free_unreg_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10055 ocs_sli_port_t *sport = ctx->app;
10056 ocs_hw_t *hw = sport->hw;
10057 uint8_t *cmd = NULL;
10062 case OCS_EVT_ENTER:
10063 /* allocate memory and send unreg_vpi */
10064 cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
10066 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10070 if (0 == sli_cmd_unreg_vpi(&hw->sli, cmd, SLI4_BMBX_SIZE, sport->indicator,
10071 SLI4_UNREG_TYPE_PORT)) {
10072 ocs_log_err(hw->os, "UNREG_VPI format failure\n");
10073 ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
10074 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10078 if (ocs_hw_command(hw, cmd, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
10079 ocs_log_err(hw->os, "UNREG_VPI command failure\n");
10080 ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
10081 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10085 case OCS_EVT_RESPONSE:
10086 ocs_sm_transition(ctx, __ocs_hw_port_freed, data);
10088 case OCS_EVT_ERROR:
10089 ocs_sm_transition(ctx, __ocs_hw_port_free_report_fail, data);
10092 __ocs_hw_port_common(__func__, ctx, evt, data);
10100 __ocs_hw_port_free_nop(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10102 ocs_sli_port_t *sport = ctx->app;
10103 ocs_hw_t *hw = sport->hw;
10108 case OCS_EVT_ENTER:
10109 /* Forward to execute in mailbox completion processing context */
10110 if (ocs_hw_async_call(hw, __ocs_hw_port_realloc_cb, sport)) {
10111 ocs_log_err(hw->os, "ocs_hw_async_call failed\n");
10114 case OCS_EVT_RESPONSE:
10115 ocs_sm_transition(ctx, __ocs_hw_port_freed, data);
10117 case OCS_EVT_ERROR:
10118 ocs_sm_transition(ctx, __ocs_hw_port_free_report_fail, data);
10128 __ocs_hw_port_attached(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10130 ocs_sli_port_t *sport = ctx->app;
10131 ocs_hw_t *hw = sport->hw;
10136 case OCS_EVT_ENTER:
10137 if (data != NULL) {
10138 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10140 if (hw->callback.port != NULL) {
10141 hw->callback.port(hw->args.port,
10142 OCS_HW_PORT_ATTACH_OK, sport);
10144 if (sport->sm_free_req_pending) {
10145 ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10148 case OCS_EVT_HW_PORT_REQ_FREE:
10149 /* virtual/physical port request free */
10150 ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10153 __ocs_hw_port_common(__func__, ctx, evt, data);
10161 __ocs_hw_port_attach_reg_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10163 ocs_sli_port_t *sport = ctx->app;
10164 ocs_hw_t *hw = sport->hw;
10169 case OCS_EVT_ENTER:
10170 if (0 == sli_cmd_reg_vpi(&hw->sli, data, SLI4_BMBX_SIZE, sport, FALSE)) {
10171 ocs_log_err(hw->os, "REG_VPI format failure\n");
10172 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10176 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
10177 ocs_log_err(hw->os, "REG_VPI command failure\n");
10178 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10182 case OCS_EVT_RESPONSE:
10183 ocs_sm_transition(ctx, __ocs_hw_port_attached, data);
10185 case OCS_EVT_ERROR:
10186 ocs_sm_transition(ctx, __ocs_hw_port_attach_report_fail, data);
10188 case OCS_EVT_HW_PORT_REQ_FREE:
10189 /* Wait for attach response and then free */
10190 sport->sm_free_req_pending = 1;
10193 __ocs_hw_port_common(__func__, ctx, evt, data);
10201 __ocs_hw_port_done(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10203 ocs_sli_port_t *sport = ctx->app;
10204 ocs_hw_t *hw = sport->hw;
10209 case OCS_EVT_ENTER:
10210 /* free SLI resource */
10211 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
10213 /* free mailbox buffer */
10214 if (data != NULL) {
10215 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10219 __ocs_hw_port_common(__func__, ctx, evt, data);
10227 __ocs_hw_port_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10229 ocs_sli_port_t *sport = ctx->app;
10230 ocs_hw_t *hw = sport->hw;
10235 case OCS_EVT_ENTER:
10236 if (data != NULL) {
10237 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10239 if (hw->callback.port != NULL) {
10240 hw->callback.port(hw->args.port,
10241 OCS_HW_PORT_ALLOC_OK, sport);
10243 /* If there is a pending free request, then handle it now */
10244 if (sport->sm_free_req_pending) {
10245 ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10248 case OCS_EVT_HW_PORT_REQ_ATTACH:
10249 /* virtual port requests attach */
10250 ocs_sm_transition(ctx, __ocs_hw_port_attach_reg_vpi, data);
10252 case OCS_EVT_HW_PORT_ATTACH_OK:
10253 /* physical port attached (as part of attaching domain) */
10254 ocs_sm_transition(ctx, __ocs_hw_port_attached, data);
10256 case OCS_EVT_HW_PORT_REQ_FREE:
10257 /* virtual port request free */
10258 if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
10259 ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10262 * Note: BE3/Skyhawk will respond with a status of 0x20
10263 * unless the reg_vpi has been issued, so we can
10264 * skip the unreg_vpi for these adapters.
			 * Send a nop to make sure that the free doesn't occur in
			 * the same context.
10269 ocs_sm_transition(ctx, __ocs_hw_port_free_nop, NULL);
10273 __ocs_hw_port_common(__func__, ctx, evt, data);
10281 __ocs_hw_port_alloc_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10283 ocs_sli_port_t *sport = ctx->app;
10284 ocs_hw_t *hw = sport->hw;
10289 case OCS_EVT_ENTER:
10290 /* free SLI resource */
10291 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
10293 /* free mailbox buffer */
10294 if (data != NULL) {
10295 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10298 if (hw->callback.port != NULL) {
10299 hw->callback.port(hw->args.port,
10300 OCS_HW_PORT_ALLOC_FAIL, sport);
10303 /* If there is a pending free request, then handle it now */
10304 if (sport->sm_free_req_pending) {
10305 ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10309 __ocs_hw_port_common(__func__, ctx, evt, data);
10317 __ocs_hw_port_alloc_read_sparm64(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10319 ocs_sli_port_t *sport = ctx->app;
10320 ocs_hw_t *hw = sport->hw;
10321 uint8_t *payload = NULL;
10326 case OCS_EVT_ENTER:
10327 /* allocate memory for the service parameters */
10328 if (ocs_dma_alloc(hw->os, &sport->dma, 112, 4)) {
10329 ocs_log_err(hw->os, "Failed to allocate DMA memory\n");
10330 ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10334 if (0 == sli_cmd_read_sparm64(&hw->sli, data, SLI4_BMBX_SIZE,
10335 &sport->dma, sport->indicator)) {
10336 ocs_log_err(hw->os, "READ_SPARM64 allocation failure\n");
10337 ocs_dma_free(hw->os, &sport->dma);
10338 ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10342 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
10343 ocs_log_err(hw->os, "READ_SPARM64 command failure\n");
10344 ocs_dma_free(hw->os, &sport->dma);
10345 ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10349 case OCS_EVT_RESPONSE:
10350 payload = sport->dma.virt;
10352 ocs_display_sparams(sport->display_name, "sport sparm64", 0, NULL, payload);
10354 ocs_memcpy(&sport->sli_wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET,
10355 sizeof(sport->sli_wwpn));
10356 ocs_memcpy(&sport->sli_wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET,
10357 sizeof(sport->sli_wwnn));
10359 ocs_dma_free(hw->os, &sport->dma);
10360 ocs_sm_transition(ctx, __ocs_hw_port_alloc_init_vpi, data);
10362 case OCS_EVT_ERROR:
10363 ocs_dma_free(hw->os, &sport->dma);
10364 ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, data);
10366 case OCS_EVT_HW_PORT_REQ_FREE:
10367 /* Wait for attach response and then free */
10368 sport->sm_free_req_pending = 1;
10373 __ocs_hw_port_common(__func__, ctx, evt, data);
10381 __ocs_hw_port_alloc_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10383 ocs_sli_port_t *sport = ctx->app;
10388 case OCS_EVT_ENTER:
10391 case OCS_EVT_HW_PORT_ALLOC_OK:
10392 ocs_sm_transition(ctx, __ocs_hw_port_allocated, NULL);
10394 case OCS_EVT_HW_PORT_ALLOC_FAIL:
10395 ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, NULL);
10397 case OCS_EVT_HW_PORT_REQ_FREE:
10398 /* Wait for attach response and then free */
10399 sport->sm_free_req_pending = 1;
10402 __ocs_hw_port_common(__func__, ctx, evt, data);
10410 __ocs_hw_port_alloc_init_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10412 ocs_sli_port_t *sport = ctx->app;
10413 ocs_hw_t *hw = sport->hw;
10418 case OCS_EVT_ENTER:
10419 /* If there is a pending free request, then handle it now */
10420 if (sport->sm_free_req_pending) {
10421 ocs_sm_transition(ctx, __ocs_hw_port_freed, NULL);
10425 /* TODO XXX transitioning to done only works if this is called
10426 * directly from ocs_hw_port_alloc BUT not if called from
		 * read_sparm64. In the latter case, we actually want to go
10428 * through report_ok/fail
10430 if (0 == sli_cmd_init_vpi(&hw->sli, data, SLI4_BMBX_SIZE,
10431 sport->indicator, sport->domain->indicator)) {
10432 ocs_log_err(hw->os, "INIT_VPI allocation failure\n");
10433 ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10437 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
10438 ocs_log_err(hw->os, "INIT_VPI command failure\n");
10439 ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10443 case OCS_EVT_RESPONSE:
10444 ocs_sm_transition(ctx, __ocs_hw_port_allocated, data);
10446 case OCS_EVT_ERROR:
10447 ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, data);
10449 case OCS_EVT_HW_PORT_REQ_FREE:
10450 /* Wait for attach response and then free */
10451 sport->sm_free_req_pending = 1;
10456 __ocs_hw_port_common(__func__, ctx, evt, data);
10464 __ocs_hw_port_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
10466 ocs_sli_port_t *sport = arg;
10467 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
10468 ocs_sm_event_t evt;
10470 if (status || hdr->status) {
10471 ocs_log_debug(hw->os, "bad status vpi=%#x st=%x hdr=%x\n",
10472 sport->indicator, status, hdr->status);
10473 evt = OCS_EVT_ERROR;
10475 evt = OCS_EVT_RESPONSE;
10478 ocs_sm_post_event(&sport->ctx, evt, mqe);
10484 __ocs_hw_port_realloc_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
10486 ocs_sli_port_t *sport = arg;
10487 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
10488 ocs_sm_event_t evt;
10491 if (status || hdr->status) {
10492 ocs_log_debug(hw->os, "bad status vpi=%#x st=%x hdr=%x\n",
10493 sport->indicator, status, hdr->status);
10494 evt = OCS_EVT_ERROR;
10496 evt = OCS_EVT_RESPONSE;
10500 * In this case we have to malloc a mailbox command buffer, as it is reused
10501 * in the state machine post event call, and eventually freed
10503 mqecpy = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
10504 if (mqecpy == NULL) {
10505 ocs_log_err(hw->os, "malloc mqecpy failed\n");
10508 ocs_memcpy(mqecpy, mqe, SLI4_BMBX_SIZE);
10510 ocs_sm_post_event(&sport->ctx, evt, mqecpy);
10515 /***************************************************************************
10516 * Domain state machine
10520 __ocs_hw_domain_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10522 ocs_domain_t *domain = ctx->app;
10523 ocs_hw_t *hw = domain->hw;
10533 ocs_log_test(hw->os, "%s %-20s not handled\n", funcname, ocs_sm_event_name(evt));
10541 __ocs_hw_domain_alloc_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10543 ocs_domain_t *domain = ctx->app;
10544 ocs_hw_t *hw = domain->hw;
10549 case OCS_EVT_ENTER:
10550 /* free command buffer */
10551 if (data != NULL) {
10552 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10554 /* free SLI resources */
10555 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator);
10556 /* TODO how to free FCFI (or do we at all)? */
10558 if (hw->callback.domain != NULL) {
10559 hw->callback.domain(hw->args.domain,
10560 OCS_HW_DOMAIN_ALLOC_FAIL,
10565 __ocs_hw_domain_common(__func__, ctx, evt, data);
10573 __ocs_hw_domain_attached(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10575 ocs_domain_t *domain = ctx->app;
10576 ocs_hw_t *hw = domain->hw;
10581 case OCS_EVT_ENTER:
10582 /* free mailbox buffer and send alloc ok to physical sport */
10583 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10584 ocs_sm_post_event(&domain->sport->ctx, OCS_EVT_HW_PORT_ATTACH_OK, NULL);
10586 /* now inform registered callbacks */
10587 if (hw->callback.domain != NULL) {
10588 hw->callback.domain(hw->args.domain,
10589 OCS_HW_DOMAIN_ATTACH_OK,
10593 case OCS_EVT_HW_DOMAIN_REQ_FREE:
10594 ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_vfi, NULL);
10597 __ocs_hw_domain_common(__func__, ctx, evt, data);
10605 __ocs_hw_domain_attach_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10607 ocs_domain_t *domain = ctx->app;
10608 ocs_hw_t *hw = domain->hw;
10613 case OCS_EVT_ENTER:
10614 if (data != NULL) {
10615 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10617 /* free SLI resources */
10618 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator);
10619 /* TODO how to free FCFI (or do we at all)? */
10621 if (hw->callback.domain != NULL) {
10622 hw->callback.domain(hw->args.domain,
10623 OCS_HW_DOMAIN_ATTACH_FAIL,
10630 __ocs_hw_domain_common(__func__, ctx, evt, data);
10638 __ocs_hw_domain_attach_reg_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10640 ocs_domain_t *domain = ctx->app;
10641 ocs_hw_t *hw = domain->hw;
10646 case OCS_EVT_ENTER:
10648 ocs_display_sparams("", "reg vpi", 0, NULL, domain->dma.virt);
10650 if (0 == sli_cmd_reg_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain)) {
10651 ocs_log_err(hw->os, "REG_VFI format failure\n");
10652 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10656 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
10657 ocs_log_err(hw->os, "REG_VFI command failure\n");
10658 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10662 case OCS_EVT_RESPONSE:
10663 ocs_sm_transition(ctx, __ocs_hw_domain_attached, data);
10665 case OCS_EVT_ERROR:
10666 ocs_sm_transition(ctx, __ocs_hw_domain_attach_report_fail, data);
10669 __ocs_hw_domain_common(__func__, ctx, evt, data);
10677 __ocs_hw_domain_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10679 ocs_domain_t *domain = ctx->app;
10680 ocs_hw_t *hw = domain->hw;
10685 case OCS_EVT_ENTER:
10686 /* free mailbox buffer and send alloc ok to physical sport */
10687 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10688 ocs_sm_post_event(&domain->sport->ctx, OCS_EVT_HW_PORT_ALLOC_OK, NULL);
10690 ocs_hw_domain_add(hw, domain);
10692 /* now inform registered callbacks */
10693 if (hw->callback.domain != NULL) {
10694 hw->callback.domain(hw->args.domain,
10695 OCS_HW_DOMAIN_ALLOC_OK,
10699 case OCS_EVT_HW_DOMAIN_REQ_ATTACH:
10700 ocs_sm_transition(ctx, __ocs_hw_domain_attach_reg_vfi, data);
10702 case OCS_EVT_HW_DOMAIN_REQ_FREE:
10703 /* unreg_fcfi/vfi */
10704 if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) {
10705 ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, NULL);
10707 ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_vfi, NULL);
10711 __ocs_hw_domain_common(__func__, ctx, evt, data);
10719 __ocs_hw_domain_alloc_read_sparm64(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10721 ocs_domain_t *domain = ctx->app;
10722 ocs_hw_t *hw = domain->hw;
10727 case OCS_EVT_ENTER:
10728 if (0 == sli_cmd_read_sparm64(&hw->sli, data, SLI4_BMBX_SIZE,
10729 &domain->dma, SLI4_READ_SPARM64_VPI_DEFAULT)) {
10730 ocs_log_err(hw->os, "READ_SPARM64 format failure\n");
10731 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10735 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
10736 ocs_log_err(hw->os, "READ_SPARM64 command failure\n");
10737 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10743 case OCS_EVT_RESPONSE:
10744 ocs_display_sparams(domain->display_name, "domain sparm64", 0, NULL, domain->dma.virt);
10746 ocs_sm_transition(ctx, __ocs_hw_domain_allocated, data);
10748 case OCS_EVT_ERROR:
10749 ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data);
10752 __ocs_hw_domain_common(__func__, ctx, evt, data);
10760 __ocs_hw_domain_alloc_init_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10762 ocs_domain_t *domain = ctx->app;
10763 ocs_sli_port_t *sport = domain->sport;
10764 ocs_hw_t *hw = domain->hw;
10769 case OCS_EVT_ENTER:
10770 if (0 == sli_cmd_init_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->indicator,
10771 domain->fcf_indicator, sport->indicator)) {
10772 ocs_log_err(hw->os, "INIT_VFI format failure\n");
10773 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10776 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
10777 ocs_log_err(hw->os, "INIT_VFI command failure\n");
10778 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10784 case OCS_EVT_RESPONSE:
10785 ocs_sm_transition(ctx, __ocs_hw_domain_alloc_read_sparm64, data);
10787 case OCS_EVT_ERROR:
10788 ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data);
10791 __ocs_hw_domain_common(__func__, ctx, evt, data);
10799 __ocs_hw_domain_alloc_reg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10801 ocs_domain_t *domain = ctx->app;
10802 ocs_hw_t *hw = domain->hw;
10807 case OCS_EVT_ENTER: {
10808 sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
10811 /* Set the filter match/mask values from hw's filter_def values */
10812 for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
10813 rq_cfg[i].rq_id = 0xffff;
10814 rq_cfg[i].r_ctl_mask = (uint8_t) hw->config.filter_def[i];
10815 rq_cfg[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8);
10816 rq_cfg[i].type_mask = (uint8_t) (hw->config.filter_def[i] >> 16);
10817 rq_cfg[i].type_match = (uint8_t) (hw->config.filter_def[i] >> 24);
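/*
 * Example (hypothetical value): filter_def[i] == 0xAABBCCDD unpacks, LSB
 * first, to r_ctl_mask=0xDD, r_ctl_match=0xCC, type_mask=0xBB, type_match=0xAA.
 */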
10820 /* Set the rq_id for each, in order of RQ definition */
10821 for (i = 0; i < hw->hw_rq_count; i++) {
10822 if (i >= ARRAY_SIZE(rq_cfg)) {
10823 ocs_log_warn(hw->os, "more RQs than REG_FCFI filter entries\n");
10826 rq_cfg[i].rq_id = hw->hw_rq[i]->hdr->id;
10830 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10834 if (hw->hw_mrq_count) {
10835 if (OCS_HW_RTN_SUCCESS != ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE,
10836 domain->vlan_id, domain->fcf)) {
10837 ocs_log_err(hw->os, "REG_FCFI_MRQ format failure\n");
10838 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10843 if (0 == sli_cmd_reg_fcfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf,
10844 rq_cfg, domain->vlan_id)) {
10845 ocs_log_err(hw->os, "REG_FCFI format failure\n");
10846 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10851 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
10852 ocs_log_err(hw->os, "REG_FCFI command failure\n");
10853 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10860 case OCS_EVT_RESPONSE:
10862 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10866 domain->fcf_indicator = ((sli4_cmd_reg_fcfi_t *)data)->fcfi;
10869 * IF_TYPE 0 devices do not support explicit VFI and VPI initialization
10870 * and instead rely on implicit initialization during VFI registration.
10871 * Short circuit normal processing here for those devices.
10873 if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) {
10874 ocs_sm_transition(ctx, __ocs_hw_domain_alloc_read_sparm64, data);
10876 ocs_sm_transition(ctx, __ocs_hw_domain_alloc_init_vfi, data);
10879 case OCS_EVT_ERROR:
10880 ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data);
10883 __ocs_hw_domain_common(__func__, ctx, evt, data);
10891 __ocs_hw_domain_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10893 ocs_domain_t *domain = ctx->app;
10894 ocs_hw_t *hw = domain->hw;
10899 case OCS_EVT_ENTER:
10900 if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) {
10902 * For FC, the HW already registered an FCFI
10903 * Copy FCF information into the domain and jump to INIT_VFI
10905 domain->fcf_indicator = hw->fcf_indicator;
10906 ocs_sm_transition(&domain->sm, __ocs_hw_domain_alloc_init_vfi, data);
10908 ocs_sm_transition(&domain->sm, __ocs_hw_domain_alloc_reg_fcfi, data);
10912 __ocs_hw_domain_common(__func__, ctx, evt, data);
10920 __ocs_hw_domain_free_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10922 ocs_domain_t *domain = ctx->app;
10927 case OCS_EVT_ENTER:
10928 if (domain != NULL) {
10929 ocs_hw_t *hw = domain->hw;
10931 ocs_hw_domain_del(hw, domain);
10933 if (hw->callback.domain != NULL) {
10934 hw->callback.domain(hw->args.domain,
10935 OCS_HW_DOMAIN_FREE_FAIL,
10940 /* free command buffer */
10941 if (data != NULL) {
10942 ocs_free(domain != NULL ? domain->hw->os : NULL, data, SLI4_BMBX_SIZE);
10948 __ocs_hw_domain_common(__func__, ctx, evt, data);
10956 __ocs_hw_domain_freed(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10958 ocs_domain_t *domain = ctx->app;
10963 case OCS_EVT_ENTER:
10964 /* Free DMA and mailbox buffer */
10965 if (domain != NULL) {
10966 ocs_hw_t *hw = domain->hw;
10968 /* free VFI resource */
10969 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI,
10970 domain->indicator);
10972 ocs_hw_domain_del(hw, domain);
10974 /* inform registered callbacks */
10975 if (hw->callback.domain != NULL) {
10976 hw->callback.domain(hw->args.domain,
10977 OCS_HW_DOMAIN_FREE_OK,
10981 if (data != NULL) {
10982 ocs_free(NULL, data, SLI4_BMBX_SIZE);
10988 __ocs_hw_domain_common(__func__, ctx, evt, data);
10996 __ocs_hw_domain_free_redisc_fcf(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10998 ocs_domain_t *domain = ctx->app;
10999 ocs_hw_t *hw = domain->hw;
11004 case OCS_EVT_ENTER:
11005 /* if we're in the middle of a teardown, skip sending rediscover */
11006 if (hw->state == OCS_HW_STATE_TEARDOWN_IN_PROGRESS) {
11007 ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
11010 if (0 == sli_cmd_fcoe_rediscover_fcf(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf)) {
11011 ocs_log_err(hw->os, "REDISCOVER_FCF format failure\n");
11012 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11016 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
11017 ocs_log_err(hw->os, "REDISCOVER_FCF command failure\n");
11018 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11021 case OCS_EVT_RESPONSE:
11022 case OCS_EVT_ERROR:
11023 /* REDISCOVER_FCF can fail if none exist */
11024 ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
11029 __ocs_hw_domain_common(__func__, ctx, evt, data);
11037 __ocs_hw_domain_free_unreg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
11039 ocs_domain_t *domain = ctx->app;
11040 ocs_hw_t *hw = domain->hw;
11045 case OCS_EVT_ENTER:
11046 if (data == NULL) {
11047 data = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
11049 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11054 if (0 == sli_cmd_unreg_fcfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf_indicator)) {
11055 ocs_log_err(hw->os, "UNREG_FCFI format failure\n");
11056 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
11057 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11061 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
11062 ocs_log_err(hw->os, "UNREG_FCFI command failure\n");
11063 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
11064 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11068 case OCS_EVT_RESPONSE:
11069 if (domain->req_rediscover_fcf) {
11070 domain->req_rediscover_fcf = FALSE;
11071 ocs_sm_transition(ctx, __ocs_hw_domain_free_redisc_fcf, data);
11073 ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
11076 case OCS_EVT_ERROR:
11077 ocs_sm_transition(ctx, __ocs_hw_domain_free_report_fail, data);
11082 __ocs_hw_domain_common(__func__, ctx, evt, data);
11090 __ocs_hw_domain_free_unreg_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
11092 ocs_domain_t *domain = ctx->app;
11093 ocs_hw_t *hw = domain->hw;
11094 uint8_t is_fc = FALSE;
11098 is_fc = (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC);
11101 case OCS_EVT_ENTER:
11102 if (data == NULL) {
11103 data = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
11105 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11110 if (0 == sli_cmd_unreg_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain,
11111 SLI4_UNREG_TYPE_DOMAIN)) {
11112 ocs_log_err(hw->os, "UNREG_VFI format failure\n");
11113 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
11114 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11118 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
11119 ocs_log_err(hw->os, "UNREG_VFI command failure\n");
11120 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
11121 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11125 case OCS_EVT_ERROR:
11127 ocs_sm_transition(ctx, __ocs_hw_domain_free_report_fail, data);
11129 ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, data);
11132 case OCS_EVT_RESPONSE:
11134 ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
11136 ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, data);
11140 __ocs_hw_domain_common(__func__, ctx, evt, data);
11147 /* callback for domain alloc/attach/free */
11149 __ocs_hw_domain_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
11151 ocs_domain_t *domain = arg;
11152 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
11153 ocs_sm_event_t evt;
11155 if (status || hdr->status) {
11156 ocs_log_debug(hw->os, "bad status vfi=%#x st=%x hdr=%x\n",
11157 domain->indicator, status, hdr->status);
11158 evt = OCS_EVT_ERROR;
11160 evt = OCS_EVT_RESPONSE;
11163 ocs_sm_post_event(&domain->sm, evt, mqe);
11169 target_wqe_timer_nop_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
11171 ocs_hw_io_t *io = NULL;
11172 ocs_hw_io_t *io_next = NULL;
11173 uint64_t ticks_current = ocs_get_os_ticks();
11174 uint32_t sec_elapsed;
11177 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
11179 if (status || hdr->status) {
11180 ocs_log_debug(hw->os, "bad status st=%x hdr=%x\n",
11181 status, hdr->status);
11182 /* go ahead and proceed with wqe timer checks... */
11185 /* loop through active WQE list and check for timeouts */
11186 ocs_lock(&hw->io_lock);
11187 ocs_list_foreach_safe(&hw->io_timed_wqe, io, io_next) {
11188 sec_elapsed = ((ticks_current - io->submit_ticks) / ocs_get_os_tick_freq());
11191 * If elapsed time > timeout, abort it. No need to check type since
11192 * it wouldn't be on this list unless it was a target WQE
11194 if (sec_elapsed > io->tgt_wqe_timeout) {
11195 ocs_log_test(hw->os, "IO timeout xri=0x%x tag=0x%x type=%d\n",
11196 io->indicator, io->reqtag, io->type);
11198 /* remove from active_wqe list so won't try to abort again */
11199 ocs_list_remove(&hw->io_timed_wqe, io);
11201 /* save status of "timed out" for when abort completes */
11202 io->status_saved = 1;
11203 io->saved_status = SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT;
11207 /* now abort outstanding IO */
11208 rc = ocs_hw_io_abort(hw, io, FALSE, NULL, NULL);
11210 ocs_log_test(hw->os,
11211 "abort failed xri=%#x tag=%#x rc=%d\n",
11212 io->indicator, io->reqtag, rc);
11216 * need to go through entire list since each IO could have a
11217 * different timeout value
11220 ocs_unlock(&hw->io_lock);
11222 /* if we're not in the middle of shutting down, schedule next timer */
11223 if (!hw->active_wqe_timer_shutdown) {
11224 ocs_setup_timer(hw->os, &hw->wqe_timer, target_wqe_timer_cb, hw, OCS_HW_WQ_TIMER_PERIOD_MS);
11226 hw->in_active_wqe_timer = FALSE;
11231 target_wqe_timer_cb(void *arg)
11233 ocs_hw_t *hw = (ocs_hw_t *)arg;
11235 /* delete existing timer; will kick off new timer after checking wqe timeouts */
11236 hw->in_active_wqe_timer = TRUE;
11237 ocs_del_timer(&hw->wqe_timer);
11239 /* Forward timer callback to execute in the mailbox completion processing context */
11240 if (ocs_hw_async_call(hw, target_wqe_timer_nop_cb, hw)) {
11241 ocs_log_test(hw->os, "ocs_hw_async_call failed\n");
11246 shutdown_target_wqe_timer(ocs_hw_t *hw)
11248 uint32_t iters = 100;
11250 if (hw->config.emulate_tgt_wqe_timeout) {
11251 /* request active wqe timer shutdown, then wait for it to complete */
11252 hw->active_wqe_timer_shutdown = TRUE;
11254 /* delete WQE timer and wait for timer handler to complete (if necessary) */
11255 ocs_del_timer(&hw->wqe_timer);
11257 /* now wait for timer handler to complete (if necessary) */
11258 while (hw->in_active_wqe_timer && iters) {
11260 * if we happen to have just sent a NOP mailbox command, make sure
11261 * completions are being processed
11268 ocs_log_test(hw->os, "Failed to shut down active wqe timer\n");
11274 * @brief Determine if HW IO is owned by the port.
11277 * Determines if the given HW IO has been posted to the chip.
11279 * @param hw Hardware context allocated by the caller.
11282 * @return Returns TRUE if given HW IO is port-owned.
11285 ocs_hw_is_io_port_owned(ocs_hw_t *hw, ocs_hw_io_t *io)
11287 /* Check to see if this is a port owned XRI */
11288 return io->is_port_owned;
11292 * @brief Return TRUE if exchange is port-owned.
11295 * Test to see if the xri is a port-owned xri.
11297 * @param hw Hardware context.
11298 * @param xri Exchange indicator.
11300 * @return Returns TRUE if XRI is a port owned XRI.
11304 ocs_hw_is_xri_port_owned(ocs_hw_t *hw, uint32_t xri)
11306 ocs_hw_io_t *io = ocs_hw_io_lookup(hw, xri);
11307 return (io == NULL ? FALSE : io->is_port_owned);
11311 * @brief Returns an XRI from the port owned list to the host.
11314 * Used when the POST_XRI command fails as well as when the RELEASE_XRI completes.
11316 * @param hw Hardware context.
11317 * @param xri_base The starting XRI number.
11318 * @param xri_count The number of XRIs to free from the base.
11321 ocs_hw_reclaim_xri(ocs_hw_t *hw, uint16_t xri_base, uint16_t xri_count)
11326 for (i = 0; i < xri_count; i++) {
11327 io = ocs_hw_io_lookup(hw, xri_base + i);
11330 * if this is an auto xfer rdy XRI, then we need to release any
11331 * buffer attached to the XRI before moving the XRI back to the free pool.
11333 if (hw->auto_xfer_rdy_enabled) {
11334 ocs_hw_rqpair_auto_xfer_rdy_move_to_host(hw, io);
11337 ocs_lock(&hw->io_lock);
11338 ocs_list_remove(&hw->io_port_owned, io);
11339 io->is_port_owned = 0;
11340 ocs_list_add_tail(&hw->io_free, io);
11341 ocs_unlock(&hw->io_lock);
11346 * @brief Called when the POST_XRI command completes.
11349 * Free the mailbox command buffer and reclaim the XRIs on failure.
11351 * @param hw Hardware context.
11352 * @param status Status field from the mbox completion.
11353 * @param mqe Mailbox response structure.
11354 * @param arg Pointer to a callback function that signals the caller that the command is done.
11356 * @return Returns 0.
11359 ocs_hw_cb_post_xri(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
11361 sli4_cmd_post_xri_t *post_xri = (sli4_cmd_post_xri_t*)mqe;
11363 /* Reclaim the XRIs as host owned if the command fails */
11365 ocs_log_debug(hw->os, "Status 0x%x for XRI base 0x%x, cnt=0x%x\n",
11366 status, post_xri->xri_base, post_xri->xri_count);
11367 ocs_hw_reclaim_xri(hw, post_xri->xri_base, post_xri->xri_count);
11370 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
11375 * @brief Issues a mailbox command to move XRIs from the host-controlled pool to the port.
11377 * @param hw Hardware context.
11378 * @param xri_start The starting XRI to post.
11379 * @param num_to_post The number of XRIs to post.
11381 * @return Returns OCS_HW_RTN_NO_MEMORY, OCS_HW_RTN_ERROR, or OCS_HW_RTN_SUCCESS.
11384 static ocs_hw_rtn_e
11385 ocs_hw_post_xri(ocs_hw_t *hw, uint32_t xri_start, uint32_t num_to_post)
11388 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
11390 /* Since we need a buffer for the mailbox queue, just always allocate */
11391 post_xri = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
11392 if (post_xri == NULL) {
11393 ocs_log_err(hw->os, "no buffer for command\n");
11394 return OCS_HW_RTN_NO_MEMORY;
11397 /* Register the XRIs */
11398 if (sli_cmd_post_xri(&hw->sli, post_xri, SLI4_BMBX_SIZE,
11399 xri_start, num_to_post)) {
11400 rc = ocs_hw_command(hw, post_xri, OCS_CMD_NOWAIT, ocs_hw_cb_post_xri, NULL);
11401 if (rc != OCS_HW_RTN_SUCCESS) {
11402 ocs_free(hw->os, post_xri, SLI4_BMBX_SIZE);
11403 ocs_log_err(hw->os, "post_xri failed\n");
11410 * @brief Move XRIs from the host-controlled pool to the port.
11413 * Removes IOs from the free list and moves them to the port.
11415 * @param hw Hardware context.
11416 * @param num_xri The number of XRIs being requested to move to the chip.
11418 * @return Returns the number of XRIs that were moved.
11422 ocs_hw_xri_move_to_port_owned(ocs_hw_t *hw, uint32_t num_xri)
11426 uint32_t num_posted = 0;
11429 * Note: We cannot use ocs_hw_io_alloc() because that would place the
11430 * IO on the io_inuse list. We need to move from the io_free to
11431 * the io_port_owned list.
11433 ocs_lock(&hw->io_lock);
11435 for (i = 0; i < num_xri; i++) {
11436 if (NULL != (io = ocs_list_remove_head(&hw->io_free))) {
11440 * if this is an auto xfer rdy XRI, then we need to attach a
11441 * buffer to the XRI before submitting it to the chip. If a
11442 * buffer is unavailable, then we cannot post it, so return it
11443 * to the free pool.
11445 if (hw->auto_xfer_rdy_enabled) {
11446 /* Note: uses the IO lock to get the auto xfer rdy buffer */
11447 ocs_unlock(&hw->io_lock);
11448 rc = ocs_hw_rqpair_auto_xfer_rdy_move_to_port(hw, io);
11449 ocs_lock(&hw->io_lock);
11450 if (rc != OCS_HW_RTN_SUCCESS) {
11451 ocs_list_add_head(&hw->io_free, io);
11455 ocs_lock_init(hw->os, &io->axr_lock, "HW_axr_lock[%d]", io->indicator);
11456 io->is_port_owned = 1;
11457 ocs_list_add_tail(&hw->io_port_owned, io);
11460 if (ocs_hw_post_xri(hw, io->indicator, 1) != OCS_HW_RTN_SUCCESS ) {
11461 ocs_hw_reclaim_xri(hw, io->indicator, i);
11466 /* no more free XRIs */
11470 ocs_unlock(&hw->io_lock);
11476 * @brief Called when the RELEASE_XRI command completes.
11479 * Move the IOs back to the free pool on success.
11481 * @param hw Hardware context.
11482 * @param status Status field from the mbox completion.
11483 * @param mqe Mailbox response structure.
11484 * @param arg Pointer to a callback function that signals the caller that the command is done.
11486 * @return Returns 0.
11489 ocs_hw_cb_release_xri(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
11491 sli4_cmd_release_xri_t *release_xri = (sli4_cmd_release_xri_t*)mqe;
11494 /* Reclaim the XRIs as host owned if the command fails */
11496 ocs_log_err(hw->os, "Status 0x%x\n", status);
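/*
 * Each RELEASE_XRI response entry packs two 16-bit XRI tags; even loop
 * indices read xri_tag0 and odd indices read xri_tag1 of entry i/2.
 */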
11498 for (i = 0; i < release_xri->released_xri_count; i++) {
11499 uint16_t xri = ((i & 1) == 0 ? release_xri->xri_tbl[i/2].xri_tag0 :
11500 release_xri->xri_tbl[i/2].xri_tag1);
11501 ocs_hw_reclaim_xri(hw, xri, 1);
11505 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
11510 * @brief Move XRIs from the port-controlled pool to the host.
11512 * Requests XRIs from the FW to return to the host-owned pool.
11514 * @param hw Hardware context.
11515 * @param num_xri The number of XRIs being requested to moved from the chip.
11517 * @return Returns 0 for success, or a negative error code value for failure.
11521 ocs_hw_xri_move_to_host_owned(ocs_hw_t *hw, uint8_t num_xri)
11523 uint8_t *release_xri;
11524 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
11526 /* non-local buffer required for mailbox queue */
11527 release_xri = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
11528 if (release_xri == NULL) {
11529 ocs_log_err(hw->os, "no buffer for command\n");
11530 return OCS_HW_RTN_NO_MEMORY;
11533 /* release the XRIs */
11534 if (sli_cmd_release_xri(&hw->sli, release_xri, SLI4_BMBX_SIZE, num_xri)) {
11535 rc = ocs_hw_command(hw, release_xri, OCS_CMD_NOWAIT, ocs_hw_cb_release_xri, NULL);
11536 if (rc != OCS_HW_RTN_SUCCESS) {
11537 ocs_log_err(hw->os, "release_xri failed\n");
11540 /* If we are polling or an error occurred, then free the mailbox buffer */
11541 if (release_xri != NULL && rc != OCS_HW_RTN_SUCCESS) {
11542 ocs_free(hw->os, release_xri, SLI4_BMBX_SIZE);
11548 * @brief Allocate an ocs_hw_rq_buffer_t array.
11551 * An ocs_hw_rq_buffer_t array is allocated, along with the required DMA memory.
11553 * @param hw Pointer to HW object.
11554 * @param rqindex RQ index for this buffer.
11555 * @param count Count of buffers in array.
11556 * @param size Size of buffer.
11558 * @return Returns the pointer to the allocated ocs_hw_rq_buffer_t array.
11560 static ocs_hw_rq_buffer_t *
11561 ocs_hw_rx_buffer_alloc(ocs_hw_t *hw, uint32_t rqindex, uint32_t count, uint32_t size)
11563 ocs_t *ocs = hw->os;
11564 ocs_hw_rq_buffer_t *rq_buf = NULL;
11565 ocs_hw_rq_buffer_t *prq;
11569 rq_buf = ocs_malloc(hw->os, sizeof(*rq_buf) * count, OCS_M_NOWAIT | OCS_M_ZERO);
11570 if (rq_buf == NULL) {
11571 ocs_log_err(hw->os, "Failure to allocate unsolicited DMA trackers\n");
11575 for (i = 0, prq = rq_buf; i < count; i ++, prq++) {
11576 prq->rqindex = rqindex;
11577 if (ocs_dma_alloc(ocs, &prq->dma, size, OCS_MIN_DMA_ALIGNMENT)) {
11578 ocs_log_err(hw->os, "DMA allocation failed\n");
11579 ocs_free(hw->os, rq_buf, sizeof(*rq_buf) * count);
11589 * @brief Free an ocs_hw_rq_buffer_t array.
11592 * The ocs_hw_rq_buffer_t array is freed, along with the allocated DMA memory.
11594 * @param hw Pointer to HW object.
11595 * @param rq_buf Pointer to the ocs_hw_rq_buffer_t array.
11596 * @param count Count of buffers in array.
11601 ocs_hw_rx_buffer_free(ocs_hw_t *hw, ocs_hw_rq_buffer_t *rq_buf, uint32_t count)
11603 ocs_t *ocs = hw->os;
11605 ocs_hw_rq_buffer_t *prq;
11607 if (rq_buf != NULL) {
11608 for (i = 0, prq = rq_buf; i < count; i++, prq++) {
11609 ocs_dma_free(ocs, &prq->dma);
11611 ocs_free(hw->os, rq_buf, sizeof(*rq_buf) * count);
11616 * @brief Allocate the RQ data buffers.
11618 * @param hw Pointer to HW object.
11620 * @return Returns 0 on success, or a non-zero value on failure.
11623 ocs_hw_rx_allocate(ocs_hw_t *hw)
11625 ocs_t *ocs = hw->os;
11627 int32_t rc = OCS_HW_RTN_SUCCESS;
11628 uint32_t rqindex = 0;
11630 uint32_t hdr_size = OCS_HW_RQ_SIZE_HDR;
11631 uint32_t payload_size = hw->config.rq_default_buffer_size;
11635 for (i = 0; i < hw->hw_rq_count; i++) {
11638 /* Allocate header buffers */
11639 rq->hdr_buf = ocs_hw_rx_buffer_alloc(hw, rqindex, rq->entry_count, hdr_size);
11640 if (rq->hdr_buf == NULL) {
11641 ocs_log_err(ocs, "ocs_hw_rx_buffer_alloc hdr_buf failed\n");
11642 rc = OCS_HW_RTN_ERROR;
11646 ocs_log_debug(hw->os, "rq[%2d] rq_id %02d header %4d by %4d bytes\n", i, rq->hdr->id,
11647 rq->entry_count, hdr_size);
11651 /* Allocate payload buffers */
11652 rq->payload_buf = ocs_hw_rx_buffer_alloc(hw, rqindex, rq->entry_count, payload_size);
11653 if (rq->payload_buf == NULL) {
11654 ocs_log_err(ocs, "ocs_hw_rx_buffer_alloc payload_buf failed\n");
11655 rc = OCS_HW_RTN_ERROR;
11658 ocs_log_debug(hw->os, "rq[%2d] rq_id %02d default %4d by %4d bytes\n", i, rq->data->id,
11659 rq->entry_count, payload_size);
11663 return rc ? OCS_HW_RTN_ERROR : OCS_HW_RTN_SUCCESS;
11667 * @brief Post the RQ data buffers to the chip.
11669 * @param hw Pointer to HW object.
11671 * @return Returns 0 on success, or a non-zero value on failure.
11674 ocs_hw_rx_post(ocs_hw_t *hw)
11682 * In RQ pair mode, we MUST post the header and payload buffer at the same time.
11685 for (rq_idx = 0, idx = 0; rq_idx < hw->hw_rq_count; rq_idx++) {
11686 hw_rq_t *rq = hw->hw_rq[rq_idx];
11688 for (i = 0; i < rq->entry_count-1; i++) {
11689 ocs_hw_sequence_t *seq = ocs_array_get(hw->seq_pool, idx++);
11690 ocs_hw_assert(seq != NULL);
11692 seq->header = &rq->hdr_buf[i];
11694 seq->payload = &rq->payload_buf[i];
11696 rc = ocs_hw_sequence_free(hw, seq);
11710 * @brief Free the RQ data buffers.
11712 * @param hw Pointer to HW object.
11716 ocs_hw_rx_free(ocs_hw_t *hw)
11721 /* Free hw_rq buffers */
11722 for (i = 0; i < hw->hw_rq_count; i++) {
11725 ocs_hw_rx_buffer_free(hw, rq->hdr_buf, rq->entry_count);
11726 rq->hdr_buf = NULL;
11727 ocs_hw_rx_buffer_free(hw, rq->payload_buf, rq->entry_count);
11728 rq->payload_buf = NULL;
11734 * @brief HW async call context structure.
11737 ocs_hw_async_cb_t callback;
11739 uint8_t cmd[SLI4_BMBX_SIZE];
11740 } ocs_hw_async_call_ctx_t;
11743 * @brief HW async callback handler
11746 * This function is called when the NOP mailbox command completes. The callback stored
11747 * in the requesting context is invoked.
11749 * @param hw Pointer to HW object.
11750 * @param status Completion status.
11751 * @param mqe Pointer to mailbox completion queue entry.
11752 * @param arg Caller-provided argument.
11757 ocs_hw_async_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
11759 ocs_hw_async_call_ctx_t *ctx = arg;
11762 if (ctx->callback != NULL) {
11763 (*ctx->callback)(hw, status, mqe, ctx->arg);
11765 ocs_free(hw->os, ctx, sizeof(*ctx));
11770 * @brief Make an async callback using NOP mailbox command
11773 * Post a NOP mailbox command; the callback with argument is invoked upon completion
11774 * while in the event processing context.
11776 * @param hw Pointer to HW object.
11777 * @param callback Pointer to callback function.
11778 * @param arg Caller-provided callback.
11780 * @return Returns 0 on success, or a negative error code value on failure.
11783 ocs_hw_async_call(ocs_hw_t *hw, ocs_hw_async_cb_t callback, void *arg)
11785 ocs_hw_async_call_ctx_t *ctx;
11788 * Allocate a callback context (which includes the mailbox command buffer); we need
11789 * this to be persistent, as the mailbox command submission may be queued and executed later
11792 ctx = ocs_malloc(hw->os, sizeof(*ctx), OCS_M_ZERO | OCS_M_NOWAIT);
11794 ocs_log_err(hw->os, "failed to malloc async call context\n");
11795 return OCS_HW_RTN_NO_MEMORY;
11797 ctx->callback = callback;
11800 /* Build and send a NOP mailbox command */
11801 if (sli_cmd_common_nop(&hw->sli, ctx->cmd, sizeof(ctx->cmd), 0) == 0) {
11802 ocs_log_err(hw->os, "COMMON_NOP format failure\n");
11803 ocs_free(hw->os, ctx, sizeof(*ctx));
11804 return OCS_HW_RTN_ERROR;
11807 if (ocs_hw_command(hw, ctx->cmd, OCS_CMD_NOWAIT, ocs_hw_async_cb, ctx)) {
11808 ocs_log_err(hw->os, "COMMON_NOP command failure\n");
11809 ocs_free(hw->os, ctx, sizeof(*ctx));
11810 return OCS_HW_RTN_ERROR;
11812 return OCS_HW_RTN_SUCCESS;
11816 * @brief Initialize the reqtag pool.
11819 * The WQ request tag pool is initialized.
11821 * @param hw Pointer to HW object.
11823 * @return Returns 0 on success, or a negative error code value on failure.
11826 ocs_hw_reqtag_init(ocs_hw_t *hw)
11828 if (hw->wq_reqtag_pool == NULL) {
11829 hw->wq_reqtag_pool = ocs_pool_alloc(hw->os, sizeof(hw_wq_callback_t), 65536, TRUE);
11830 if (hw->wq_reqtag_pool == NULL) {
11831 ocs_log_err(hw->os, "ocs_pool_alloc hw_wq_callback_t failed\n");
11832 return OCS_HW_RTN_NO_MEMORY;
11835 ocs_hw_reqtag_reset(hw);
11836 return OCS_HW_RTN_SUCCESS;
11840 * @brief Allocate a WQ request tag.
11842 * Allocate and populate a WQ request tag from the WQ request tag pool.
11844 * @param hw Pointer to HW object.
11845 * @param callback Callback function.
11846 * @param arg Pointer to callback argument.
11848 * @return Returns pointer to allocated WQ request tag, or NULL if object cannot be allocated.
11851 ocs_hw_reqtag_alloc(ocs_hw_t *hw, void (*callback)(void *arg, uint8_t *cqe, int32_t status), void *arg)
11853 hw_wq_callback_t *wqcb;
11855 ocs_hw_assert(callback != NULL);
11857 wqcb = ocs_pool_get(hw->wq_reqtag_pool);
11858 if (wqcb != NULL) {
11859 ocs_hw_assert(wqcb->callback == NULL);
11860 wqcb->callback = callback;
11867 * @brief Free a WQ request tag.
11869 * Free the passed in WQ request tag.
11871 * @param hw Pointer to HW object.
11872 * @param wqcb Pointer to WQ request tag object to free.
11877 ocs_hw_reqtag_free(ocs_hw_t *hw, hw_wq_callback_t *wqcb)
11879 ocs_hw_assert(wqcb->callback != NULL);
11880 wqcb->callback = NULL;
11882 ocs_pool_put(hw->wq_reqtag_pool, wqcb);
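/*
 * Typical request tag lifecycle (a sketch; the WQE field name below is
 * hypothetical -- the tag allocated here is carried in the WQE and echoed
 * back in the completion):
 *
 *   hw_wq_callback_t *wqcb = ocs_hw_reqtag_alloc(hw, done_cb, io);
 *   wqe->request_tag = wqcb->instance_index;
 *   ...
 *   wqcb = ocs_hw_reqtag_get_instance(hw, completed_tag);
 *   wqcb->callback(wqcb->arg, cqe, status);
 *   ocs_hw_reqtag_free(hw, wqcb);
 */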
11886 * @brief Return WQ request tag by index.
11889 * Return pointer to WQ request tag object given an index.
11891 * @param hw Pointer to HW object.
11892 * @param instance_index Index of WQ request tag to return.
11894 * @return Pointer to WQ request tag, or NULL.
11897 ocs_hw_reqtag_get_instance(ocs_hw_t *hw, uint32_t instance_index)
11899 hw_wq_callback_t *wqcb;
11901 wqcb = ocs_pool_get_instance(hw->wq_reqtag_pool, instance_index);
11902 if (wqcb == NULL) {
11903 ocs_log_err(hw->os, "wqcb for instance %d is null\n", instance_index);
11909 * @brief Reset the WQ request tag pool.
11912 * Reset the WQ request tag pool, returning all to the free list.
11914 * @param hw pointer to HW object.
11919 ocs_hw_reqtag_reset(ocs_hw_t *hw)
11921 hw_wq_callback_t *wqcb;
11924 /* Remove all from freelist */
11925 while(ocs_pool_get(hw->wq_reqtag_pool) != NULL) {
11929 /* Put them all back */
11930 for (i = 0; ((wqcb = ocs_pool_get_instance(hw->wq_reqtag_pool, i)) != NULL); i++) {
11931 wqcb->instance_index = i;
11932 wqcb->callback = NULL;
11934 ocs_pool_put(hw->wq_reqtag_pool, wqcb);
11939 * @brief Handle HW assertion
11941 * HW assert, display diagnostic message, and abort.
11943 * @param cond string describing failing assertion condition
11944 * @param filename file name
11945 * @param linenum line number
11950 _ocs_hw_assert(const char *cond, const char *filename, int linenum)
11952 ocs_printf("%s(%d): HW assertion (%s) failed\n", filename, linenum, cond);
11958 * @brief Handle HW verify
11960 * HW verify, display diagnostic message, dump stack and return.
11962 * @param cond string describing failing verify condition
11963 * @param filename file name
11964 * @param linenum line number
11969 _ocs_hw_verify(const char *cond, const char *filename, int linenum)
11971 ocs_printf("%s(%d): HW verify (%s) failed\n", filename, linenum, cond);
11981 * @param hw Pointer to HW object.
11982 * @param io Pointer to HW IO
11984 * @return Return 0 if successful else returns -1
11987 ocs_hw_reque_xri( ocs_hw_t *hw, ocs_hw_io_t *io )
11991 rc = ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 1);
11993 ocs_list_add_tail(&hw->io_port_dnrx, io);
11995 goto exit_ocs_hw_reque_xri;
11998 io->auto_xfer_rdy_dnrx = 0;
11999 io->type = OCS_HW_IO_DNRX_REQUEUE;
12000 if (sli_requeue_xri_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->indicator, OCS_HW_REQUE_XRI_REGTAG, SLI4_CQ_DEFAULT)) {
12001 /* Clear buffer from XRI */
12002 ocs_pool_put(hw->auto_xfer_rdy_buf_pool, io->axr_buf);
12003 io->axr_buf = NULL;
12005 ocs_log_err(hw->os, "requeue_xri WQE error\n");
12006 ocs_list_add_tail(&hw->io_port_dnrx, io);
12009 goto exit_ocs_hw_reque_xri;
12012 if (io->wq == NULL) {
12013 io->wq = ocs_hw_queue_next_wq(hw, io);
12014 ocs_hw_assert(io->wq != NULL);
12018 * Add IO to active io wqe list before submitting, in case the
12019 * wcqe processing preempts this thread.
12021 OCS_STAT(hw->tcmd_wq_submit[io->wq->instance]++);
12022 OCS_STAT(io->wq->use_count++);
12024 rc = hw_wq_write(io->wq, &io->wqe);
12026 ocs_log_err(hw->os, "sli_queue_write reque xri failed: %d\n", rc);
12030 exit_ocs_hw_reque_xri:
12035 ocs_hw_get_def_wwn(ocs_t *ocs, uint32_t chan, uint64_t *wwpn, uint64_t *wwnn)
12037 sli4_t *sli4 = &ocs->hw.sli;
12039 uint8_t *payload = NULL;
12041 int indicator = sli4->config.extent[SLI_RSRC_FCOE_VPI].base[0] + chan;
12043 /* allocate memory for the service parameters */
12044 if (ocs_dma_alloc(ocs, &dma, 112, 4)) {
12045 ocs_log_err(ocs, "Failed to allocate DMA memory\n");
12049 if (0 == sli_cmd_read_sparm64(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
12050 &dma, indicator)) {
12051 ocs_log_err(ocs, "READ_SPARM64 allocation failure\n");
12052 ocs_dma_free(ocs, &dma);
12056 if (sli_bmbx_command(sli4)) {
12057 ocs_log_err(ocs, "READ_SPARM64 command failure\n");
12058 ocs_dma_free(ocs, &dma);
12062 payload = dma.virt;
12063 ocs_memcpy(wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET, sizeof(*wwpn));
12064 ocs_memcpy(wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET, sizeof(*wwnn));
12065 ocs_dma_free(ocs, &dma);
12070 * @page fc_hw_api_overview HW APIs
12071 * - @ref devInitShutdown
12078 * <div class="overview">
12079 * The Hardware Abstraction Layer (HW) insulates the higher-level code from the SLI-4
12080 * message details, but the higher level code must still manage domains, ports,
12081 * IT nexuses, and IOs. The HW API is designed to help the higher level manage
12082 * these objects.<br><br>
12084 * The HW uses function callbacks to notify the higher-level code of events
12085 * that are received from the chip. There are currently three types of
12086 * functions that may be registered:
12088 * <ul><li>domain – This function is called whenever a domain event is generated
12089 * within the HW. Examples include the discovery of a new FCF, the disruption
12090 * of a connection to a domain, and allocation callbacks.</li>
12091 * <li>unsolicited – This function is called whenever new data is received in
12092 * the SLI-4 receive queue.</li>
12093 * <li>rnode – This function is called for remote node events, such as attach status
12094 * and allocation callbacks.</li></ul>
12096 * Upper layer functions may be registered by using the ocs_hw_callback() function.
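*
* For example, a domain handler might be registered as follows (a minimal
* sketch; OCS_HW_CB_DOMAIN selects the domain callback, and the handler
* name is hypothetical):
*
* @code
* ocs_hw_callback(&ocs->hw, OCS_HW_CB_DOMAIN, my_domain_event_handler, ocs);
* @endcode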
12098 * <img src="elx_fc_hw.jpg" alt="FC/FCoE HW" title="FC/FCoE HW" align="right"/>
12099 * <h2>FC/FCoE HW API</h2>
12100 * The FC/FCoE HW component builds upon the SLI-4 component to establish a flexible
12101 * interface for creating the necessary common objects and sending I/Os. It may be used
12102 * “as is” in customer implementations or it can serve as an example of typical interactions
12103 * between a driver and the SLI-4 hardware. The broad categories of functionality include:
12105 * <ul><li>Setting up and tearing down the HW.</li>
12106 * <li>Allocating and using the common objects (SLI Port, domain, remote node).</li>
12107 * <li>Sending and receiving I/Os.</li></ul>
12109 * <h3>HW Setup</h3>
12110 * To set up the HW:
12113 * <li>Set up the HW object using ocs_hw_setup().<br>
12114 * This step performs a basic configuration of the SLI-4 component and the HW to
12115 * enable querying the hardware for its capabilities. At this stage, the HW is not
12116 * capable of general operations (such as, receiving events or sending I/Os).</li><br><br>
12117 * <li>Configure the HW according to the driver requirements.<br>
12118 * The HW provides functions to discover hardware capabilities (ocs_hw_get()), as
12119 * well as configures the amount of resources required (ocs_hw_set()). The driver
12120 * must also register callback functions (ocs_hw_callback()) to receive notification of
12121 * various asynchronous events.<br><br>
12122 * @b Note: Once configured, the driver must initialize the HW (ocs_hw_init()). This
12123 * step creates the underlying queues, commits resources to the hardware, and
12124 * prepares the hardware for operation. At this point the hardware is operational,
12125 * but the port is not yet online and cannot send or receive data.</li><br><br>
12127 * <li>Finally, the driver can bring the port online (ocs_hw_port_control()).<br>
12128 * When the link comes up, the HW determines if a domain is present and notifies the
12129 * driver using the domain callback function. This is the starting point of the driver's
12130 * interaction with the common objects.<br><br>
12131 * @b Note: For FCoE, there may be more than one domain available and, therefore,
12132 * more than one callback.</li>
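*
* A minimal bring-up sequence might look like the following (a sketch with
* error checking elided; see ocs_hw.h for the exact prototypes, and note that
* the OCS_HW_N_IO sizing and handler name are illustrative assumptions):
*
* @code
* ocs_hw_setup(&ocs->hw, ocs, SLI4_PORT_TYPE_FC);
* ocs_hw_set(&ocs->hw, OCS_HW_N_IO, 512);
* ocs_hw_callback(&ocs->hw, OCS_HW_CB_DOMAIN, my_domain_event_handler, ocs);
* ocs_hw_init(&ocs->hw);
* ocs_hw_port_control(&ocs->hw, OCS_HW_PORT_INIT, 0, NULL, NULL);
* @endcode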
12135 * <h3>Allocating and Using Common Objects</h3>
12136 * Common objects provide a mechanism through which the various OneCore Storage
12137 * driver components share and track information. These data structures are primarily
12138 * used to track SLI component information but can be extended by other components, if
12139 * needed. The main objects are:
12141 * <ul><li>DMA – the ocs_dma_t object describes a memory region suitable for direct
12142 * memory access (DMA) transactions.</li>
12143 * <li>SCSI domain – the ocs_domain_t object represents the SCSI domain, including
12144 * any infrastructure devices such as FC switches and FC forwarders. The domain
12145 * object contains both an FCFI and a VFI.</li>
12146 * <li>SLI Port (sport) – the ocs_sli_port_t object represents the connection between
12147 * the driver and the SCSI domain. The SLI Port object contains a VPI.</li>
12148 * <li>Remote node – the ocs_remote_node_t represents a connection between the SLI
12149 * Port and another device in the SCSI domain. The node object contains an RPI.</li></ul>
12151 * Before the driver can send I/Os, it must allocate the SCSI domain, SLI Port, and remote
12152 * node common objects and establish the connections between them. The goal is to
12153 * connect the driver to the SCSI domain to exchange I/Os with other devices. These
12154 * common object connections are shown in the following figure, FC Driver Common Objects:
12155 * <img src="elx_fc_common_objects.jpg"
12156 * alt="FC Driver Common Objects" title="FC Driver Common Objects" align="center"/>
12158 * The first step is to create a connection to the domain by allocating an SLI Port object.
12159 * The SLI Port object represents a particular FC ID and must be initialized with one. With
12160 * the SLI Port object, the driver can discover the available SCSI domain(s). On identifying
12161 * a domain, the driver allocates a domain object and attaches to it using the previous SLI
12162 * port object.<br><br>
12164 * @b Note: In some cases, the driver may need to negotiate service parameters (that is,
12165 * FLOGI) with the domain before attaching.<br><br>
12167 * Once attached to the domain, the driver can discover and attach to other devices
12168 * (remote nodes). The exact discovery method depends on the driver, but it typically
12169 * includes using a position map, querying the fabric name server, or an out-of-band
12170 * method. In most cases, it is necessary to log in with devices before performing I/Os.
12171 * Prior to sending login-related ELS commands (ocs_hw_srrs_send()), the driver must
12172 * allocate a remote node object (ocs_hw_node_alloc()). If the login negotiation is
12173 * successful, the driver must attach the nodes (ocs_hw_node_attach()) to the SLI Port
12174 * before exchanging FCP I/O.<br><br>
12176 * @b Note: The HW manages both the well known fabric address and the name server as
12177 * nodes in the domain. Therefore, the driver must allocate node objects prior to
12178 * communicating with either of these entities.
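*
* A sketch of the login path (arguments abbreviated; sparms_dma and the
* fc_addr variable are hypothetical names):
*
* @code
* ocs_hw_node_alloc(hw, rnode, fc_addr, sport);    // allocate an RPI
* ocs_hw_srrs_send(hw, OCS_HW_ELS_REQ, io, ...);   // e.g. send PLOGI
* ocs_hw_node_attach(hw, rnode, &sparms_dma);      // attach after LS_ACC
* @endcode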
12180 * <h3>Sending and Receiving I/Os</h3>
12181 * The HW provides separate interfaces for sending BLS/ELS/FC-CT and FCP, but the
12182 * commands are conceptually similar. Since the commands complete asynchronously,
12183 * the caller must provide a HW I/O object that maintains the I/O state, as well as
12184 * provide a callback function. The driver may use the same callback function for all I/O
12185 * operations, but each operation must use a unique HW I/O object. In the SLI-4
12186 * architecture, there is a direct association between the HW I/O object and the SGL used
12187 * to describe the data. Therefore, a driver typically performs the following operations:
12189 * <ul><li>Allocates a HW I/O object (ocs_hw_io_alloc()).</li>
12190 * <li>Formats the SGL, specifying both the HW I/O object and the SGL.
12191 * (ocs_hw_io_init_sges() and ocs_hw_io_add_sge()).</li>
12192 * <li>Sends the HW I/O (ocs_hw_io_send()).</li></ul>
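*
* In code, that sequence might look like this (a sketch; the buffer, iparam,
* and callback names are hypothetical, and exact prototypes are in ocs_hw.h):
*
* @code
* ocs_hw_io_t *io = ocs_hw_io_alloc(hw);
* ocs_hw_io_init_sges(hw, io, OCS_HW_IO_INITIATOR_READ);
* ocs_hw_io_add_sge(hw, io, data_buf.phys, data_buf.size);
* ocs_hw_io_send(hw, OCS_HW_IO_INITIATOR_READ, io, xfer_len, &iparam, rnode,
*                io_done_cb, ctx);
* @endcode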
12194 * <h3>HW Tear Down</h3>
12195 * To tear down the HW:
12197 * <ol><li>Take the port offline (ocs_hw_port_control()) to prevent receiving further
12198 * data and events.</li>
12199 * <li>Destroy the HW object (ocs_hw_teardown()).</li>
12200 * <li>Free any memory used by the HW, such as buffers for unsolicited data.</li></ol>
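*
* Sketched in code (assuming the same ocs/hw handles as in the setup example):
*
* @code
* ocs_hw_port_control(&ocs->hw, OCS_HW_PORT_SHUTDOWN, 0, NULL, NULL);
* ocs_hw_teardown(&ocs->hw);
* @endcode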
12202 * </div><!-- overview -->
12207 * This contains all hw runtime workaround code. Based on the asic type,
12208 * asic revision, and range of fw revisions, a particular workaround may be enabled.
12210 * A workaround may consist of overriding a particular HW/SLI4 value that was initialized
12211 * during ocs_hw_setup() (for example the MAX_QUEUE overrides for mis-reported queue
12212 * sizes). Or if required, elements of the ocs_hw_workaround_t structure may be set to
12213 * control specific runtime behavior.
12215 * The controls in ocs_hw_workaround_t are intended to be defined functionally, so the
12216 * driver code reads as "if (hw->workaround.enable_xxx) then ...", rather than
12217 * "if this is a BE3, then do xxx"
12221 #define HW_FWREV_ZERO (0ull)
12222 #define HW_FWREV_MAX (~0ull)
12224 #define SLI4_ASIC_TYPE_ANY 0
12225 #define SLI4_ASIC_REV_ANY 0
12228 * @brief Internal definition of workarounds
12232 HW_WORKAROUND_TEST = 1,
12233 HW_WORKAROUND_MAX_QUEUE, /**< Limits all queues */
12234 HW_WORKAROUND_MAX_RQ, /**< Limits only the RQ */
12235 HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH,
12236 HW_WORKAROUND_WQE_COUNT_METHOD,
12237 HW_WORKAROUND_RQE_COUNT_METHOD,
12238 HW_WORKAROUND_USE_UNREGISTERD_RPI,
12239 HW_WORKAROUND_DISABLE_AR_TGT_DIF, /**< Disable of auto-response target DIF */
12240 HW_WORKAROUND_DISABLE_SET_DUMP_LOC,
12241 HW_WORKAROUND_USE_DIF_QUARANTINE,
12242 HW_WORKAROUND_USE_DIF_SEC_XRI, /**< Use secondary xri for multiple data phases */
12243 HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB, /**< FCFI reported in SRB not correct, use "first" registered domain */
12244 HW_WORKAROUND_FW_VERSION_TOO_LOW, /**< The FW version is not the min version supported by this driver */
12245 HW_WORKAROUND_SGLC_MISREPORTED, /**< Chip supports SGL Chaining but SGLC is not set in SLI4_PARAMS */
12246 HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE, /**< Don't use SEND_FRAME capable if FW version is too old */
12250 * @brief Internal workaround structure instance
12254 sli4_asic_type_e asic_type;
12255 sli4_asic_rev_e asic_rev;
12256 uint64_t fwrev_low;
12257 uint64_t fwrev_high;
12259 hw_workaround_e workaround;
12263 static hw_workaround_t hw_workarounds[] = {
12264 {SLI4_ASIC_TYPE_ANY, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12265 HW_WORKAROUND_TEST, 999},
12267 /* Bug: 127585: if_type == 2 returns 0 for total length placed on
12268 * FCP_TSEND64_WQE completions. Note, original driver code enables this
12269 * workaround for all asic types
12271 {SLI4_ASIC_TYPE_ANY, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12272 HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH, 0},
12274 /* Bug: unknown, Lancer A0 has mis-reported max queue depth */
12275 {SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_A0, HW_FWREV_ZERO, HW_FWREV_MAX,
12276 HW_WORKAROUND_MAX_QUEUE, 2048},
12278 /* Bug: 143399, BE3 has mis-reported max RQ queue depth */
12279 {SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,6,293,0),
12280 HW_WORKAROUND_MAX_RQ, 2048},
12282 /* Bug: 143399, skyhawk has mis-reported max RQ queue depth */
12283 {SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(10,0,594,0),
12284 HW_WORKAROUND_MAX_RQ, 2048},
12286 /* Bug: 103487, BE3 before f/w 4.2.314.0 has mis-reported WQE count method */
12287 {SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,2,314,0),
12288 HW_WORKAROUND_WQE_COUNT_METHOD, 1},
12290 /* Bug: 103487, BE3 before f/w 4.2.314.0 has mis-reported RQE count method */
12291 {SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,2,314,0),
12292 HW_WORKAROUND_RQE_COUNT_METHOD, 1},
12294 /* Bug: 142968, BE3 UE with RPI == 0xffff */
12295 {SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12296 HW_WORKAROUND_USE_UNREGISTERD_RPI, 0},
12298 /* Bug: unknown, Skyhawk won't support auto-response on target T10-PI */
12299 {SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12300 HW_WORKAROUND_DISABLE_AR_TGT_DIF, 0},
12302 {SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(1,1,65,0),
12303 HW_WORKAROUND_DISABLE_SET_DUMP_LOC, 0},
12305 /* Bug: 160124, Skyhawk quarantine DIF XRIs */
12306 {SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12307 HW_WORKAROUND_USE_DIF_QUARANTINE, 0},
12309 /* Bug: 161832, Skyhawk use secondary XRI for multiple data phase TRECV */
12310 {SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12311 HW_WORKAROUND_USE_DIF_SEC_XRI, 0},
12313 /* Bug: xxxxxx, FCFI reported in SRB not correct */
12314 {SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12315 HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB, 0},
12317 /* Bug: 165642, FW version check for driver */
12318 {SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_1(OCS_MIN_FW_VER_LANCER),
12319 HW_WORKAROUND_FW_VERSION_TOO_LOW, 0},
12321 {SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_1(OCS_MIN_FW_VER_SKYHAWK),
12322 HW_WORKAROUND_FW_VERSION_TOO_LOW, 0},
12324 /* Bug 177061, Lancer FW does not set the SGLC bit */
12325 {SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12326 HW_WORKAROUND_SGLC_MISREPORTED, 0},
12328 /* BZ 181208/183914, enable this workaround for ALL revisions */
12329 {SLI4_ASIC_TYPE_ANY, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12330 HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE, 0},
12334 * @brief Function prototypes
12337 static int32_t ocs_hw_workaround_match(ocs_hw_t *hw, hw_workaround_t *w);
12340 * @brief Parse the firmware version (name)
12342 * Parse a string of the form a.b.c.d, returning a uint64_t packed as defined
12343 * by the HW_FWREV() macro
12345 * @param fwrev_string pointer to the firmware string
12347 * @return packed firmware revision value
12351 parse_fw_version(const char *fwrev_string)
12357 for (p = fwrev_string, i = 0; *p && (i < 4); i ++) {
12358 v[i] = ocs_strtoul(p, 0, 0);
12359 while(*p && *p != '.') {
12367 /* Special case for bootleg releases with f/w rev 0.0.9999.0, set to max value */
12368 if (v[2] == 9999) {
12369 return HW_FWREV_MAX;
12371 return HW_FWREV(v[0], v[1], v[2], v[3]);
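/*
 * For example, assuming HW_FWREV() packs each dotted component into 16 bits
 * (as defined in ocs_hw.h):
 *
 *   parse_fw_version("11.2.156.27")
 *     == ((uint64_t)11 << 48) | ((uint64_t)2 << 32) | (156 << 16) | 27
 */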
12376 * @brief Test for a workaround match
12378 * Looks at the asic type, asic revision, and fw revision, and returns TRUE on a match.
12380 * @param hw Pointer to the HW structure
12381 * @param w Pointer to a workaround structure entry
12383 * @return Return TRUE for a match
12387 ocs_hw_workaround_match(ocs_hw_t *hw, hw_workaround_t *w)
12389 return (((w->asic_type == SLI4_ASIC_TYPE_ANY) || (w->asic_type == hw->sli.asic_type)) &&
12390 ((w->asic_rev == SLI4_ASIC_REV_ANY) || (w->asic_rev == hw->sli.asic_rev)) &&
12391 (w->fwrev_low <= hw->workaround.fwrev) &&
12392 ((w->fwrev_high == HW_FWREV_MAX) || (hw->workaround.fwrev < w->fwrev_high)));
12396 * @brief Setup HW runtime workarounds
12398 * The function is called at the end of ocs_hw_setup() to set up any runtime workarounds
12399 * based on the HW/SLI setup.
12401 * @param hw Pointer to HW structure
12407 ocs_hw_workaround_setup(struct ocs_hw_s *hw)
12409 hw_workaround_t *w;
12410 sli4_t *sli4 = &hw->sli;
12413 /* Initialize the workaround settings */
12414 ocs_memset(&hw->workaround, 0, sizeof(hw->workaround));
12416 /* If hw_war_version is non-null, then it's a value that was set by a module parameter
12417 * (sorry for the break in abstraction, but workarounds are ... well, workarounds)
12420 if (hw->hw_war_version) {
12421 hw->workaround.fwrev = parse_fw_version(hw->hw_war_version);
12423 hw->workaround.fwrev = parse_fw_version((char*) sli4->config.fw_name[0]);
12426 /* Walk the workaround list, if a match is found, then handle it */
12427 for (i = 0, w = hw_workarounds; i < ARRAY_SIZE(hw_workarounds); i++, w++) {
12428 if (ocs_hw_workaround_match(hw, w)) {
12429 switch(w->workaround) {
12430 case HW_WORKAROUND_TEST: {
12431 ocs_log_debug(hw->os, "Override: test: %d\n", w->value);
12435 case HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH: {
12436 ocs_log_debug(hw->os, "HW Workaround: retain TSEND IO length\n");
12437 hw->workaround.retain_tsend_io_length = 1;
12440 case HW_WORKAROUND_MAX_QUEUE: {
12443 ocs_log_debug(hw->os, "HW Workaround: override max_qentries: %d\n", w->value);
12444 for (q = SLI_QTYPE_EQ; q < SLI_QTYPE_MAX; q++) {
12445 if (hw->num_qentries[q] > w->value) {
12446 hw->num_qentries[q] = w->value;
12451 case HW_WORKAROUND_MAX_RQ: {
12452 ocs_log_debug(hw->os, "HW Workaround: override RQ max_qentries: %d\n", w->value);
12453 if (hw->num_qentries[SLI_QTYPE_RQ] > w->value) {
12454 hw->num_qentries[SLI_QTYPE_RQ] = w->value;
12458 case HW_WORKAROUND_WQE_COUNT_METHOD: {
12459 ocs_log_debug(hw->os, "HW Workaround: set WQE count method=%d\n", w->value);
12460 sli4->config.count_method[SLI_QTYPE_WQ] = w->value;
12461 sli_calc_max_qentries(sli4);
12464 case HW_WORKAROUND_RQE_COUNT_METHOD: {
12465 ocs_log_debug(hw->os, "HW Workaround: set RQE count method=%d\n", w->value);
12466 sli4->config.count_method[SLI_QTYPE_RQ] = w->value;
12467 sli_calc_max_qentries(sli4);
12470 case HW_WORKAROUND_USE_UNREGISTERD_RPI:
12471 ocs_log_debug(hw->os, "HW Workaround: use unreg'd RPI if rnode->indicator == 0xFFFF\n");
12472 hw->workaround.use_unregistered_rpi = TRUE;
12474 * Allocate an RPI that is never registered, to be used in the case where
12475 * a node has been unregistered, and its indicator (RPI) value is set to 0xFFFF
12477 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &hw->workaround.unregistered_rid,
12478 &hw->workaround.unregistered_index)) {
12479 ocs_log_err(hw->os, "sli_resource_alloc unregistered RPI failed\n");
12480 hw->workaround.use_unregistered_rpi = FALSE;
12483 case HW_WORKAROUND_DISABLE_AR_TGT_DIF:
12484 ocs_log_debug(hw->os, "HW Workaround: disable AR on T10-PI TSEND\n");
12485 hw->workaround.disable_ar_tgt_dif = TRUE;
12487 case HW_WORKAROUND_DISABLE_SET_DUMP_LOC:
12488 ocs_log_debug(hw->os, "HW Workaround: disable set_dump_loc\n");
12489 hw->workaround.disable_dump_loc = TRUE;
12491 case HW_WORKAROUND_USE_DIF_QUARANTINE:
12492 ocs_log_debug(hw->os, "HW Workaround: use DIF quarantine\n");
12493 hw->workaround.use_dif_quarantine = TRUE;
12495 case HW_WORKAROUND_USE_DIF_SEC_XRI:
12496 ocs_log_debug(hw->os, "HW Workaround: use DIF secondary xri\n");
12497 hw->workaround.use_dif_sec_xri = TRUE;
12499 case HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB:
12500 ocs_log_debug(hw->os, "HW Workaround: override FCFI in SRB\n");
12501 hw->workaround.override_fcfi = TRUE;
12504 case HW_WORKAROUND_FW_VERSION_TOO_LOW:
12505 ocs_log_debug(hw->os, "HW Workaround: fw version is below the minimum for this driver\n");
12506 hw->workaround.fw_version_too_low = TRUE;
12508 case HW_WORKAROUND_SGLC_MISREPORTED:
12509 ocs_log_debug(hw->os, "HW Workaround: SGLC misreported - chaining is enabled\n");
12510 hw->workaround.sglc_misreported = TRUE;
12512 case HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE:
12513 ocs_log_debug(hw->os, "HW Workaround: not SEND_FRAME capable - disabled\n");
12514 hw->workaround.ignore_send_frame = TRUE;
12516 } /* switch(w->workaround) */