2 * Copyright (c) 2017 Broadcom. All rights reserved.
3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
15 * 3. Neither the name of the copyright holder nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
36 * Defines and implements the Hardware Abstraction Layer (HW).
37 * All interaction with the hardware is performed through the HW, which abstracts
38 * the details of the underlying SLI-4 implementation.
42 * @defgroup devInitShutdown Device Initialization and Shutdown
43 * @defgroup domain Domain Functions
44 * @defgroup port Port Functions
45 * @defgroup node Remote Node Functions
46 * @defgroup io IO Functions
47 * @defgroup interrupt Interrupt handling
48 * @defgroup os OS Required Functions
54 #include "ocs_hw_queues.h"
56 #define OCS_HW_MQ_DEPTH 128
57 #define OCS_HW_READ_FCF_SIZE 4096
58 #define OCS_HW_DEFAULT_AUTO_XFER_RDY_IOS 256
59 #define OCS_HW_WQ_TIMER_PERIOD_MS 500
61 /* values used for setting the auto xfer rdy parameters */
62 #define OCS_HW_AUTO_XFER_RDY_BLK_SIZE_DEFAULT 0 /* 512 bytes */
63 #define OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA_DEFAULT TRUE
64 #define OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID_DEFAULT FALSE
65 #define OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE_DEFAULT 0
66 #define OCS_HW_REQUE_XRI_REGTAG 65534
67 /* max command and response buffer lengths -- arbitrary at the moment */
68 #define OCS_HW_DMTF_CLP_CMD_MAX 256
69 #define OCS_HW_DMTF_CLP_RSP_MAX 256
72 ocs_hw_global_t hw_global;
74 static void ocs_hw_queue_hash_add(ocs_queue_hash_t *, uint16_t, uint16_t);
75 static void ocs_hw_adjust_wqs(ocs_hw_t *hw);
76 static uint32_t ocs_hw_get_num_chutes(ocs_hw_t *hw);
77 static int32_t ocs_hw_cb_link(void *, void *);
78 static int32_t ocs_hw_cb_fip(void *, void *);
79 static int32_t ocs_hw_command_process(ocs_hw_t *, int32_t, uint8_t *, size_t);
80 static int32_t ocs_hw_mq_process(ocs_hw_t *, int32_t, sli4_queue_t *);
81 static int32_t ocs_hw_cb_read_fcf(ocs_hw_t *, int32_t, uint8_t *, void *);
82 static int32_t ocs_hw_cb_node_attach(ocs_hw_t *, int32_t, uint8_t *, void *);
83 static int32_t ocs_hw_cb_node_free(ocs_hw_t *, int32_t, uint8_t *, void *);
84 static int32_t ocs_hw_cb_node_free_all(ocs_hw_t *, int32_t, uint8_t *, void *);
85 static ocs_hw_rtn_e ocs_hw_setup_io(ocs_hw_t *);
86 static ocs_hw_rtn_e ocs_hw_init_io(ocs_hw_t *);
87 static int32_t ocs_hw_flush(ocs_hw_t *);
88 static int32_t ocs_hw_command_cancel(ocs_hw_t *);
89 static int32_t ocs_hw_io_cancel(ocs_hw_t *);
90 static void ocs_hw_io_quarantine(ocs_hw_t *hw, hw_wq_t *wq, ocs_hw_io_t *io);
91 static void ocs_hw_io_restore_sgl(ocs_hw_t *, ocs_hw_io_t *);
92 static int32_t ocs_hw_io_ini_sge(ocs_hw_t *, ocs_hw_io_t *, ocs_dma_t *, uint32_t, ocs_dma_t *);
93 static ocs_hw_rtn_e ocs_hw_firmware_write_lancer(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg);
94 static int32_t ocs_hw_cb_fw_write(ocs_hw_t *, int32_t, uint8_t *, void *);
95 static int32_t ocs_hw_cb_sfp(ocs_hw_t *, int32_t, uint8_t *, void *);
96 static int32_t ocs_hw_cb_temp(ocs_hw_t *, int32_t, uint8_t *, void *);
97 static int32_t ocs_hw_cb_link_stat(ocs_hw_t *, int32_t, uint8_t *, void *);
98 static int32_t ocs_hw_cb_host_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);
99 static void ocs_hw_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);
100 static int32_t ocs_hw_clp_resp_get_value(ocs_hw_t *hw, const char *keyword, char *value, uint32_t value_len, const char *resp, uint32_t resp_len);
101 typedef void (*ocs_hw_dmtf_clp_cb_t)(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg);
102 static ocs_hw_rtn_e ocs_hw_exec_dmtf_clp_cmd(ocs_hw_t *hw, ocs_dma_t *dma_cmd, ocs_dma_t *dma_resp, uint32_t opts, ocs_hw_dmtf_clp_cb_t cb, void *arg);
103 static void ocs_hw_linkcfg_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg);
105 static int32_t __ocs_read_topology_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
106 static ocs_hw_rtn_e ocs_hw_get_linkcfg(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
107 static ocs_hw_rtn_e ocs_hw_get_linkcfg_lancer(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
108 static ocs_hw_rtn_e ocs_hw_get_linkcfg_skyhawk(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
109 static ocs_hw_rtn_e ocs_hw_set_linkcfg(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
110 static ocs_hw_rtn_e ocs_hw_set_linkcfg_lancer(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
111 static ocs_hw_rtn_e ocs_hw_set_linkcfg_skyhawk(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
112 static void ocs_hw_init_linkcfg_cb(int32_t status, uintptr_t value, void *arg);
113 static ocs_hw_rtn_e ocs_hw_set_eth_license(ocs_hw_t *hw, uint32_t license);
114 static ocs_hw_rtn_e ocs_hw_set_dif_seed(ocs_hw_t *hw);
115 static ocs_hw_rtn_e ocs_hw_set_dif_mode(ocs_hw_t *hw);
116 static void ocs_hw_io_free_internal(void *arg);
117 static void ocs_hw_io_free_port_owned(void *arg);
118 static ocs_hw_rtn_e ocs_hw_config_auto_xfer_rdy_t10pi(ocs_hw_t *hw, uint8_t *buf);
119 static ocs_hw_rtn_e ocs_hw_config_set_fdt_xfer_hint(ocs_hw_t *hw, uint32_t fdt_xfer_hint);
120 static void ocs_hw_wq_process_abort(void *arg, uint8_t *cqe, int32_t status);
121 static int32_t ocs_hw_config_mrq(ocs_hw_t *hw, uint8_t, uint16_t, uint16_t);
122 static ocs_hw_rtn_e ocs_hw_config_watchdog_timer(ocs_hw_t *hw);
123 static ocs_hw_rtn_e ocs_hw_config_sli_port_health_check(ocs_hw_t *hw, uint8_t query, uint8_t enable);
125 /* HW domain database operations */
126 static int32_t ocs_hw_domain_add(ocs_hw_t *, ocs_domain_t *);
127 static int32_t ocs_hw_domain_del(ocs_hw_t *, ocs_domain_t *);
130 /* Port state machine */
131 static void *__ocs_hw_port_alloc_init(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
132 static void *__ocs_hw_port_alloc_read_sparm64(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
133 static void *__ocs_hw_port_alloc_init_vpi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
134 static void *__ocs_hw_port_done(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
135 static void *__ocs_hw_port_free_unreg_vpi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
137 /* Domain state machine */
138 static void *__ocs_hw_domain_init(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
139 static void *__ocs_hw_domain_alloc_reg_fcfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
140 static void * __ocs_hw_domain_alloc_init_vfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
141 static void *__ocs_hw_domain_free_unreg_vfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
142 static void *__ocs_hw_domain_free_unreg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data);
143 static int32_t __ocs_hw_domain_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
144 static int32_t __ocs_hw_port_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
145 static int32_t __ocs_hw_port_realloc_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);
148 static void ocs_hw_check_sec_hio_list(ocs_hw_t *hw);
151 static void target_wqe_timer_cb(void *arg);
152 static void shutdown_target_wqe_timer(ocs_hw_t *hw);
/*
 * NOTE(review): this block (like the rest of this capture) is a sampled
 * extract — stray original line numbers are embedded in each line and some
 * lines (braces, closing statements) are missing. Code text left byte-identical.
 *
 * Purpose: when target-WQE timeout emulation is enabled and this IO has a
 * non-zero tgt_wqe_timeout, record it on hw->io_timed_wqe (under io_lock)
 * and stamp the submit time so the periodic timer can detect expiry.
 */
155 ocs_hw_add_io_timed_wqe(ocs_hw_t *hw, ocs_hw_io_t *io)
157 if (hw->config.emulate_tgt_wqe_timeout && io->tgt_wqe_timeout) {
159 * Active WQE list currently only used for
160 * target WQE timeouts.
/* io_lock guards the io_timed_wqe list; submit_ticks is read later by the timeout scan */
162 ocs_lock(&hw->io_lock);
163 ocs_list_add_tail(&hw->io_timed_wqe, io);
164 io->submit_ticks = ocs_get_os_ticks();
165 ocs_unlock(&hw->io_lock);
/*
 * NOTE(review): sampled extract — code text left byte-identical.
 *
 * Purpose: counterpart of ocs_hw_add_io_timed_wqe(). If timeout emulation is
 * on, remove the IO from hw->io_timed_wqe under io_lock; the ocs_list_on_list()
 * check makes removal safe when the IO was never added (or already removed).
 */
170 ocs_hw_remove_io_timed_wqe(ocs_hw_t *hw, ocs_hw_io_t *io)
172 if (hw->config.emulate_tgt_wqe_timeout) {
174 * If target wqe timeouts are enabled,
175 * remove from active wqe list.
177 ocs_lock(&hw->io_lock);
178 if (ocs_list_on_list(&io->wqe_link)) {
179 ocs_list_remove(&hw->io_timed_wqe, io);
181 ocs_unlock(&hw->io_lock);
/*
 * NOTE(review): sampled extract — the switch scaffolding and return
 * statements are missing from this view; code text left byte-identical.
 *
 * Purpose (from visible cases): classify an IO type as exchange originator —
 * the three initiator IO types are treated as originator; presumably all
 * other types return "not originator" — TODO confirm against full source.
 */
185 static uint8_t ocs_hw_iotype_is_originator(uint16_t io_type)
188 case OCS_HW_IO_INITIATOR_READ:
189 case OCS_HW_IO_INITIATOR_WRITE:
190 case OCS_HW_IO_INITIATOR_NODATA:
/*
 * NOTE(review): sampled extract — code text left byte-identical.
 *
 * Purpose: decide whether a WCQE completion requires the driver to issue an
 * explicit abort. Visible logic: if the exchange is no longer active (xb not
 * set — TODO confirm, the check itself is missing from this view) nothing is
 * aborted; for LOCAL_REJECT status, INVALID_RPI (seen on Lancer after
 * unreg_rpi) and ABORT_REQUESTED (abort already in flight) are exempted.
 */
199 static uint8_t ocs_hw_wcqe_abort_needed(uint16_t status, uint8_t ext, uint8_t xb)
201 /* if exchange not active, nothing to abort */
205 if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT) {
207 /* exceptions where abort is not needed */
208 case SLI4_FC_LOCAL_REJECT_INVALID_RPI: /* lancer returns this after unreg_rpi */
209 case SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED: /* abort already in progress */
/* NOTE(review): sampled extract — code text left byte-identical. */
219 * @brief Determine the number of chutes on the device.
222 * Some devices require queue resources allocated per protocol processor
223 * (chute). This function returns the number of chutes on this device.
225 * @param hw Hardware context allocated by the caller.
227 * @return Returns the number of chutes on the device for protocol.
230 ocs_hw_get_num_chutes(ocs_hw_t *hw)
/* default is one chute; dual-ULP-capable parts with both ULPs enabled
 * presumably report two — the assignment line is missing from this view */
232 uint32_t num_chutes = 1;
234 if (sli_get_is_dual_ulp_capable(&hw->sli) &&
235 sli_get_is_ulp_enabled(&hw->sli, 0) &&
236 sli_get_is_ulp_enabled(&hw->sli, 1)) {
/*
 * NOTE(review): sampled extract — the guard condition preceding the
 * ocs_log_err() is missing from this view (presumably a NULL-hw check);
 * code text left byte-identical.
 *
 * Purpose: reset hw->link to its "unknown" sentinel state (MAX enum values,
 * no loop map, fc_id = UINT32_MAX) ahead of (re)initialization.
 */
243 ocs_hw_link_event_init(ocs_hw_t *hw)
246 ocs_log_err(hw->os, "bad parameter hw=%p\n", hw);
247 return OCS_HW_RTN_ERROR;
250 hw->link.status = SLI_LINK_STATUS_MAX;
251 hw->link.topology = SLI_LINK_TOPO_NONE;
252 hw->link.medium = SLI_LINK_MEDIUM_MAX;
254 hw->link.loop_map = NULL;
255 hw->link.fc_id = UINT32_MAX;
257 return OCS_HW_RTN_SUCCESS;
/* NOTE(review): sampled extract — code text left byte-identical. */
261 * @ingroup devInitShutdown
262 * @brief If this is physical port 0, then read the max dump size.
265 * Queries the FW for the maximum dump size
267 * @param hw Hardware context allocated by the caller.
269 * @return Returns 0 on success, or a non-zero value on failure.
272 ocs_hw_read_max_dump_size(ocs_hw_t *hw)
274 uint8_t buf[SLI4_BMBX_SIZE];
275 uint8_t bus, dev, func;
/* feature only exists on SLI-4 interface type 2 (Lancer FC/FCoE) */
279 if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
280 ocs_log_debug(hw->os, "Function only supported for I/F type 2\n");
281 return OCS_HW_RTN_ERROR;
285 * Make sure the FW is new enough to support this command. If the FW
286 * is too old, the FW will UE.
288 if (hw->workaround.disable_dump_loc) {
289 ocs_log_test(hw->os, "FW version is too old for this feature\n");
290 return OCS_HW_RTN_ERROR;
293 /* attempt to detemine the dump size for function 0 only. */
294 ocs_get_bus_dev_func(hw->os, &bus, &dev, &func);
/* build COMMON_SET_DUMP_LOCATION in the bootstrap mailbox, poll it, and
 * read the buffer_length out of the embedded response payload */
296 if (sli_cmd_common_set_dump_location(&hw->sli, buf,
297 SLI4_BMBX_SIZE, 1, 0, NULL, 0)) {
298 sli4_res_common_set_dump_location_t *rsp =
299 (sli4_res_common_set_dump_location_t *)
300 (buf + offsetof(sli4_cmd_sli_config_t,
303 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
304 if (rc != OCS_HW_RTN_SUCCESS) {
305 ocs_log_test(hw->os, "set dump location command failed\n");
308 hw->dump_size = rsp->buffer_length;
309 ocs_log_debug(hw->os, "Dump size %x\n", rsp->buffer_length);
313 return OCS_HW_RTN_SUCCESS;
/*
 * NOTE(review): sampled extract — many original lines (guards, braces,
 * else-arms) are missing from this view; code text left byte-identical.
 */
317 * @ingroup devInitShutdown
318 * @brief Set up the Hardware Abstraction Layer module.
321 * Calls set up to configure the hardware.
323 * @param hw Hardware context allocated by the caller.
324 * @param os Device abstraction.
325 * @param port_type Protocol type of port, such as FC and NIC.
327 * @todo Why is port_type a parameter?
329 * @return Returns 0 on success, or a non-zero value on failure.
332 ocs_hw_setup(ocs_hw_t *hw, ocs_os_handle_t os, sli4_port_type_e port_type)
338 ocs_log_err(os, "bad parameter(s) hw=%p\n", hw);
339 return OCS_HW_RTN_ERROR;
/* re-entrant call: only refresh run-time workarounds, don't re-zero state */
342 if (hw->hw_setup_called) {
343 /* Setup run-time workarounds.
344 * Call for each setup, to allow for hw_war_version
346 ocs_hw_workaround_setup(hw);
347 return OCS_HW_RTN_SUCCESS;
351 * ocs_hw_init() relies on NULL pointers indicating that a structure
352 * needs allocation. If a structure is non-NULL, ocs_hw_init() won't
353 * free/realloc that memory
355 ocs_memset(hw, 0, sizeof(ocs_hw_t));
357 hw->hw_setup_called = TRUE;
/* command-context bookkeeping: locks, active/pending lists */
361 ocs_lock_init(hw->os, &hw->cmd_lock, "HW_cmd_lock[%d]", ocs_instance(hw->os));
362 ocs_list_init(&hw->cmd_head, ocs_command_ctx_t, link);
363 ocs_list_init(&hw->cmd_pending, ocs_command_ctx_t, link);
364 hw->cmd_head_count = 0;
366 ocs_lock_init(hw->os, &hw->io_lock, "HW_io_lock[%d]", ocs_instance(hw->os));
367 ocs_lock_init(hw->os, &hw->io_abort_lock, "HW_io_abort_lock[%d]", ocs_instance(hw->os));
369 ocs_atomic_init(&hw->io_alloc_failed_count, 0);
/* default config: auto-negotiate speed, auto-xfer-rdy parameters from the
 * OCS_HW_AUTO_XFER_RDY_* defaults defined at the top of this file */
371 hw->config.speed = FC_LINK_SPEED_AUTO_16_8_4;
372 hw->config.dif_seed = 0;
373 hw->config.auto_xfer_rdy_blk_size_chip = OCS_HW_AUTO_XFER_RDY_BLK_SIZE_DEFAULT;
374 hw->config.auto_xfer_rdy_ref_tag_is_lba = OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA_DEFAULT;
375 hw->config.auto_xfer_rdy_app_tag_valid = OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID_DEFAULT;
376 hw->config.auto_xfer_rdy_app_tag_value = OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE_DEFAULT;
379 if (sli_setup(&hw->sli, hw->os, port_type)) {
380 ocs_log_err(hw->os, "SLI setup failed\n");
381 return OCS_HW_RTN_ERROR;
384 ocs_memset(hw->domains, 0, sizeof(hw->domains));
386 ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));
388 ocs_hw_link_event_init(hw);
/* route SLI link and FIP events back into this layer */
390 sli_callback(&hw->sli, SLI4_CB_LINK, ocs_hw_cb_link, hw);
391 sli_callback(&hw->sli, SLI4_CB_FIP, ocs_hw_cb_fip, hw);
394 * Set all the queue sizes to the maximum allowed. These values may
395 * be changes later by the adjust and workaround functions.
397 for (i = 0; i < ARRAY_SIZE(hw->num_qentries); i++) {
398 hw->num_qentries[i] = sli_get_max_qentries(&hw->sli, i);
402 * The RQ assignment for RQ pair mode.
404 hw->config.rq_default_buffer_size = OCS_HW_RQ_SIZE_PAYLOAD;
405 hw->config.n_io = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI);
406 if (ocs_get_property("auto_xfer_rdy_xri_cnt", prop_buf, sizeof(prop_buf)) == 0) {
407 hw->config.auto_xfer_rdy_xri_cnt = ocs_strtoul(prop_buf, 0, 0);
410 /* by default, enable initiator-only auto-ABTS emulation */
411 hw->config.i_only_aab = TRUE;
413 /* Setup run-time workarounds */
414 ocs_hw_workaround_setup(hw);
416 /* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
417 if (hw->workaround.override_fcfi) {
418 hw->first_domain_idx = -1;
421 /* Must be done after the workaround setup */
422 if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
423 (void)ocs_hw_read_max_dump_size(hw);
426 /* calculate the number of WQs required. */
427 ocs_hw_adjust_wqs(hw);
429 /* Set the default dif mode */
430 if (! sli_is_dif_inline_capable(&hw->sli)) {
431 ocs_log_test(hw->os, "not inline capable, setting mode to separate\n");
432 hw->config.dif_mode = OCS_HW_DIF_MODE_SEPARATE;
434 /* Workaround: BZ 161832 */
435 if (hw->workaround.use_dif_sec_xri) {
436 ocs_list_init(&hw->sec_hio_wait_list, ocs_hw_io_t, link);
440 * Figure out the starting and max ULP to spread the WQs across the
/* ulp_start/ulp_max assignments are missing from this view — presumably
 * set inside these branches; confirm against full source */
443 if (sli_get_is_dual_ulp_capable(&hw->sli)) {
444 if (sli_get_is_ulp_enabled(&hw->sli, 0) &&
445 sli_get_is_ulp_enabled(&hw->sli, 1)) {
448 } else if (sli_get_is_ulp_enabled(&hw->sli, 0)) {
456 if (sli_get_is_ulp_enabled(&hw->sli, 0)) {
464 ocs_log_debug(hw->os, "ulp_start %d, ulp_max %d\n",
465 hw->ulp_start, hw->ulp_max);
/* parse the queue topology string into per-type queue counts */
466 hw->config.queue_topology = hw_global.queue_topology_string;
468 hw->qtop = ocs_hw_qtop_parse(hw, hw->config.queue_topology);
470 hw->config.n_eq = hw->qtop->entry_counts[QTOP_EQ];
471 hw->config.n_cq = hw->qtop->entry_counts[QTOP_CQ];
472 hw->config.n_rq = hw->qtop->entry_counts[QTOP_RQ];
473 hw->config.n_wq = hw->qtop->entry_counts[QTOP_WQ];
474 hw->config.n_mq = hw->qtop->entry_counts[QTOP_MQ];
476 /* Verify qtop configuration against driver supported configuration */
477 if (hw->config.n_rq > OCE_HW_MAX_NUM_MRQ_PAIRS) {
478 ocs_log_crit(hw->os, "Max supported MRQ pairs = %d\n",
479 OCE_HW_MAX_NUM_MRQ_PAIRS);
480 return OCS_HW_RTN_ERROR;
483 if (hw->config.n_eq > OCS_HW_MAX_NUM_EQ) {
484 ocs_log_crit(hw->os, "Max supported EQs = %d\n",
486 return OCS_HW_RTN_ERROR;
489 if (hw->config.n_cq > OCS_HW_MAX_NUM_CQ) {
490 ocs_log_crit(hw->os, "Max supported CQs = %d\n",
492 return OCS_HW_RTN_ERROR;
495 if (hw->config.n_wq > OCS_HW_MAX_NUM_WQ) {
496 ocs_log_crit(hw->os, "Max supported WQs = %d\n",
498 return OCS_HW_RTN_ERROR;
501 if (hw->config.n_mq > OCS_HW_MAX_NUM_MQ) {
502 ocs_log_crit(hw->os, "Max supported MQs = %d\n",
504 return OCS_HW_RTN_ERROR;
507 return OCS_HW_RTN_SUCCESS;
/*
 * NOTE(review): sampled extract — many original lines (declarations,
 * braces, else-arms, goto/cleanup paths) are missing from this view;
 * code text left byte-identical. Claims below are limited to what the
 * visible lines show.
 */
511 * @ingroup devInitShutdown
512 * @brief Allocate memory structures to prepare for the device operation.
515 * Allocates memory structures needed by the device and prepares the device
517 * @n @n @b Note: This function may be called more than once (for example, at
518 * initialization and then after a reset), but the size of the internal resources
519 * may not be changed without tearing down the HW (ocs_hw_teardown()).
521 * @param hw Hardware context allocated by the caller.
523 * @return Returns 0 on success, or a non-zero value on failure.
526 ocs_hw_init(ocs_hw_t *hw)
530 uint8_t buf[SLI4_BMBX_SIZE];
533 int written_size = 0;
536 uint32_t ramdisc_blocksize = 512;
537 uint32_t q_count = 0;
539 * Make sure the command lists are empty. If this is start-of-day,
540 * they'll be empty since they were just initialized in ocs_hw_setup.
541 * If we've just gone through a reset, the command and command pending
542 * lists should have been cleaned up as part of the reset (ocs_hw_reset()).
544 ocs_lock(&hw->cmd_lock);
545 if (!ocs_list_empty(&hw->cmd_head)) {
546 ocs_log_test(hw->os, "command found on cmd list\n");
547 ocs_unlock(&hw->cmd_lock);
548 return OCS_HW_RTN_ERROR;
550 if (!ocs_list_empty(&hw->cmd_pending)) {
551 ocs_log_test(hw->os, "command found on pending list\n");
552 ocs_unlock(&hw->cmd_lock);
553 return OCS_HW_RTN_ERROR;
555 ocs_unlock(&hw->cmd_lock);
557 /* Free RQ buffers if prevously allocated */
561 * The IO queues must be initialized here for the reset case. The
562 * ocs_hw_init_io() function will re-add the IOs to the free list.
563 * The cmd_head list should be OK since we free all entries in
564 * ocs_hw_command_cancel() that is called in the ocs_hw_reset().
567 /* If we are in this function due to a reset, there may be stale items
568 * on lists that need to be removed. Clean them up.
571 if (ocs_list_valid(&hw->io_wait_free)) {
572 while ((!ocs_list_empty(&hw->io_wait_free))) {
574 ocs_list_remove_head(&hw->io_wait_free);
577 ocs_log_debug(hw->os, "removed %d items from io_wait_free list\n", rem_count);
581 if (ocs_list_valid(&hw->io_inuse)) {
582 while ((!ocs_list_empty(&hw->io_inuse))) {
584 ocs_list_remove_head(&hw->io_inuse);
587 ocs_log_debug(hw->os, "removed %d items from io_inuse list\n", rem_count);
591 if (ocs_list_valid(&hw->io_free)) {
592 while ((!ocs_list_empty(&hw->io_free))) {
594 ocs_list_remove_head(&hw->io_free);
597 ocs_log_debug(hw->os, "removed %d items from io_free list\n", rem_count);
600 if (ocs_list_valid(&hw->io_port_owned)) {
601 while ((!ocs_list_empty(&hw->io_port_owned))) {
602 ocs_list_remove_head(&hw->io_port_owned);
/* (re)initialize all IO bookkeeping lists from scratch */
605 ocs_list_init(&hw->io_inuse, ocs_hw_io_t, link);
606 ocs_list_init(&hw->io_free, ocs_hw_io_t, link);
607 ocs_list_init(&hw->io_port_owned, ocs_hw_io_t, link);
608 ocs_list_init(&hw->io_wait_free, ocs_hw_io_t, link);
609 ocs_list_init(&hw->io_timed_wqe, ocs_hw_io_t, wqe_link);
610 ocs_list_init(&hw->io_port_dnrx, ocs_hw_io_t, dnrx_link);
612 /* If MRQ not required, Make sure we dont request feature. */
613 if (hw->config.n_rq == 1) {
614 hw->sli.config.features.flag.mrqp = FALSE;
617 if (sli_init(&hw->sli)) {
618 ocs_log_err(hw->os, "SLI failed to initialize\n");
619 return OCS_HW_RTN_ERROR;
623 * Enable the auto xfer rdy feature if requested.
625 hw->auto_xfer_rdy_enabled = FALSE;
626 if (sli_get_auto_xfer_rdy_capable(&hw->sli) &&
627 hw->config.auto_xfer_rdy_size > 0) {
628 if (hw->config.esoc){
629 if (ocs_get_property("ramdisc_blocksize", prop_buf, sizeof(prop_buf)) == 0) {
630 ramdisc_blocksize = ocs_strtoul(prop_buf, 0, 0);
632 written_size = sli_cmd_config_auto_xfer_rdy_hp(&hw->sli, buf, SLI4_BMBX_SIZE, hw->config.auto_xfer_rdy_size, 1, ramdisc_blocksize);
634 written_size = sli_cmd_config_auto_xfer_rdy(&hw->sli, buf, SLI4_BMBX_SIZE, hw->config.auto_xfer_rdy_size);
637 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
638 if (rc != OCS_HW_RTN_SUCCESS) {
639 ocs_log_err(hw->os, "config auto xfer rdy failed\n");
643 hw->auto_xfer_rdy_enabled = TRUE;
645 if (hw->config.auto_xfer_rdy_t10_enable) {
646 rc = ocs_hw_config_auto_xfer_rdy_t10pi(hw, buf);
647 if (rc != OCS_HW_RTN_SUCCESS) {
648 ocs_log_err(hw->os, "set parameters auto xfer rdy T10 PI failed\n");
654 if(hw->sliport_healthcheck) {
655 rc = ocs_hw_config_sli_port_health_check(hw, 0, 1);
656 if (rc != OCS_HW_RTN_SUCCESS) {
657 ocs_log_err(hw->os, "Enabling Sliport Health check failed \n");
663 * Set FDT transfer hint, only works on Lancer
665 if ((hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) && (OCS_HW_FDT_XFER_HINT != 0)) {
667 * Non-fatal error. In particular, we can disregard failure to set OCS_HW_FDT_XFER_HINT on
668 * devices with legacy firmware that do not support OCS_HW_FDT_XFER_HINT feature.
670 ocs_hw_config_set_fdt_xfer_hint(hw, OCS_HW_FDT_XFER_HINT);
674 * Verify that we have not exceeded any queue sizes
676 q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_EQ),
678 if (hw->config.n_eq > q_count) {
679 ocs_log_err(hw->os, "requested %d EQ but %d allowed\n",
680 hw->config.n_eq, q_count);
681 return OCS_HW_RTN_ERROR;
684 q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_CQ),
686 if (hw->config.n_cq > q_count) {
687 ocs_log_err(hw->os, "requested %d CQ but %d allowed\n",
688 hw->config.n_cq, q_count);
689 return OCS_HW_RTN_ERROR;
692 q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_MQ),
694 if (hw->config.n_mq > q_count) {
695 ocs_log_err(hw->os, "requested %d MQ but %d allowed\n",
696 hw->config.n_mq, q_count);
697 return OCS_HW_RTN_ERROR;
700 q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_RQ),
702 if (hw->config.n_rq > q_count) {
703 ocs_log_err(hw->os, "requested %d RQ but %d allowed\n",
704 hw->config.n_rq, q_count);
705 return OCS_HW_RTN_ERROR;
708 q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_WQ),
710 if (hw->config.n_wq > q_count) {
711 ocs_log_err(hw->os, "requested %d WQ but %d allowed\n",
712 hw->config.n_wq, q_count);
713 return OCS_HW_RTN_ERROR;
716 /* zero the hashes */
717 ocs_memset(hw->cq_hash, 0, sizeof(hw->cq_hash));
718 ocs_log_debug(hw->os, "Max CQs %d, hash size = %d\n",
719 OCS_HW_MAX_NUM_CQ, OCS_HW_Q_HASH_SIZE);
721 ocs_memset(hw->rq_hash, 0, sizeof(hw->rq_hash));
722 ocs_log_debug(hw->os, "Max RQs %d, hash size = %d\n",
723 OCS_HW_MAX_NUM_RQ, OCS_HW_Q_HASH_SIZE);
725 ocs_memset(hw->wq_hash, 0, sizeof(hw->wq_hash));
726 ocs_log_debug(hw->os, "Max WQs %d, hash size = %d\n",
727 OCS_HW_MAX_NUM_WQ, OCS_HW_Q_HASH_SIZE);
730 rc = ocs_hw_init_queues(hw, hw->qtop);
731 if (rc != OCS_HW_RTN_SUCCESS) {
/* size remote-node (RPI header template) DMA memory from the SLI layer */
735 max_rpi = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
736 i = sli_fc_get_rpi_requirements(&hw->sli, max_rpi);
738 ocs_dma_t payload_memory;
740 rc = OCS_HW_RTN_ERROR;
742 if (hw->rnode_mem.size) {
743 ocs_dma_free(hw->os, &hw->rnode_mem);
746 if (ocs_dma_alloc(hw->os, &hw->rnode_mem, i, 4096)) {
747 ocs_log_err(hw->os, "remote node memory allocation fail\n");
748 return OCS_HW_RTN_NO_MEMORY;
751 payload_memory.size = 0;
752 if (sli_cmd_fcoe_post_hdr_templates(&hw->sli, buf, SLI4_BMBX_SIZE,
753 &hw->rnode_mem, UINT16_MAX, &payload_memory)) {
754 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
756 if (payload_memory.size != 0) {
757 /* The command was non-embedded - need to free the dma buffer */
758 ocs_dma_free(hw->os, &payload_memory);
762 if (rc != OCS_HW_RTN_SUCCESS) {
763 ocs_log_err(hw->os, "header template registration failed\n");
768 /* Allocate and post RQ buffers */
769 rc = ocs_hw_rx_allocate(hw);
771 ocs_log_err(hw->os, "rx_allocate failed\n");
775 /* Populate hw->seq_free_list */
776 if (hw->seq_pool == NULL) {
780 /* Sum up the total number of RQ entries, to use to allocate the sequence object pool */
781 for (i = 0; i < hw->hw_rq_count; i++) {
782 count += hw->hw_rq[i]->entry_count;
785 hw->seq_pool = ocs_array_alloc(hw->os, sizeof(ocs_hw_sequence_t), count);
786 if (hw->seq_pool == NULL) {
787 ocs_log_err(hw->os, "malloc seq_pool failed\n");
788 return OCS_HW_RTN_NO_MEMORY;
792 if(ocs_hw_rx_post(hw)) {
793 ocs_log_err(hw->os, "WARNING - error posting RQ buffers\n");
796 /* Allocate rpi_ref if not previously allocated */
797 if (hw->rpi_ref == NULL) {
798 hw->rpi_ref = ocs_malloc(hw->os, max_rpi * sizeof(*hw->rpi_ref),
799 OCS_M_ZERO | OCS_M_NOWAIT);
800 if (hw->rpi_ref == NULL) {
801 ocs_log_err(hw->os, "rpi_ref allocation failure (%d)\n", i);
802 return OCS_HW_RTN_NO_MEMORY;
806 for (i = 0; i < max_rpi; i ++) {
807 ocs_atomic_init(&hw->rpi_ref[i].rpi_count, 0);
808 ocs_atomic_init(&hw->rpi_ref[i].rpi_attached, 0);
811 ocs_memset(hw->domains, 0, sizeof(hw->domains));
813 /* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
814 if (hw->workaround.override_fcfi) {
815 hw->first_domain_idx = -1;
818 ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));
820 /* Register a FCFI to allow unsolicited frames to be routed to the driver */
821 if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) {
823 if (hw->hw_mrq_count) {
824 ocs_log_debug(hw->os, "using REG_FCFI MRQ\n");
826 rc = ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE, 0, 0);
827 if (rc != OCS_HW_RTN_SUCCESS) {
828 ocs_log_err(hw->os, "REG_FCFI_MRQ FCFI registration failed\n");
832 rc = ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_MRQ_MODE, 0, 0);
833 if (rc != OCS_HW_RTN_SUCCESS) {
834 ocs_log_err(hw->os, "REG_FCFI_MRQ MRQ registration failed\n");
838 sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
840 ocs_log_debug(hw->os, "using REG_FCFI standard\n");
842 /* Set the filter match/mask values from hw's filter_def values */
/* filter_def packs r_ctl mask/match and type mask/match into one 32-bit word */
843 for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
844 rq_cfg[i].rq_id = 0xffff;
845 rq_cfg[i].r_ctl_mask = (uint8_t) hw->config.filter_def[i];
846 rq_cfg[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8);
847 rq_cfg[i].type_mask = (uint8_t) (hw->config.filter_def[i] >> 16);
848 rq_cfg[i].type_match = (uint8_t) (hw->config.filter_def[i] >> 24);
852 * Update the rq_id's of the FCF configuration (don't update more than the number
853 * of rq_cfg elements)
855 for (i = 0; i < OCS_MIN(hw->hw_rq_count, SLI4_CMD_REG_FCFI_NUM_RQ_CFG); i++) {
856 hw_rq_t *rq = hw->hw_rq[i];
858 for (j = 0; j < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; j++) {
859 uint32_t mask = (rq->filter_mask != 0) ? rq->filter_mask : 1;
860 if (mask & (1U << j)) {
861 rq_cfg[j].rq_id = rq->hdr->id;
862 ocs_log_debug(hw->os, "REG_FCFI: filter[%d] %08X -> RQ[%d] id=%d\n",
863 j, hw->config.filter_def[j], i, rq->hdr->id);
868 rc = OCS_HW_RTN_ERROR;
870 if (sli_cmd_reg_fcfi(&hw->sli, buf, SLI4_BMBX_SIZE, 0, rq_cfg, 0)) {
871 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
874 if (rc != OCS_HW_RTN_SUCCESS) {
875 ocs_log_err(hw->os, "FCFI registration failed\n");
/* remember the FCFI the firmware assigned for later unreg/reg */
878 hw->fcf_indicator = ((sli4_cmd_reg_fcfi_t *)buf)->fcfi;
884 * Allocate the WQ request tag pool, if not previously allocated (the request tag value is 16 bits,
885 * thus the pool allocation size of 64k)
887 rc = ocs_hw_reqtag_init(hw);
889 ocs_log_err(hw->os, "ocs_pool_alloc hw_wq_callback_t failed: %d\n", rc);
893 rc = ocs_hw_setup_io(hw);
895 ocs_log_err(hw->os, "IO allocation failure\n");
899 rc = ocs_hw_init_io(hw);
901 ocs_log_err(hw->os, "IO initialization failure\n");
905 ocs_queue_history_init(hw->os, &hw->q_hist);
907 /* get hw link config; polling, so callback will be called immediately */
908 hw->linkcfg = OCS_HW_LINKCFG_NA;
909 ocs_hw_get_linkcfg(hw, OCS_CMD_POLL, ocs_hw_init_linkcfg_cb, hw);
911 /* if lancer ethernet, ethernet ports need to be enabled */
912 if ((hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) &&
913 (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_ETHERNET)) {
914 if (ocs_hw_set_eth_license(hw, hw->eth_license)) {
915 /* log warning but continue */
916 ocs_log_err(hw->os, "Failed to set ethernet license\n");
920 /* Set the DIF seed - only for lancer right now */
921 if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli) &&
922 ocs_hw_set_dif_seed(hw) != OCS_HW_RTN_SUCCESS) {
923 ocs_log_err(hw->os, "Failed to set DIF seed value\n");
927 /* Set the DIF mode - skyhawk only */
928 if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli) &&
929 sli_get_dif_capable(&hw->sli)) {
930 rc = ocs_hw_set_dif_mode(hw);
931 if (rc != OCS_HW_RTN_SUCCESS) {
932 ocs_log_err(hw->os, "Failed to set DIF mode value\n");
938 * Arming the EQ allows (e.g.) interrupts when CQ completions write EQ entries
940 for (i = 0; i < hw->eq_count; i++) {
941 sli_queue_arm(&hw->sli, &hw->eq[i], TRUE);
/* build id->index hash tables so completions can find their queues */
947 for (i = 0; i < hw->rq_count; i++) {
948 ocs_hw_queue_hash_add(hw->rq_hash, hw->rq[i].id, i);
954 for (i = 0; i < hw->wq_count; i++) {
955 ocs_hw_queue_hash_add(hw->wq_hash, hw->wq[i].id, i);
959 * Arming the CQ allows (e.g.) MQ completions to write CQ entries
961 for (i = 0; i < hw->cq_count; i++) {
962 ocs_hw_queue_hash_add(hw->cq_hash, hw->cq[i].id, i);
963 sli_queue_arm(&hw->sli, &hw->cq[i], TRUE);
966 /* record the fact that the queues are functional */
967 hw->state = OCS_HW_STATE_ACTIVE;
969 /* Note: Must be after the IOs are setup and the state is active*/
970 if (ocs_hw_rqpair_init(hw)) {
971 ocs_log_err(hw->os, "WARNING - error initializing RQ pair\n");
974 /* finally kick off periodic timer to check for timed out target WQEs */
975 if (hw->config.emulate_tgt_wqe_timeout) {
976 ocs_setup_timer(hw->os, &hw->wqe_timer, target_wqe_timer_cb, hw,
977 OCS_HW_WQ_TIMER_PERIOD_MS);
981 * Allocate a HW IOs for send frame. Allocate one for each Class 1 WQ, or if there
982 * are none of those, allocate one for WQ[0]
984 if ((count = ocs_varray_get_count(hw->wq_class_array[1])) > 0) {
985 for (i = 0; i < count; i++) {
986 hw_wq_t *wq = ocs_varray_iter_next(hw->wq_class_array[1]);
987 wq->send_frame_io = ocs_hw_io_alloc(hw);
988 if (wq->send_frame_io == NULL) {
989 ocs_log_err(hw->os, "ocs_hw_io_alloc for send_frame_io failed\n");
993 hw->hw_wq[0]->send_frame_io = ocs_hw_io_alloc(hw);
994 if (hw->hw_wq[0]->send_frame_io == NULL) {
995 ocs_log_err(hw->os, "ocs_hw_io_alloc for send_frame_io failed\n");
999 /* Initialize send frame frame sequence id */
1000 ocs_atomic_init(&hw->send_frame_seq_id, 0);
1002 /* Initialize watchdog timer if enabled by user */
1003 hw->expiration_logged = 0;
1004 if(hw->watchdog_timeout) {
1005 if((hw->watchdog_timeout < 1) || (hw->watchdog_timeout > 65534)) {
1006 ocs_log_err(hw->os, "watchdog_timeout out of range: Valid range is 1 - 65534\n");
1007 }else if(!ocs_hw_config_watchdog_timer(hw)) {
1008 ocs_log_info(hw->os, "watchdog timer configured with timeout = %d seconds \n", hw->watchdog_timeout);
1012 if (ocs_dma_alloc(hw->os, &hw->domain_dmem, 112, 4)) {
1013 ocs_log_err(hw->os, "domain node memory allocation fail\n");
1014 return OCS_HW_RTN_NO_MEMORY;
1017 if (ocs_dma_alloc(hw->os, &hw->fcf_dmem, OCS_HW_READ_FCF_SIZE, OCS_HW_READ_FCF_SIZE)) {
1018 ocs_log_err(hw->os, "domain fcf memory allocation fail\n");
1019 return OCS_HW_RTN_NO_MEMORY;
1022 if ((0 == hw->loop_map.size) && ocs_dma_alloc(hw->os, &hw->loop_map,
1023 SLI4_MIN_LOOP_MAP_BYTES, 4)) {
1024 ocs_log_err(hw->os, "Loop dma alloc failed size:%d \n", hw->loop_map.size);
1027 return OCS_HW_RTN_SUCCESS;
1031 * @brief Configure Multi-RQ
1033 * @param hw Hardware context allocated by the caller.
1034 * @param mode 1 to set MRQ filters and 0 to set FCFI index
1035 * @param vlanid valid in mode 0
1036 * @param fcf_index valid in mode 0
1038 * @return Returns 0 on success, or a non-zero value on failure.
/*
 * Build per-RQ filter match/mask entries from hw->config.filter_def and
 * register them with the FCFI via the REG_FCFI_MRQ mailbox command (polled).
 * On FCFI-set mode, records the returned FCF indicator in hw->fcf_indicator.
 * NOTE(review): this listing is non-contiguous; intermediate source lines
 * (declarations of i/j/rq, braces, breaks) are elided.
 */
1041 ocs_hw_config_mrq(ocs_hw_t *hw, uint8_t mode, uint16_t vlanid, uint16_t fcf_index)
1043 uint8_t buf[SLI4_BMBX_SIZE], mrq_bitmask = 0;
1045 sli4_cmd_reg_fcfi_mrq_t *rsp = NULL;
1047 sli4_cmd_rq_cfg_t rq_filter[SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG];
1050 if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
1054 /* Set the filter match/mask values from hw's filter_def values */
1055 for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
1056 rq_filter[i].rq_id = 0xffff; /* 0xffff marks an unused filter slot */
1057 rq_filter[i].r_ctl_mask = (uint8_t) hw->config.filter_def[i];
1058 rq_filter[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8);
1059 rq_filter[i].type_mask = (uint8_t) (hw->config.filter_def[i] >> 16);
1060 rq_filter[i].type_match = (uint8_t) (hw->config.filter_def[i] >> 24);
1063 /* Accumulate counts for each filter type used, build rq_ids[] list */
1064 for (i = 0; i < hw->hw_rq_count; i++) {
1066 for (j = 0; j < SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG; j++) {
1067 if (rq->filter_mask & (1U << j)) {
1068 /* Already used. Bail out if it's not the RQ-set (MRQ) case */
1070 if (!rq->is_mrq || (rq_filter[j].rq_id != rq->base_mrq_id)) {
1071 ocs_log_err(hw->os, "Wrong queue topology.\n");
1072 return OCS_HW_RTN_ERROR;
1078 rq_filter[j].rq_id = rq->base_mrq_id;
1079 mrq_bitmask |= (1U << j);
1081 rq_filter[j].rq_id = rq->hdr->id;
1088 /* Invoke REG_FCFI_MRQ */
1089 rc = sli_cmd_reg_fcfi_mrq(&hw->sli,
1091 SLI4_BMBX_SIZE, /* size */
1093 fcf_index, /* fcf_index */
1094 vlanid, /* vlan_id */
1095 hw->config.rq_selection_policy, /* RQ selection policy*/
1096 mrq_bitmask, /* MRQ bitmask */
1097 hw->hw_mrq_count, /* num_mrqs */
1098 rq_filter); /* RQ filter */
1100 ocs_log_err(hw->os, "sli_cmd_reg_fcfi_mrq() failed: %d\n", rc);
1101 return OCS_HW_RTN_ERROR;
/* Issue synchronously; the command response is written back into buf */
1104 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
1106 rsp = (sli4_cmd_reg_fcfi_mrq_t *)buf;
1108 if ((rc != OCS_HW_RTN_SUCCESS) || (rsp->hdr.status)) {
1109 ocs_log_err(hw->os, "FCFI MRQ registration failed. cmd = %x status = %x\n",
1110 rsp->hdr.command, rsp->hdr.status);
1111 return OCS_HW_RTN_ERROR;
1114 if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
1115 hw->fcf_indicator = rsp->fcfi;
1121 * @brief Callback function for getting linkcfg during HW initialization.
1123 * @param status Status of the linkcfg get operation.
1124 * @param value Link configuration enum to which the link configuration is set.
1125 * @param arg Callback argument (ocs_hw_t *).
/*
 * Completion callback for the linkcfg query made during HW init: stores the
 * reported link configuration in hw->linkcfg.
 * NOTE(review): the status check between lines 1132 and 1134 is elided from
 * this listing — presumably line 1134 runs on success and 1136 on failure
 * (falling back to OCS_HW_LINKCFG_NA); confirm against the full source.
 */
1130 ocs_hw_init_linkcfg_cb(int32_t status, uintptr_t value, void *arg)
1132 ocs_hw_t *hw = (ocs_hw_t *)arg;
1134 hw->linkcfg = (ocs_hw_linkcfg_e)value;
1136 hw->linkcfg = OCS_HW_LINKCFG_NA;
1138 ocs_log_debug(hw->os, "linkcfg=%d\n", hw->linkcfg);
1142 * @ingroup devInitShutdown
1143 * @brief Tear down the Hardware Abstraction Layer module.
1146 * Frees memory structures needed by the device, and shuts down the device. Does
1147 * not free the HW context memory (which is done by the caller).
1149 * @param hw Hardware context allocated by the caller.
1151 * @return Returns 0 on success, or a non-zero value on failure.
/*
 * Tear down the HW layer: stop timers/healthcheck, drain or cancel pending
 * mailbox commands, release RPI resources, free per-IO SGLs and DMA regions,
 * free all SLI queues, and tear down the SLI layer. Leaves hw->state at
 * OCS_HW_STATE_UNINITIALIZED; does NOT free the hw context itself.
 * NOTE(review): listing is non-contiguous — braces, else-arms and some
 * statements are elided.
 */
1154 ocs_hw_teardown(ocs_hw_t *hw)
1157 uint32_t iters = 10;/*XXX*/
1159 uint32_t destroy_queues;
1160 uint32_t free_memory;
1163 ocs_log_err(NULL, "bad parameter(s) hw=%p\n", hw);
1164 return OCS_HW_RTN_ERROR;
/* Queues only exist on the chip if we reached ACTIVE; host memory exists
 * once we got past UNINITIALIZED. Both flags are passed to sli_queue_free. */
1167 destroy_queues = (hw->state == OCS_HW_STATE_ACTIVE);
1168 free_memory = (hw->state != OCS_HW_STATE_UNINITIALIZED);
1170 /* shutdown target wqe timer */
1171 shutdown_target_wqe_timer(hw);
1173 /* Cancel watchdog timer if enabled */
1174 if(hw->watchdog_timeout) {
1175 hw->watchdog_timeout = 0;
1176 ocs_hw_config_watchdog_timer(hw); /* timeout==0 presumably disables it */
1179 /* Cancel Sliport Healthcheck */
1180 if(hw->sliport_healthcheck) {
1181 hw->sliport_healthcheck = 0;
1182 ocs_hw_config_sli_port_health_check(hw, 0, 0);
1185 if (hw->state != OCS_HW_STATE_QUEUES_ALLOCATED) {
1187 hw->state = OCS_HW_STATE_TEARDOWN_IN_PROGRESS;
1191 /* If there are outstanding commands, wait for them to complete */
1192 while (!ocs_list_empty(&hw->cmd_head) && iters) {
1198 if (ocs_list_empty(&hw->cmd_head)) {
1199 ocs_log_debug(hw->os, "All commands completed on MQ queue\n");
1201 ocs_log_debug(hw->os, "Some commands still pending on MQ queue\n");
1204 /* Cancel any remaining commands */
1205 ocs_hw_command_cancel(hw);
1207 hw->state = OCS_HW_STATE_TEARDOWN_IN_PROGRESS;
1210 ocs_lock_free(&hw->cmd_lock);
1212 /* Free unregistered RPI if workaround is in force */
1213 if (hw->workaround.use_unregistered_rpi) {
1214 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, hw->workaround.unregistered_rid);
1217 max_rpi = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
/* Log (but do not fail on) any RPI still referenced at teardown time */
1219 for (i = 0; i < max_rpi; i++) {
1220 if (ocs_atomic_read(&hw->rpi_ref[i].rpi_count)) {
1221 ocs_log_debug(hw->os, "non-zero ref [%d]=%d\n",
1222 i, ocs_atomic_read(&hw->rpi_ref[i].rpi_count));
1225 ocs_free(hw->os, hw->rpi_ref, max_rpi * sizeof(*hw->rpi_ref));
1229 ocs_dma_free(hw->os, &hw->rnode_mem);
/* Free each HW IO's SGL DMA buffer and the IO object itself */
1232 for (i = 0; i < hw->config.n_io; i++) {
1233 if (hw->io[i] && (hw->io[i]->sgl != NULL) &&
1234 (hw->io[i]->sgl->virt != NULL)) {
1235 if(hw->io[i]->is_port_owned) {
1236 ocs_lock_free(&hw->io[i]->axr_lock);
1238 ocs_dma_free(hw->os, hw->io[i]->sgl);
1240 ocs_free(hw->os, hw->io[i], sizeof(ocs_hw_io_t));
1243 ocs_free(hw->os, hw->wqe_buffs, hw->config.n_io * hw->sli.config.wqe_size);
1244 hw->wqe_buffs = NULL;
1245 ocs_free(hw->os, hw->io, hw->config.n_io * sizeof(ocs_hw_io_t *));
1249 ocs_dma_free(hw->os, &hw->xfer_rdy);
1250 ocs_dma_free(hw->os, &hw->dump_sges);
1251 ocs_dma_free(hw->os, &hw->loop_map);
1253 ocs_lock_free(&hw->io_lock);
1254 ocs_lock_free(&hw->io_abort_lock);
/* Release every SLI queue (WQ/RQ/MQ/CQ/EQ) per the flags computed above */
1257 for (i = 0; i < hw->wq_count; i++) {
1258 sli_queue_free(&hw->sli, &hw->wq[i], destroy_queues, free_memory);
1262 for (i = 0; i < hw->rq_count; i++) {
1263 sli_queue_free(&hw->sli, &hw->rq[i], destroy_queues, free_memory);
1266 for (i = 0; i < hw->mq_count; i++) {
1267 sli_queue_free(&hw->sli, &hw->mq[i], destroy_queues, free_memory);
1270 for (i = 0; i < hw->cq_count; i++) {
1271 sli_queue_free(&hw->sli, &hw->cq[i], destroy_queues, free_memory);
1274 for (i = 0; i < hw->eq_count; i++) {
1275 sli_queue_free(&hw->sli, &hw->eq[i], destroy_queues, free_memory);
1278 ocs_hw_qtop_free(hw->qtop);
1280 /* Free rq buffers */
1283 hw_queue_teardown(hw);
1285 ocs_hw_rqpair_teardown(hw);
1287 if (sli_teardown(&hw->sli)) {
1288 ocs_log_err(hw->os, "SLI teardown failed\n");
1291 ocs_queue_history_free(&hw->q_hist);
1293 /* record the fact that the queues are non-functional */
1294 hw->state = OCS_HW_STATE_UNINITIALIZED;
1296 /* free sequence free pool */
1297 ocs_array_free(hw->seq_pool);
1298 hw->seq_pool = NULL;
1300 /* free hw_wq_callback pool */
1301 ocs_pool_free(hw->wq_reqtag_pool);
1303 ocs_dma_free(hw->os, &hw->domain_dmem);
1304 ocs_dma_free(hw->os, &hw->fcf_dmem);
1305 /* Mark HW setup as not having been called */
1306 hw->hw_setup_called = FALSE;
1308 return OCS_HW_RTN_SUCCESS;
/*
 * Reset the chip (function-level or firmware reset), then clear driver state
 * (commands, IOs, domains, FCFIs) and reset all SLI queues so the HW can be
 * re-initialized. Leaves hw->state at OCS_HW_STATE_QUEUES_ALLOCATED.
 * NOTE(review): listing is non-contiguous — `iters` declaration, switch
 * header, braces and breaks are elided.
 */
1312 ocs_hw_reset(ocs_hw_t *hw, ocs_hw_reset_e reset)
1315 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
1317 ocs_hw_state_e prev_state = hw->state;
1319 if (hw->state != OCS_HW_STATE_ACTIVE) {
1320 ocs_log_test(hw->os, "HW state %d is not active\n", hw->state);
1323 hw->state = OCS_HW_STATE_RESET_IN_PROGRESS;
1325 /* shutdown target wqe timer */
1326 shutdown_target_wqe_timer(hw);
1331 * If an mailbox command requiring a DMA is outstanding (i.e. SFP/DDM),
1332 * then the FW will UE when the reset is issued. So attempt to complete
1333 * all mailbox commands.
1336 while (!ocs_list_empty(&hw->cmd_head) && iters) {
1342 if (ocs_list_empty(&hw->cmd_head)) {
1343 ocs_log_debug(hw->os, "All commands completed on MQ queue\n");
1345 ocs_log_debug(hw->os, "Some commands still pending on MQ queue\n");
1348 /* Reset the chip */
1350 case OCS_HW_RESET_FUNCTION:
1351 ocs_log_debug(hw->os, "issuing function level reset\n");
1352 if (sli_reset(&hw->sli)) {
1353 ocs_log_err(hw->os, "sli_reset failed\n");
1354 rc = OCS_HW_RTN_ERROR;
1357 case OCS_HW_RESET_FIRMWARE:
1358 ocs_log_debug(hw->os, "issuing firmware reset\n");
1359 if (sli_fw_reset(&hw->sli)) {
1360 ocs_log_err(hw->os, "sli_soft_reset failed\n");
1361 rc = OCS_HW_RTN_ERROR;
1364 * Because the FW reset leaves the FW in a non-running state,
1365 * follow that with a regular reset.
1367 ocs_log_debug(hw->os, "issuing function level reset\n");
1368 if (sli_reset(&hw->sli)) {
1369 ocs_log_err(hw->os, "sli_reset failed\n");
1370 rc = OCS_HW_RTN_ERROR;
1374 ocs_log_test(hw->os, "unknown reset type - no reset performed\n");
1375 hw->state = prev_state; /* restore state: nothing was reset */
1376 return OCS_HW_RTN_ERROR;
1379 /* Not safe to walk command/io lists unless they've been initialized */
1380 if (prev_state != OCS_HW_STATE_UNINITIALIZED) {
1381 ocs_hw_command_cancel(hw);
1383 /* Clean up the inuse list, the free list and the wait free list */
1384 ocs_hw_io_cancel(hw);
1386 ocs_memset(hw->domains, 0, sizeof(hw->domains));
1387 ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));
1389 ocs_hw_link_event_init(hw);
1391 ocs_lock(&hw->io_lock);
1392 /* The io lists should be empty, but remove any that didn't get cleaned up. */
1393 while (!ocs_list_empty(&hw->io_timed_wqe)) {
1394 ocs_list_remove_head(&hw->io_timed_wqe);
1396 /* Don't clean up the io_inuse list, the backend will do that when it finishes the IO */
1398 while (!ocs_list_empty(&hw->io_free)) {
1399 ocs_list_remove_head(&hw->io_free);
1401 while (!ocs_list_empty(&hw->io_wait_free)) {
1402 ocs_list_remove_head(&hw->io_wait_free);
1405 /* Reset the request tag pool, the HW IO request tags are reassigned in ocs_hw_setup_io() */
1406 ocs_hw_reqtag_reset(hw);
1408 ocs_unlock(&hw->io_lock);
/* Reset every queue's host-side state (WQ/RQ/MQ/CQ/EQ) */
1411 if (prev_state != OCS_HW_STATE_UNINITIALIZED) {
1412 for (i = 0; i < hw->wq_count; i++) {
1413 sli_queue_reset(&hw->sli, &hw->wq[i]);
1416 for (i = 0; i < hw->rq_count; i++) {
1417 sli_queue_reset(&hw->sli, &hw->rq[i]);
/* Drop stale RQ buffer tracking entries for each hw_rq */
1420 for (i = 0; i < hw->hw_rq_count; i++) {
1421 hw_rq_t *rq = hw->hw_rq[i];
1422 if (rq->rq_tracker != NULL) {
1425 for (j = 0; j < rq->entry_count; j++) {
1426 rq->rq_tracker[j] = NULL;
1431 for (i = 0; i < hw->mq_count; i++) {
1432 sli_queue_reset(&hw->sli, &hw->mq[i]);
1435 for (i = 0; i < hw->cq_count; i++) {
1436 sli_queue_reset(&hw->sli, &hw->cq[i]);
1439 for (i = 0; i < hw->eq_count; i++) {
1440 sli_queue_reset(&hw->sli, &hw->eq[i]);
1443 /* Free rq buffers */
1446 /* Teardown the HW queue topology */
1447 hw_queue_teardown(hw);
1450 /* Free rq buffers */
1455 * Re-apply the run-time workarounds after clearing the SLI config
1456 * fields in sli_reset.
1458 ocs_hw_workaround_setup(hw);
1459 hw->state = OCS_HW_STATE_QUEUES_ALLOCATED;
/* Return the number of event queues (interrupt vectors) configured. */
1465 ocs_hw_get_num_eq(ocs_hw_t *hw)
1467 return hw->eq_count;
/*
 * Return true if the SLIPORT error registers show the watchdog-timer
 * expiration signature (ERROR1 == 0x2 and ERROR2 == 0x10).
 */
1471 ocs_hw_get_fw_timed_out(ocs_hw_t *hw)
1473 /* The error values below are taken from LOWLEVEL_SET_WATCHDOG_TIMER_rev1.pdf
1474 * No further explanation is given in the document.
1476 return (sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR1) == 0x2 &&
1477 sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR2) == 0x10);
/*
 * Read a uint32 HW property into *value. Returns OCS_HW_RTN_SUCCESS, or
 * OCS_HW_RTN_ERROR for unknown properties / unsupported values. Most cases
 * are direct reads of hw->config fields or sli_get_* capability queries.
 * NOTE(review): listing is non-contiguous — the switch header, break
 * statements and several case labels are elided.
 */
1482 ocs_hw_get(ocs_hw_t *hw, ocs_hw_property_e prop, uint32_t *value)
1484 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
1488 return OCS_HW_RTN_ERROR;
1495 *value = hw->config.n_io;
1498 *value = (hw->config.n_sgl - SLI4_SGE_MAX_RESERVED); /* usable SGEs only */
1501 *value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI);
1503 case OCS_HW_MAX_NODES:
1504 *value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
1506 case OCS_HW_MAX_RQ_ENTRIES:
1507 *value = hw->num_qentries[SLI_QTYPE_RQ];
1509 case OCS_HW_RQ_DEFAULT_BUFFER_SIZE:
1510 *value = hw->config.rq_default_buffer_size;
1512 case OCS_HW_AUTO_XFER_RDY_CAPABLE:
1513 *value = sli_get_auto_xfer_rdy_capable(&hw->sli);
1515 case OCS_HW_AUTO_XFER_RDY_XRI_CNT:
1516 *value = hw->config.auto_xfer_rdy_xri_cnt;
1518 case OCS_HW_AUTO_XFER_RDY_SIZE:
1519 *value = hw->config.auto_xfer_rdy_size;
1521 case OCS_HW_AUTO_XFER_RDY_BLK_SIZE:
/* Translate the chip encoding back to a block size (cases elided here) */
1522 switch (hw->config.auto_xfer_rdy_blk_size_chip) {
1540 rc = OCS_HW_RTN_ERROR;
1544 case OCS_HW_AUTO_XFER_RDY_T10_ENABLE:
1545 *value = hw->config.auto_xfer_rdy_t10_enable;
1547 case OCS_HW_AUTO_XFER_RDY_P_TYPE:
1548 *value = hw->config.auto_xfer_rdy_p_type;
1550 case OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA:
1551 *value = hw->config.auto_xfer_rdy_ref_tag_is_lba;
1553 case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID:
1554 *value = hw->config.auto_xfer_rdy_app_tag_valid;
1556 case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE:
1557 *value = hw->config.auto_xfer_rdy_app_tag_value;
1559 case OCS_HW_MAX_SGE:
1560 *value = sli_get_max_sge(&hw->sli);
1562 case OCS_HW_MAX_SGL:
1563 *value = sli_get_max_sgl(&hw->sli);
1565 case OCS_HW_TOPOLOGY:
1567 * Infer link.status based on link.speed.
1568 * Report OCS_HW_TOPOLOGY_NONE if the link is down.
1570 if (hw->link.speed == 0) {
1571 *value = OCS_HW_TOPOLOGY_NONE;
1574 switch (hw->link.topology) {
1575 case SLI_LINK_TOPO_NPORT:
1576 *value = OCS_HW_TOPOLOGY_NPORT;
1578 case SLI_LINK_TOPO_LOOP:
1579 *value = OCS_HW_TOPOLOGY_LOOP;
1581 case SLI_LINK_TOPO_NONE:
1582 *value = OCS_HW_TOPOLOGY_NONE;
1585 ocs_log_test(hw->os, "unsupported topology %#x\n", hw->link.topology);
1586 rc = OCS_HW_RTN_ERROR;
1590 case OCS_HW_CONFIG_TOPOLOGY:
1591 *value = hw->config.topology;
1593 case OCS_HW_LINK_SPEED:
1594 *value = hw->link.speed;
1596 case OCS_HW_LINK_CONFIG_SPEED:
/* Map the FC_LINK_SPEED_* enum back to Mb/s (assignments elided) */
1597 switch (hw->config.speed) {
1598 case FC_LINK_SPEED_10G:
1601 case FC_LINK_SPEED_AUTO_16_8_4:
1604 case FC_LINK_SPEED_2G:
1607 case FC_LINK_SPEED_4G:
1610 case FC_LINK_SPEED_8G:
1613 case FC_LINK_SPEED_16G:
1616 case FC_LINK_SPEED_32G:
1620 ocs_log_test(hw->os, "unsupported speed %#x\n", hw->config.speed);
1621 rc = OCS_HW_RTN_ERROR;
1625 case OCS_HW_IF_TYPE:
1626 *value = sli_get_if_type(&hw->sli);
1628 case OCS_HW_SLI_REV:
1629 *value = sli_get_sli_rev(&hw->sli);
1631 case OCS_HW_SLI_FAMILY:
1632 *value = sli_get_sli_family(&hw->sli);
1634 case OCS_HW_DIF_CAPABLE:
1635 *value = sli_get_dif_capable(&hw->sli);
1637 case OCS_HW_DIF_SEED:
1638 *value = hw->config.dif_seed;
1640 case OCS_HW_DIF_MODE:
1641 *value = hw->config.dif_mode;
1643 case OCS_HW_DIF_MULTI_SEPARATE:
1644 /* Lancer supports multiple DIF separates */
1645 if (hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) {
1651 case OCS_HW_DUMP_MAX_SIZE:
1652 *value = hw->dump_size;
1654 case OCS_HW_DUMP_READY:
1655 *value = sli_dump_is_ready(&hw->sli);
1657 case OCS_HW_DUMP_PRESENT:
1658 *value = sli_dump_is_present(&hw->sli);
1660 case OCS_HW_RESET_REQUIRED:
1661 tmp = sli_reset_required(&hw->sli);
1663 rc = OCS_HW_RTN_ERROR;
1668 case OCS_HW_FW_ERROR:
1669 *value = sli_fw_error_status(&hw->sli);
1671 case OCS_HW_FW_READY:
1672 *value = sli_fw_ready(&hw->sli);
1674 case OCS_HW_FW_TIMED_OUT:
1675 *value = ocs_hw_get_fw_timed_out(hw);
1677 case OCS_HW_HIGH_LOGIN_MODE:
1678 *value = sli_get_hlm_capable(&hw->sli);
1680 case OCS_HW_PREREGISTER_SGL:
1681 *value = sli_get_sgl_preregister_required(&hw->sli);
1683 case OCS_HW_HW_REV1:
1684 *value = sli_get_hw_revision(&hw->sli, 0);
1686 case OCS_HW_HW_REV2:
1687 *value = sli_get_hw_revision(&hw->sli, 1);
1689 case OCS_HW_HW_REV3:
1690 *value = sli_get_hw_revision(&hw->sli, 2);
1692 case OCS_HW_LINKCFG:
1693 *value = hw->linkcfg;
1695 case OCS_HW_ETH_LICENSE:
1696 *value = hw->eth_license;
1698 case OCS_HW_LINK_MODULE_TYPE:
1699 *value = sli_get_link_module_type(&hw->sli);
1701 case OCS_HW_NUM_CHUTES:
1702 *value = ocs_hw_get_num_chutes(hw);
1704 case OCS_HW_DISABLE_AR_TGT_DIF:
1705 *value = hw->workaround.disable_ar_tgt_dif;
1707 case OCS_HW_EMULATE_I_ONLY_AAB:
1708 *value = hw->config.i_only_aab;
1710 case OCS_HW_EMULATE_TARGET_WQE_TIMEOUT:
1711 *value = hw->config.emulate_tgt_wqe_timeout;
1713 case OCS_HW_VPD_LEN:
1714 *value = sli_get_vpd_len(&hw->sli);
1716 case OCS_HW_SGL_CHAINING_CAPABLE:
1717 *value = sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported;
1719 case OCS_HW_SGL_CHAINING_ALLOWED:
1721 * SGL Chaining is allowed in the following cases:
1722 * 1. Lancer with host SGL Lists
1723 * 2. Skyhawk with pre-registered SGL Lists
1726 if ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
1727 !sli_get_sgl_preregister(&hw->sli) &&
1728 SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
1732 if ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
1733 sli_get_sgl_preregister(&hw->sli) &&
1734 ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
1735 (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli)))) {
1739 case OCS_HW_SGL_CHAINING_HOST_ALLOCATED:
1740 /* Only lancer supports host allocated SGL Chaining buffers. */
1741 *value = ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
1742 (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)));
1744 case OCS_HW_SEND_FRAME_CAPABLE:
1745 if (hw->workaround.ignore_send_frame) {
1748 /* Only lancer is capable */
1749 *value = sli_get_if_type(&hw->sli) == SLI4_IF_TYPE_LANCER_FC_ETH;
1752 case OCS_HW_RQ_SELECTION_POLICY:
1753 *value = hw->config.rq_selection_policy;
1755 case OCS_HW_RR_QUANTA:
1756 *value = hw->config.rr_quanta;
1758 case OCS_HW_MAX_VPORTS:
1759 *value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_VPI);
1761 ocs_log_test(hw->os, "unsupported property %#x\n", prop);
1762 rc = OCS_HW_RTN_ERROR;
/*
 * Return a pointer-valued HW property (WWNs, VPD, firmware/IPL names, port
 * number, BIOS version string) obtained from the SLI layer; NULL-ish result
 * semantics depend on the elided declaration of rc.
 * NOTE(review): listing is non-contiguous — switch header, breaks and the
 * return are elided.
 */
1769 ocs_hw_get_ptr(ocs_hw_t *hw, ocs_hw_property_e prop)
1774 case OCS_HW_WWN_NODE:
1775 rc = sli_get_wwn_node(&hw->sli);
1777 case OCS_HW_WWN_PORT:
1778 rc = sli_get_wwn_port(&hw->sli);
1781 /* make sure VPD length is non-zero */
1782 if (sli_get_vpd_len(&hw->sli)) {
1783 rc = sli_get_vpd(&hw->sli);
1787 rc = sli_get_fw_name(&hw->sli, 0);
1789 case OCS_HW_FW_REV2:
1790 rc = sli_get_fw_name(&hw->sli, 1);
1793 rc = sli_get_ipl_name(&hw->sli);
1795 case OCS_HW_PORTNUM:
1796 rc = sli_get_portnum(&hw->sli);
1798 case OCS_HW_BIOS_VERSION_STRING:
1799 rc = sli_get_bios_version_string(&hw->sli);
1802 ocs_log_test(hw->os, "unsupported property %#x\n", prop);
/*
 * Set a uint32 HW property, validating it against chip limits where
 * applicable. Returns OCS_HW_RTN_SUCCESS or OCS_HW_RTN_ERROR.
 * NOTE(review): listing is non-contiguous — the switch header, most break
 * statements and several case labels are elided.
 *
 * FIX(review): original lines 1998/1999 are consecutive, so the ESOC
 * assignment fell through into OCS_HW_HIGH_LOGIN_MODE, unintentionally
 * calling sli_set_hlm() and clobbering rc. A break is inserted below.
 */
1811 ocs_hw_set(ocs_hw_t *hw, ocs_hw_property_e prop, uint32_t value)
1813 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
1817 if (value > sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI) ||
1819 ocs_log_test(hw->os, "IO value out of range %d vs %d\n",
1820 value, sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI));
1821 rc = OCS_HW_RTN_ERROR;
1823 hw->config.n_io = value;
/* Reserve SGEs the hardware needs before checking against the chip max */
1827 value += SLI4_SGE_MAX_RESERVED;
1828 if (value > sli_get_max_sgl(&hw->sli)) {
1829 ocs_log_test(hw->os, "SGL value out of range %d vs %d\n",
1830 value, sli_get_max_sgl(&hw->sli));
1831 rc = OCS_HW_RTN_ERROR;
1833 hw->config.n_sgl = value;
1836 case OCS_HW_TOPOLOGY:
1837 if ((sli_get_medium(&hw->sli) != SLI_LINK_MEDIUM_FC) &&
1838 (value != OCS_HW_TOPOLOGY_AUTO)) {
1839 ocs_log_test(hw->os, "unsupported topology=%#x medium=%#x\n",
1840 value, sli_get_medium(&hw->sli));
1841 rc = OCS_HW_RTN_ERROR;
1846 case OCS_HW_TOPOLOGY_AUTO:
1847 if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) {
1848 sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC);
1850 sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FCOE);
1853 case OCS_HW_TOPOLOGY_NPORT:
1854 sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC_DA);
1856 case OCS_HW_TOPOLOGY_LOOP:
1857 sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC_AL);
1860 ocs_log_test(hw->os, "unsupported topology %#x\n", value);
1861 rc = OCS_HW_RTN_ERROR;
1863 hw->config.topology = value;
1865 case OCS_HW_LINK_SPEED:
/* FCoE media only accepts auto (0) or 10000; FC accepts the FC speeds */
1866 if (sli_get_medium(&hw->sli) != SLI_LINK_MEDIUM_FC) {
1868 case 0: /* Auto-speed negotiation */
1869 case 10000: /* FCoE speed */
1870 hw->config.speed = FC_LINK_SPEED_10G;
1873 ocs_log_test(hw->os, "unsupported speed=%#x medium=%#x\n",
1874 value, sli_get_medium(&hw->sli));
1875 rc = OCS_HW_RTN_ERROR;
1881 case 0: /* Auto-speed negotiation */
1882 hw->config.speed = FC_LINK_SPEED_AUTO_16_8_4;
1884 case 2000: /* FC speeds */
1885 hw->config.speed = FC_LINK_SPEED_2G;
1888 hw->config.speed = FC_LINK_SPEED_4G;
1891 hw->config.speed = FC_LINK_SPEED_8G;
1894 hw->config.speed = FC_LINK_SPEED_16G;
1897 hw->config.speed = FC_LINK_SPEED_32G;
1900 ocs_log_test(hw->os, "unsupported speed %d\n", value);
1901 rc = OCS_HW_RTN_ERROR;
1904 case OCS_HW_DIF_SEED:
1905 /* Set the DIF seed - only for lancer right now */
1906 if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
1907 ocs_log_test(hw->os, "DIF seed not supported for this device\n");
1908 rc = OCS_HW_RTN_ERROR;
1910 hw->config.dif_seed = value;
1913 case OCS_HW_DIF_MODE:
1915 case OCS_HW_DIF_MODE_INLINE:
1917 * Make sure we support inline DIF.
1919 * Note: Having both bits clear means that we have old
1920 * FW that doesn't set the bits.
1922 if (sli_is_dif_inline_capable(&hw->sli)) {
1923 hw->config.dif_mode = value;
1925 ocs_log_test(hw->os, "chip does not support DIF inline\n");
1926 rc = OCS_HW_RTN_ERROR;
1929 case OCS_HW_DIF_MODE_SEPARATE:
1930 /* Make sure we support DIF separates. */
1931 if (sli_is_dif_separate_capable(&hw->sli)) {
1932 hw->config.dif_mode = value;
1934 ocs_log_test(hw->os, "chip does not support DIF separate\n");
1935 rc = OCS_HW_RTN_ERROR;
1939 case OCS_HW_RQ_PROCESS_LIMIT: {
1943 /* For each hw_rq object, set its parent CQ limit value */
1944 for (i = 0; i < hw->hw_rq_count; i++) {
1946 hw->cq[rq->cq->instance].proc_limit = value;
1950 case OCS_HW_RQ_DEFAULT_BUFFER_SIZE:
1951 hw->config.rq_default_buffer_size = value;
1953 case OCS_HW_AUTO_XFER_RDY_XRI_CNT:
1954 hw->config.auto_xfer_rdy_xri_cnt = value;
1956 case OCS_HW_AUTO_XFER_RDY_SIZE:
1957 hw->config.auto_xfer_rdy_size = value;
1959 case OCS_HW_AUTO_XFER_RDY_BLK_SIZE:
/* Translate block size to the chip encoding (case labels elided) */
1962 hw->config.auto_xfer_rdy_blk_size_chip = 0;
1965 hw->config.auto_xfer_rdy_blk_size_chip = 1;
1968 hw->config.auto_xfer_rdy_blk_size_chip = 2;
1971 hw->config.auto_xfer_rdy_blk_size_chip = 3;
1974 hw->config.auto_xfer_rdy_blk_size_chip = 4;
1977 ocs_log_err(hw->os, "Invalid block size %d\n",
1979 rc = OCS_HW_RTN_ERROR;
1982 case OCS_HW_AUTO_XFER_RDY_T10_ENABLE:
1983 hw->config.auto_xfer_rdy_t10_enable = value;
1985 case OCS_HW_AUTO_XFER_RDY_P_TYPE:
1986 hw->config.auto_xfer_rdy_p_type = value;
1988 case OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA:
1989 hw->config.auto_xfer_rdy_ref_tag_is_lba = value;
1991 case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID:
1992 hw->config.auto_xfer_rdy_app_tag_valid = value;
1994 case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE:
1995 hw->config.auto_xfer_rdy_app_tag_value = value;
1998 hw->config.esoc = value;
break; /* FIX(review): inserted — was falling through into the HLM case below */
1999 case OCS_HW_HIGH_LOGIN_MODE:
2000 rc = sli_set_hlm(&hw->sli, value);
2002 case OCS_HW_PREREGISTER_SGL:
2003 rc = sli_set_sgl_preregister(&hw->sli, value);
2005 case OCS_HW_ETH_LICENSE:
2006 hw->eth_license = value;
2008 case OCS_HW_EMULATE_I_ONLY_AAB:
2009 hw->config.i_only_aab = value;
2011 case OCS_HW_EMULATE_TARGET_WQE_TIMEOUT:
2012 hw->config.emulate_tgt_wqe_timeout = value;
2015 hw->config.bounce = value;
2017 case OCS_HW_RQ_SELECTION_POLICY:
2018 hw->config.rq_selection_policy = value;
2020 case OCS_HW_RR_QUANTA:
2021 hw->config.rr_quanta = value;
2024 ocs_log_test(hw->os, "unsupported property %#x\n", prop);
2025 rc = OCS_HW_RTN_ERROR;
/*
 * Set a pointer-valued HW property. OCS_HW_FILTER_DEF parses a comma-
 * separated list of numeric filter definitions into hw->config.filter_def[],
 * zeroing the array first.
 * NOTE(review): listing is non-contiguous — switch header, the declaration
 * of p/idx and the return are elided.
 */
2033 ocs_hw_set_ptr(ocs_hw_t *hw, ocs_hw_property_e prop, void *value)
2035 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2038 case OCS_HW_WAR_VERSION:
2039 hw->hw_war_version = value;
2041 case OCS_HW_FILTER_DEF: {
2045 for (idx = 0; idx < ARRAY_SIZE(hw->config.filter_def); idx++) {
2046 hw->config.filter_def[idx] = 0;
/* Walk the comma-separated list; stop at array capacity or end of string */
2049 for (idx = 0; (idx < ARRAY_SIZE(hw->config.filter_def)) && (p != NULL) && *p; ) {
2050 hw->config.filter_def[idx++] = ocs_strtoul(p, 0, 0);
2051 p = ocs_strchr(p, ',');
2060 ocs_log_test(hw->os, "unsupported property %#x\n", prop);
2061 rc = OCS_HW_RTN_ERROR;
2067 * @ingroup interrupt
2068 * @brief Check for the events associated with the interrupt vector.
2070 * @param hw Hardware context.
2071 * @param vector Zero-based interrupt vector number.
2073 * @return Returns 0 on success, or a non-zero value on failure.
/*
 * Check whether the EQ for the given zero-based interrupt vector has entries,
 * re-arming the queue when it is empty. Safe to call in any HW state except
 * UNINITIALIZED (no queues exist then).
 * NOTE(review): listing is non-contiguous — rc declaration, braces and the
 * return are elided.
 *
 * FIX(review): the bounds check used `vector > hw->eq_count`, which lets
 * vector == eq_count through and then indexes hw->eq[vector] one past the
 * array (hw->eq is indexed 0..eq_count-1 below). Changed to `>=`.
 */
2076 ocs_hw_event_check(ocs_hw_t *hw, uint32_t vector)
2081 ocs_log_err(NULL, "HW context NULL?!?\n");
2085 if (vector >= hw->eq_count) {
2086 ocs_log_err(hw->os, "vector %d. max %d\n",
2087 vector, hw->eq_count);
2092 * The caller should disable interrupts if they wish to prevent us
2093 * from processing during a shutdown. The following states are defined:
2094 * OCS_HW_STATE_UNINITIALIZED - No queues allocated
2095 * OCS_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset,
2096 * queues are cleared.
2097 * OCS_HW_STATE_ACTIVE - Chip and queues are operational
2098 * OCS_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions
2099 * OCS_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox
2102 if (hw->state != OCS_HW_STATE_UNINITIALIZED) {
2103 rc = sli_queue_is_empty(&hw->sli, &hw->eq[vector]);
2105 /* Re-arm queue if there are no entries */
2107 sli_queue_arm(&hw->sli, &hw->eq[vector], TRUE);
/*
 * Bounce-path entry for unsolicited frames: recover the sequence and its hw
 * context from the opaque arg and hand the sequence to the registered
 * unsolicited callback (set via ocs_hw_callback(OCS_HW_CB_UNSOLICITED)).
 */
2114 ocs_hw_unsol_process_bounce(void *arg)
2116 ocs_hw_sequence_t *seq = arg;
2117 ocs_hw_t *hw = seq->hw;
2119 ocs_hw_assert(hw != NULL);
2120 ocs_hw_assert(hw->callback.unsolicited != NULL);
2122 hw->callback.unsolicited(hw->args.unsolicited, seq);
/*
 * Top-level per-vector event processing: look up the hw_eq_t for the vector
 * and process its entries via ocs_hw_eq_process(), bounded by
 * max_isr_time_msec. Bails out when no queues exist (UNINITIALIZED state).
 * NOTE(review): listing is non-contiguous — eq/rc declarations and the
 * return are elided; no visible bounds check on `vector` here.
 */
2126 ocs_hw_process(ocs_hw_t *hw, uint32_t vector, uint32_t max_isr_time_msec)
2134 * The caller should disable interrupts if they wish to prevent us
2135 * from processing during a shutdown. The following states are defined:
2136 * OCS_HW_STATE_UNINITIALIZED - No queues allocated
2137 * OCS_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset,
2138 * queues are cleared.
2139 * OCS_HW_STATE_ACTIVE - Chip and queues are operational
2140 * OCS_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions
2141 * OCS_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox
2144 if (hw->state == OCS_HW_STATE_UNINITIALIZED) {
2148 /* Get pointer to hw_eq_t */
2149 eq = hw->hw_eq[vector];
2151 OCS_STAT(eq->use_count++);
2153 rc = ocs_hw_eq_process(hw, eq, max_isr_time_msec);
2159 * @ingroup interrupt
2160 * @brief Process events associated with an EQ.
2164 * @n @n Without a mechanism to terminate the completion processing loop, it
2165 * is possible under some workload conditions for the loop to never terminate
2166 * (or at least take longer than the OS is happy to have an interrupt handler
2167 * or kernel thread context hold a CPU without yielding).
2168 * @n @n The approach taken here is to periodically check how much time
2169 * we have been in this
2170 * processing loop, and if we exceed a predetermined time (multiple seconds), the
2171 * loop is terminated, and ocs_hw_process() returns.
2173 * @param hw Hardware context.
2174 * @param eq Pointer to HW EQ object.
2175 * @param max_isr_time_msec Maximum time in msec to stay in this function.
2177 * @return Returns 0 on success, or a non-zero value on failure.
/*
 * Drain EQEs from the given EQ, dispatching each referenced CQ to
 * ocs_hw_cq_process(). Periodically (every OCS_HW_TIMECHECK_ITERATIONS
 * entries) checks elapsed wall time and stops after max_isr_time_msec so an
 * ISR/kthread cannot monopolize the CPU. Re-arms the EQ on exit.
 * NOTE(review): listing is non-contiguous — cq_id/rc/tstart/telapsed
 * declarations, braces and the return are elided.
 */
2180 ocs_hw_eq_process(ocs_hw_t *hw, hw_eq_t *eq, uint32_t max_isr_time_msec)
2182 uint8_t eqe[sizeof(sli4_eqe_t)] = { 0 };
2183 uint32_t done = FALSE;
2184 uint32_t tcheck_count;
2188 tcheck_count = OCS_HW_TIMECHECK_ITERATIONS;
2189 tstart = ocs_msectime();
2193 while (!done && !sli_queue_read(&hw->sli, eq->queue, eqe)) {
2197 rc = sli_eq_parse(&hw->sli, eqe, &cq_id);
2203 * Received a sentinel EQE indicating the EQ is full.
/* EQ-full sentinel: sweep every CQ since we can't trust individual EQEs */
2206 for (i = 0; i < hw->cq_count; i++) {
2207 ocs_hw_cq_process(hw, hw->hw_cq[i]);
/* Normal EQE: map the CQ id to our CQ array via the hash */
2214 int32_t index = ocs_hw_queue_hash_find(hw->cq_hash, cq_id);
2215 if (likely(index >= 0)) {
2216 ocs_hw_cq_process(hw, hw->hw_cq[index]);
2218 ocs_log_err(hw->os, "bad CQ_ID %#06x\n", cq_id);
/* Periodically release consumed entries without re-arming interrupts */
2223 if (eq->queue->n_posted > (eq->queue->posted_limit)) {
2224 sli_queue_arm(&hw->sli, eq->queue, FALSE);
2227 if (tcheck_count && (--tcheck_count == 0)) {
2228 tcheck_count = OCS_HW_TIMECHECK_ITERATIONS;
2229 telapsed = ocs_msectime() - tstart;
2230 if (telapsed >= max_isr_time_msec) {
2235 sli_queue_eq_arm(&hw->sli, eq->queue, TRUE);
2241 * @brief Submit queued (pending) mbx commands.
2244 * Submit queued mailbox commands.
2245 * --- Assumes that hw->cmd_lock is held ---
2247 * @param hw Hardware context.
2249 * @return Returns 0 on success, or a negative error code value on failure.
/*
 * Move queued mailbox commands from cmd_pending to cmd_head and write them
 * to the MQ, while there is room (head count below OCS_HW_MQ_DEPTH - 1).
 * Caller MUST hold hw->cmd_lock.
 * NOTE(review): listing is non-contiguous — rc declaration, the empty-list
 * break and the return are elided; the rc logged at line 2268 is set in an
 * elided line — TODO confirm against the full source.
 */
2252 ocs_hw_cmd_submit_pending(ocs_hw_t *hw)
2254 ocs_command_ctx_t *ctx;
2257 /* Assumes lock held */
2259 /* Only submit MQE if there's room */
2260 while (hw->cmd_head_count < (OCS_HW_MQ_DEPTH - 1)) {
2261 ctx = ocs_list_remove_head(&hw->cmd_pending);
2265 ocs_list_add_tail(&hw->cmd_head, ctx);
2266 hw->cmd_head_count++;
2267 if (sli_queue_write(&hw->sli, hw->mq, ctx->buf) < 0) {
2268 ocs_log_test(hw->os, "sli_queue_write failed: %d\n", rc);
2278 * @brief Issue a SLI command.
2281 * Send a mailbox command to the hardware, and either wait for a completion
2282 * (OCS_CMD_POLL) or get an optional asynchronous completion (OCS_CMD_NOWAIT).
2284 * @param hw Hardware context.
2285 * @param cmd Buffer containing a formatted command and results.
2286 * @param opts Command options:
2287 * - OCS_CMD_POLL - Command executes synchronously and busy-waits for the completion.
2288 * - OCS_CMD_NOWAIT - Command executes asynchronously. Uses callback.
2289 * @param cb Function callback used for asynchronous mode. May be NULL.
2290 * @n Prototype is <tt>(*cb)(void *arg, uint8_t *cmd)</tt>.
2291 * @n @n @b Note: If the
2292 * callback function pointer is NULL, the results of the command are silently
2293 * discarded, allowing this pointer to exist solely on the stack.
2294 * @param arg Argument passed to an asynchronous callback.
2296 * @return Returns 0 on success, or a non-zero value on failure.
/*
 * Issue a mailbox command. OCS_CMD_POLL uses the bootstrap mailbox
 * synchronously (response copied back into cmd); OCS_CMD_NOWAIT queues a
 * command context on cmd_pending for asynchronous submission, invoking cb
 * on completion. Rejects commands outright when the chip is in a UE/error
 * state. NOTE(review): listing is non-contiguous — braces, else-arms and
 * the ctx cb/arg/buf setup lines are elided.
 */
2299 ocs_hw_command(ocs_hw_t *hw, uint8_t *cmd, uint32_t opts, void *cb, void *arg)
2301 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
2304 * If the chip is in an error state (UE'd) then reject this mailbox
2307 if (sli_fw_error_status(&hw->sli) > 0) {
2308 uint32_t err1 = sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR1);
2309 uint32_t err2 = sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR2);
/* err1==0x2 / err2==0x10 is the watchdog heartbeat-expired signature;
 * log it only once (expiration_logged latch) */
2310 if (hw->expiration_logged == 0 && err1 == 0x2 && err2 == 0x10) {
2311 hw->expiration_logged = 1;
2312 ocs_log_crit(hw->os,"Emulex: Heartbeat expired after %d seconds\n",
2313 hw->watchdog_timeout);
2315 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2316 ocs_log_crit(hw->os, "status=%#x error1=%#x error2=%#x\n",
2317 sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_STATUS),
2320 return OCS_HW_RTN_ERROR;
2323 if (OCS_CMD_POLL == opts) {
2325 ocs_lock(&hw->cmd_lock);
2326 if (hw->mq->length && !sli_queue_is_empty(&hw->sli, hw->mq)) {
2328 * Can't issue Boot-strap mailbox command with other
2329 * mail-queue commands pending as this interaction is
2332 rc = OCS_HW_RTN_ERROR;
2334 void *bmbx = hw->sli.bmbx.virt;
2336 ocs_memset(bmbx, 0, SLI4_BMBX_SIZE);
2337 ocs_memcpy(bmbx, cmd, SLI4_BMBX_SIZE);
2339 if (sli_bmbx_command(&hw->sli) == 0) {
2340 rc = OCS_HW_RTN_SUCCESS;
2341 ocs_memcpy(cmd, bmbx, SLI4_BMBX_SIZE); /* copy response back to caller */
2344 ocs_unlock(&hw->cmd_lock);
2345 } else if (OCS_CMD_NOWAIT == opts) {
2346 ocs_command_ctx_t *ctx = NULL;
2348 ctx = ocs_malloc(hw->os, sizeof(ocs_command_ctx_t), OCS_M_ZERO | OCS_M_NOWAIT);
2350 ocs_log_err(hw->os, "can't allocate command context\n");
2351 return OCS_HW_RTN_NO_RESOURCES;
2354 if (hw->state != OCS_HW_STATE_ACTIVE) {
2355 ocs_log_err(hw->os, "Can't send command, HW state=%d\n", hw->state);
2356 ocs_free(hw->os, ctx, sizeof(*ctx));
2357 return OCS_HW_RTN_ERROR;
2367 ocs_lock(&hw->cmd_lock);
2369 /* Add to pending list */
2370 ocs_list_add_tail(&hw->cmd_pending, ctx);
2372 /* Submit as much of the pending list as we can */
2373 if (ocs_hw_cmd_submit_pending(hw) == 0) {
2374 rc = OCS_HW_RTN_SUCCESS;
2377 ocs_unlock(&hw->cmd_lock);
2384 * @ingroup devInitShutdown
2385 * @brief Register a callback for the given event.
2387 * @param hw Hardware context.
2388 * @param which Event of interest.
2389 * @param func Function to call when the event occurs.
2390 * @param arg Argument passed to the callback function.
2392 * @return Returns 0 on success, or a non-zero value on failure.
2395 ocs_hw_callback(ocs_hw_t *hw, ocs_hw_callback_e which, void *func, void *arg)
2398 if (!hw || !func || (which >= OCS_HW_CB_MAX)) {
2399 ocs_log_err(NULL, "bad parameter hw=%p which=%#x func=%p\n",
2401 return OCS_HW_RTN_ERROR;
2405 case OCS_HW_CB_DOMAIN:
2406 hw->callback.domain = func;
2407 hw->args.domain = arg;
2409 case OCS_HW_CB_PORT:
2410 hw->callback.port = func;
2411 hw->args.port = arg;
2413 case OCS_HW_CB_UNSOLICITED:
2414 hw->callback.unsolicited = func;
2415 hw->args.unsolicited = arg;
2417 case OCS_HW_CB_REMOTE_NODE:
2418 hw->callback.rnode = func;
2419 hw->args.rnode = arg;
2421 case OCS_HW_CB_BOUNCE:
2422 hw->callback.bounce = func;
2423 hw->args.bounce = arg;
2426 ocs_log_test(hw->os, "unknown callback %#x\n", which);
2427 return OCS_HW_RTN_ERROR;
2430 return OCS_HW_RTN_SUCCESS;
2435 * @brief Allocate a port object.
2438 * This function allocates a VPI object for the port and stores it in the
2439 * indicator field of the port object.
2441 * @param hw Hardware context.
2442 * @param sport SLI port object used to connect to the domain.
2443 * @param domain Domain object associated with this port (may be NULL).
2444 * @param wwpn Port's WWPN in big-endian order, or NULL to use default.
2446 * @return Returns 0 on success, or a non-zero value on failure.
2449 ocs_hw_port_alloc(ocs_hw_t *hw, ocs_sli_port_t *sport, ocs_domain_t *domain,
2452 uint8_t *cmd = NULL;
2453 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2456 sport->indicator = UINT32_MAX;
2458 sport->ctx.app = sport;
2459 sport->sm_free_req_pending = 0;
2462 * Check if the chip is in an error state (UE'd) before proceeding.
2464 if (sli_fw_error_status(&hw->sli) > 0) {
2465 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2466 return OCS_HW_RTN_ERROR;
2470 ocs_memcpy(&sport->sli_wwpn, wwpn, sizeof(sport->sli_wwpn));
2473 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_VPI, &sport->indicator, &index)) {
2474 ocs_log_err(hw->os, "FCOE_VPI allocation failure\n");
2475 return OCS_HW_RTN_ERROR;
2478 if (domain != NULL) {
2479 ocs_sm_function_t next = NULL;
2481 cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
2483 ocs_log_err(hw->os, "command memory allocation failed\n");
2484 rc = OCS_HW_RTN_NO_MEMORY;
2485 goto ocs_hw_port_alloc_out;
2488 /* If the WWPN is NULL, fetch the default WWPN and WWNN before
2489 * initializing the VPI
2492 next = __ocs_hw_port_alloc_read_sparm64;
2494 next = __ocs_hw_port_alloc_init_vpi;
2497 ocs_sm_transition(&sport->ctx, next, cmd);
2499 /* This is the convention for the HW, not SLI */
2500 ocs_log_test(hw->os, "need WWN for physical port\n");
2501 rc = OCS_HW_RTN_ERROR;
2503 /* domain NULL and wwpn non-NULL */
2504 ocs_sm_transition(&sport->ctx, __ocs_hw_port_alloc_init, NULL);
2507 ocs_hw_port_alloc_out:
2508 if (rc != OCS_HW_RTN_SUCCESS) {
2509 ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
2511 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
2519 * @brief Attach a physical/virtual SLI port to a domain.
2522 * This function registers a previously-allocated VPI with the
2525 * @param hw Hardware context.
2526 * @param sport Pointer to the SLI port object.
2527 * @param fc_id Fibre Channel ID to associate with this port.
2529 * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code on failure.
2532 ocs_hw_port_attach(ocs_hw_t *hw, ocs_sli_port_t *sport, uint32_t fc_id)
2534 uint8_t *buf = NULL;
2535 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2537 if (!hw || !sport) {
2538 ocs_log_err(hw ? hw->os : NULL,
2539 "bad parameter(s) hw=%p sport=%p\n", hw,
2541 return OCS_HW_RTN_ERROR;
2545 * Check if the chip is in an error state (UE'd) before proceeding.
2547 if (sli_fw_error_status(&hw->sli) > 0) {
2548 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2549 return OCS_HW_RTN_ERROR;
2552 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2554 ocs_log_err(hw->os, "no buffer for command\n");
2555 return OCS_HW_RTN_NO_MEMORY;
2558 sport->fc_id = fc_id;
2559 ocs_sm_post_event(&sport->ctx, OCS_EVT_HW_PORT_REQ_ATTACH, buf);
2564 * @brief Called when the port control command completes.
2567 * We only need to free the mailbox command buffer.
2569 * @param hw Hardware context.
2570 * @param status Status field from the mbox completion.
2571 * @param mqe Mailbox response structure.
2572 * @param arg Pointer to a callback function that signals the caller that the command is done.
2574 * @return Returns 0.
2577 ocs_hw_cb_port_control(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
2579 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
2585 * @brief Control a port (initialize, shutdown, or set link configuration).
2588 * This function controls a port depending on the @c ctrl parameter:
2589 * - @b OCS_HW_PORT_INIT -
2590 * Issues the CONFIG_LINK and INIT_LINK commands for the specified port.
2591 * The HW generates an OCS_HW_DOMAIN_FOUND event when the link comes up.
2593 * - @b OCS_HW_PORT_SHUTDOWN -
2594 * Issues the DOWN_LINK command for the specified port.
2595 * The HW generates an OCS_HW_DOMAIN_LOST event when the link is down.
2597 * - @b OCS_HW_PORT_SET_LINK_CONFIG -
2598 * Sets the link configuration.
2600 * @param hw Hardware context.
2601 * @param ctrl Specifies the operation:
2602 * - OCS_HW_PORT_INIT
2603 * - OCS_HW_PORT_SHUTDOWN
2604 * - OCS_HW_PORT_SET_LINK_CONFIG
2606 * @param value Operation-specific value.
2607 * - OCS_HW_PORT_INIT - Selective reset AL_PA
2608 * - OCS_HW_PORT_SHUTDOWN - N/A
2609 * - OCS_HW_PORT_SET_LINK_CONFIG - An enum #ocs_hw_linkcfg_e value.
2611 * @param cb Callback function to invoke the following operation.
2612 * - OCS_HW_PORT_INIT/OCS_HW_PORT_SHUTDOWN - NULL (link events
2613 * are handled by the OCS_HW_CB_DOMAIN callbacks).
2614 * - OCS_HW_PORT_SET_LINK_CONFIG - Invoked after linkcfg mailbox command
2617 * @param arg Callback argument invoked after the command completes.
2618 * - OCS_HW_PORT_INIT/OCS_HW_PORT_SHUTDOWN - NULL (link events
2619 * are handled by the OCS_HW_CB_DOMAIN callbacks).
2620 * - OCS_HW_PORT_SET_LINK_CONFIG - Invoked after linkcfg mailbox command
2623 * @return Returns 0 on success, or a non-zero value on failure.
2626 ocs_hw_port_control(ocs_hw_t *hw, ocs_hw_port_e ctrl, uintptr_t value, ocs_hw_port_control_cb_t cb, void *arg)
2628 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
2631 case OCS_HW_PORT_INIT:
2635 uint8_t reset_alpa = 0;
2637 if (SLI_LINK_MEDIUM_FC == sli_get_medium(&hw->sli)) {
2640 cfg_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2641 if (cfg_link == NULL) {
2642 ocs_log_err(hw->os, "no buffer for command\n");
2643 return OCS_HW_RTN_NO_MEMORY;
2646 if (sli_cmd_config_link(&hw->sli, cfg_link, SLI4_BMBX_SIZE)) {
2647 rc = ocs_hw_command(hw, cfg_link, OCS_CMD_NOWAIT,
2648 ocs_hw_cb_port_control, NULL);
2651 if (rc != OCS_HW_RTN_SUCCESS) {
2652 ocs_free(hw->os, cfg_link, SLI4_BMBX_SIZE);
2653 ocs_log_err(hw->os, "CONFIG_LINK failed\n");
2656 speed = hw->config.speed;
2657 reset_alpa = (uint8_t)(value & 0xff);
2659 speed = FC_LINK_SPEED_10G;
2663 * Bring link up, unless FW version is not supported
2665 if (hw->workaround.fw_version_too_low) {
2666 if (SLI4_IF_TYPE_LANCER_FC_ETH == hw->sli.if_type) {
2667 ocs_log_err(hw->os, "Cannot bring up link. Please update firmware to %s or later (current version is %s)\n",
2668 OCS_FW_VER_STR(OCS_MIN_FW_VER_LANCER), (char *) sli_get_fw_name(&hw->sli,0));
2670 ocs_log_err(hw->os, "Cannot bring up link. Please update firmware to %s or later (current version is %s)\n",
2671 OCS_FW_VER_STR(OCS_MIN_FW_VER_SKYHAWK), (char *) sli_get_fw_name(&hw->sli, 0));
2674 return OCS_HW_RTN_ERROR;
2677 rc = OCS_HW_RTN_ERROR;
2679 /* Allocate a new buffer for the init_link command */
2680 init_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2681 if (init_link == NULL) {
2682 ocs_log_err(hw->os, "no buffer for command\n");
2683 return OCS_HW_RTN_NO_MEMORY;
2687 if (sli_cmd_init_link(&hw->sli, init_link, SLI4_BMBX_SIZE, speed, reset_alpa)) {
2688 rc = ocs_hw_command(hw, init_link, OCS_CMD_NOWAIT,
2689 ocs_hw_cb_port_control, NULL);
2691 /* Free buffer on error, since no callback is coming */
2692 if (rc != OCS_HW_RTN_SUCCESS) {
2693 ocs_free(hw->os, init_link, SLI4_BMBX_SIZE);
2694 ocs_log_err(hw->os, "INIT_LINK failed\n");
2698 case OCS_HW_PORT_SHUTDOWN:
2702 down_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2703 if (down_link == NULL) {
2704 ocs_log_err(hw->os, "no buffer for command\n");
2705 return OCS_HW_RTN_NO_MEMORY;
2707 if (sli_cmd_down_link(&hw->sli, down_link, SLI4_BMBX_SIZE)) {
2708 rc = ocs_hw_command(hw, down_link, OCS_CMD_NOWAIT,
2709 ocs_hw_cb_port_control, NULL);
2711 /* Free buffer on error, since no callback is coming */
2712 if (rc != OCS_HW_RTN_SUCCESS) {
2713 ocs_free(hw->os, down_link, SLI4_BMBX_SIZE);
2714 ocs_log_err(hw->os, "DOWN_LINK failed\n");
2718 case OCS_HW_PORT_SET_LINK_CONFIG:
2719 rc = ocs_hw_set_linkcfg(hw, (ocs_hw_linkcfg_e)value, OCS_CMD_NOWAIT, cb, arg);
2722 ocs_log_test(hw->os, "unhandled control %#x\n", ctrl);
2732 * @brief Free port resources.
2735 * Issue the UNREG_VPI command to free the assigned VPI context.
2737 * @param hw Hardware context.
2738 * @param sport SLI port object used to connect to the domain.
2740 * @return Returns 0 on success, or a non-zero value on failure.
2743 ocs_hw_port_free(ocs_hw_t *hw, ocs_sli_port_t *sport)
2745 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2747 if (!hw || !sport) {
2748 ocs_log_err(hw ? hw->os : NULL,
2749 "bad parameter(s) hw=%p sport=%p\n", hw,
2751 return OCS_HW_RTN_ERROR;
2755 * Check if the chip is in an error state (UE'd) before proceeding.
2757 if (sli_fw_error_status(&hw->sli) > 0) {
2758 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2759 return OCS_HW_RTN_ERROR;
2762 ocs_sm_post_event(&sport->ctx, OCS_EVT_HW_PORT_REQ_FREE, NULL);
2768 * @brief Allocate a fabric domain object.
2771 * This function starts a series of commands needed to connect to the domain, including
2776 * @b Note: Not all SLI interface types use all of the above commands.
2777 * @n @n Upon successful allocation, the HW generates a OCS_HW_DOMAIN_ALLOC_OK
2778 * event. On failure, it generates a OCS_HW_DOMAIN_ALLOC_FAIL event.
2780 * @param hw Hardware context.
2781 * @param domain Pointer to the domain object.
2782 * @param fcf FCF index.
2783 * @param vlan VLAN ID.
2785 * @return Returns 0 on success, or a non-zero value on failure.
2788 ocs_hw_domain_alloc(ocs_hw_t *hw, ocs_domain_t *domain, uint32_t fcf, uint32_t vlan)
2790 uint8_t *cmd = NULL;
2793 if (!hw || !domain || !domain->sport) {
2794 ocs_log_err(NULL, "bad parameter(s) hw=%p domain=%p sport=%p\n",
2795 hw, domain, domain ? domain->sport : NULL);
2796 return OCS_HW_RTN_ERROR;
2800 * Check if the chip is in an error state (UE'd) before proceeding.
2802 if (sli_fw_error_status(&hw->sli) > 0) {
2803 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2804 return OCS_HW_RTN_ERROR;
2807 cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
2809 ocs_log_err(hw->os, "command memory allocation failed\n");
2810 return OCS_HW_RTN_NO_MEMORY;
2813 domain->dma = hw->domain_dmem;
2816 domain->sm.app = domain;
2818 domain->fcf_indicator = UINT32_MAX;
2819 domain->vlan_id = vlan;
2820 domain->indicator = UINT32_MAX;
2822 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_VFI, &domain->indicator, &index)) {
2823 ocs_log_err(hw->os, "FCOE_VFI allocation failure\n");
2825 ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
2827 return OCS_HW_RTN_ERROR;
2830 ocs_sm_transition(&domain->sm, __ocs_hw_domain_init, cmd);
2831 return OCS_HW_RTN_SUCCESS;
2836 * @brief Attach a SLI port to a domain.
2838 * @param hw Hardware context.
2839 * @param domain Pointer to the domain object.
2840 * @param fc_id Fibre Channel ID to associate with this port.
2842 * @return Returns 0 on success, or a non-zero value on failure.
2845 ocs_hw_domain_attach(ocs_hw_t *hw, ocs_domain_t *domain, uint32_t fc_id)
2847 uint8_t *buf = NULL;
2848 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2850 if (!hw || !domain) {
2851 ocs_log_err(hw ? hw->os : NULL,
2852 "bad parameter(s) hw=%p domain=%p\n",
2854 return OCS_HW_RTN_ERROR;
2858 * Check if the chip is in an error state (UE'd) before proceeding.
2860 if (sli_fw_error_status(&hw->sli) > 0) {
2861 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2862 return OCS_HW_RTN_ERROR;
2865 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2867 ocs_log_err(hw->os, "no buffer for command\n");
2868 return OCS_HW_RTN_NO_MEMORY;
2871 domain->sport->fc_id = fc_id;
2872 ocs_sm_post_event(&domain->sm, OCS_EVT_HW_DOMAIN_REQ_ATTACH, buf);
2878 * @brief Free a fabric domain object.
2881 * Free both the driver and SLI port resources associated with the domain.
2883 * @param hw Hardware context.
2884 * @param domain Pointer to the domain object.
2886 * @return Returns 0 on success, or a non-zero value on failure.
2889 ocs_hw_domain_free(ocs_hw_t *hw, ocs_domain_t *domain)
2891 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2893 if (!hw || !domain) {
2894 ocs_log_err(hw ? hw->os : NULL,
2895 "bad parameter(s) hw=%p domain=%p\n",
2897 return OCS_HW_RTN_ERROR;
2901 * Check if the chip is in an error state (UE'd) before proceeding.
2903 if (sli_fw_error_status(&hw->sli) > 0) {
2904 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2905 return OCS_HW_RTN_ERROR;
2908 ocs_sm_post_event(&domain->sm, OCS_EVT_HW_DOMAIN_REQ_FREE, NULL);
2914 * @brief Free a fabric domain object.
2917 * Free the driver resources associated with the domain. The difference between
2918 * this call and ocs_hw_domain_free() is that this call assumes resources no longer
2919 * exist on the SLI port, due to a reset or after some error conditions.
2921 * @param hw Hardware context.
2922 * @param domain Pointer to the domain object.
2924 * @return Returns 0 on success, or a non-zero value on failure.
2927 ocs_hw_domain_force_free(ocs_hw_t *hw, ocs_domain_t *domain)
2929 if (!hw || !domain) {
2930 ocs_log_err(NULL, "bad parameter(s) hw=%p domain=%p\n", hw, domain);
2931 return OCS_HW_RTN_ERROR;
2934 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator);
2936 return OCS_HW_RTN_SUCCESS;
2941 * @brief Allocate a remote node object.
2943 * @param hw Hardware context.
2944 * @param rnode Allocated remote node object to initialize.
2945 * @param fc_addr FC address of the remote node.
2946 * @param sport SLI port used to connect to remote node.
2948 * @return Returns 0 on success, or a non-zero value on failure.
2951 ocs_hw_node_alloc(ocs_hw_t *hw, ocs_remote_node_t *rnode, uint32_t fc_addr,
2952 ocs_sli_port_t *sport)
2954 /* Check for invalid indicator */
2955 if (UINT32_MAX != rnode->indicator) {
2956 ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x rpi=%#x\n",
2957 fc_addr, rnode->indicator);
2958 return OCS_HW_RTN_ERROR;
2962 * Check if the chip is in an error state (UE'd) before proceeding.
2964 if (sli_fw_error_status(&hw->sli) > 0) {
2965 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2966 return OCS_HW_RTN_ERROR;
2969 /* NULL SLI port indicates an unallocated remote node */
2970 rnode->sport = NULL;
2972 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &rnode->indicator, &rnode->index)) {
2973 ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x\n",
2975 return OCS_HW_RTN_ERROR;
2978 rnode->fc_id = fc_addr;
2979 rnode->sport = sport;
2981 return OCS_HW_RTN_SUCCESS;
2986 * @brief Update a remote node object with the remote port's service parameters.
2988 * @param hw Hardware context.
2989 * @param rnode Allocated remote node object to initialize.
2990 * @param sparms DMA buffer containing the remote port's service parameters.
2992 * @return Returns 0 on success, or a non-zero value on failure.
2995 ocs_hw_node_attach(ocs_hw_t *hw, ocs_remote_node_t *rnode, ocs_dma_t *sparms)
2997 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
2998 uint8_t *buf = NULL;
3001 if (!hw || !rnode || !sparms) {
3002 ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p sparms=%p\n",
3004 return OCS_HW_RTN_ERROR;
3008 * Check if the chip is in an error state (UE'd) before proceeding.
3010 if (sli_fw_error_status(&hw->sli) > 0) {
3011 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
3012 return OCS_HW_RTN_ERROR;
3015 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
3017 ocs_log_err(hw->os, "no buffer for command\n");
3018 return OCS_HW_RTN_NO_MEMORY;
3022 * If the attach count is non-zero, this RPI has already been registered.
3023 * Otherwise, register the RPI
3025 if (rnode->index == UINT32_MAX) {
3026 ocs_log_err(NULL, "bad parameter rnode->index invalid\n");
3027 ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3028 return OCS_HW_RTN_ERROR;
3030 count = ocs_atomic_add_return(&hw->rpi_ref[rnode->index].rpi_count, 1);
3033 * Can't attach multiple FC_ID's to a node unless High Login
3036 if (sli_get_hlm(&hw->sli) == FALSE) {
3037 ocs_log_test(hw->os, "attach to already attached node HLM=%d count=%d\n",
3038 sli_get_hlm(&hw->sli), count);
3039 rc = OCS_HW_RTN_SUCCESS;
3041 rnode->node_group = TRUE;
3042 rnode->attached = ocs_atomic_read(&hw->rpi_ref[rnode->index].rpi_attached);
3043 rc = rnode->attached ? OCS_HW_RTN_SUCCESS_SYNC : OCS_HW_RTN_SUCCESS;
3046 rnode->node_group = FALSE;
3048 ocs_display_sparams("", "reg rpi", 0, NULL, sparms->virt);
3049 if (sli_cmd_reg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, rnode->fc_id,
3050 rnode->indicator, rnode->sport->indicator,
3051 sparms, 0, (hw->auto_xfer_rdy_enabled && hw->config.auto_xfer_rdy_t10_enable))) {
3052 rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT,
3053 ocs_hw_cb_node_attach, rnode);
3058 if (rc < OCS_HW_RTN_SUCCESS) {
3059 ocs_atomic_sub_return(&hw->rpi_ref[rnode->index].rpi_count, 1);
3060 ocs_log_err(hw->os, "%s error\n", count ? "HLM" : "REG_RPI");
3062 ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3070 * @brief Free a remote node resource.
3072 * @param hw Hardware context.
3073 * @param rnode Remote node object to free.
3075 * @return Returns 0 on success, or a non-zero value on failure.
3078 ocs_hw_node_free_resources(ocs_hw_t *hw, ocs_remote_node_t *rnode)
3080 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
3082 if (!hw || !rnode) {
3083 ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p\n",
3085 return OCS_HW_RTN_ERROR;
3089 if (!rnode->attached) {
3090 if (rnode->indicator != UINT32_MAX) {
3091 if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, rnode->indicator)) {
3092 ocs_log_err(hw->os, "FCOE_RPI free failure RPI %d addr=%#x\n",
3093 rnode->indicator, rnode->fc_id);
3094 rc = OCS_HW_RTN_ERROR;
3096 rnode->node_group = FALSE;
3097 rnode->indicator = UINT32_MAX;
3098 rnode->index = UINT32_MAX;
3099 rnode->free_group = FALSE;
3103 ocs_log_err(hw->os, "Error: rnode is still attached\n");
3104 rc = OCS_HW_RTN_ERROR;
3114 * @brief Free a remote node object.
3116 * @param hw Hardware context.
3117 * @param rnode Remote node object to free.
3119 * @return Returns 0 on success, or a non-zero value on failure.
3122 ocs_hw_node_detach(ocs_hw_t *hw, ocs_remote_node_t *rnode)
3124 uint8_t *buf = NULL;
3125 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS_SYNC;
3126 uint32_t index = UINT32_MAX;
3128 if (!hw || !rnode) {
3129 ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p\n",
3131 return OCS_HW_RTN_ERROR;
3135 * Check if the chip is in an error state (UE'd) before proceeding.
3137 if (sli_fw_error_status(&hw->sli) > 0) {
3138 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
3139 return OCS_HW_RTN_ERROR;
3142 index = rnode->index;
3148 if (!rnode->attached) {
3149 return OCS_HW_RTN_SUCCESS_SYNC;
3152 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
3154 ocs_log_err(hw->os, "no buffer for command\n");
3155 return OCS_HW_RTN_NO_MEMORY;
3158 count = ocs_atomic_sub_return(&hw->rpi_ref[index].rpi_count, 1);
3161 /* There are no other references to this RPI
3162 * so unregister it and free the resource. */
3164 rnode->node_group = FALSE;
3165 rnode->free_group = TRUE;
3167 if (sli_get_hlm(&hw->sli) == FALSE) {
3168 ocs_log_test(hw->os, "Invalid count with HLM disabled, count=%d\n",
3171 fc_id = rnode->fc_id & 0x00ffffff;
3174 rc = OCS_HW_RTN_ERROR;
3176 if (sli_cmd_unreg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, rnode->indicator,
3177 SLI_RSRC_FCOE_RPI, fc_id)) {
3178 rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_node_free, rnode);
3181 if (rc != OCS_HW_RTN_SUCCESS) {
3182 ocs_log_err(hw->os, "UNREG_RPI failed\n");
3183 ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3184 rc = OCS_HW_RTN_ERROR;
3193 * @brief Free all remote node objects.
3195 * @param hw Hardware context.
3197 * @return Returns 0 on success, or a non-zero value on failure.
3200 ocs_hw_node_free_all(ocs_hw_t *hw)
3202 uint8_t *buf = NULL;
3203 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
3206 ocs_log_err(NULL, "bad parameter hw=%p\n", hw);
3207 return OCS_HW_RTN_ERROR;
3211 * Check if the chip is in an error state (UE'd) before proceeding.
3213 if (sli_fw_error_status(&hw->sli) > 0) {
3214 ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
3215 return OCS_HW_RTN_ERROR;
3218 buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
3220 ocs_log_err(hw->os, "no buffer for command\n");
3221 return OCS_HW_RTN_NO_MEMORY;
3224 if (sli_cmd_unreg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, 0xffff,
3225 SLI_RSRC_FCOE_FCFI, UINT32_MAX)) {
3226 rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_node_free_all,
3230 if (rc != OCS_HW_RTN_SUCCESS) {
3231 ocs_log_err(hw->os, "UNREG_RPI failed\n");
3232 ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3233 rc = OCS_HW_RTN_ERROR;
3240 ocs_hw_node_group_alloc(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup)
3243 if (!hw || !ngroup) {
3244 ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p\n",
3246 return OCS_HW_RTN_ERROR;
3249 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &ngroup->indicator,
3251 ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x\n",
3253 return OCS_HW_RTN_ERROR;
3256 return OCS_HW_RTN_SUCCESS;
3260 ocs_hw_node_group_attach(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup, ocs_remote_node_t *rnode)
3263 if (!hw || !ngroup || !rnode) {
3264 ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p rnode=%p\n",
3266 return OCS_HW_RTN_ERROR;
3269 if (rnode->attached) {
3270 ocs_log_err(hw->os, "node already attached RPI=%#x addr=%#x\n",
3271 rnode->indicator, rnode->fc_id);
3272 return OCS_HW_RTN_ERROR;
3275 if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, rnode->indicator)) {
3276 ocs_log_err(hw->os, "FCOE_RPI free failure RPI=%#x\n",
3278 return OCS_HW_RTN_ERROR;
3281 rnode->indicator = ngroup->indicator;
3282 rnode->index = ngroup->index;
3284 return OCS_HW_RTN_SUCCESS;
3288 ocs_hw_node_group_free(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup)
3292 if (!hw || !ngroup) {
3293 ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p\n",
3295 return OCS_HW_RTN_ERROR;
3298 ref = ocs_atomic_read(&hw->rpi_ref[ngroup->index].rpi_count);
3300 /* Hmmm, the reference count is non-zero */
3301 ocs_log_debug(hw->os, "node group reference=%d (RPI=%#x)\n",
3302 ref, ngroup->indicator);
3304 if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, ngroup->indicator)) {
3305 ocs_log_err(hw->os, "FCOE_RPI free failure RPI=%#x\n",
3307 return OCS_HW_RTN_ERROR;
3310 ocs_atomic_set(&hw->rpi_ref[ngroup->index].rpi_count, 0);
3313 ngroup->indicator = UINT32_MAX;
3314 ngroup->index = UINT32_MAX;
3316 return OCS_HW_RTN_SUCCESS;
3320 * @brief Initialize IO fields on each free call.
3322 * @n @b Note: This is done on each free call (as opposed to each
3323 * alloc call) because port-owned XRIs are not
3324 * allocated with ocs_hw_io_alloc() but are freed with this
3327 * @param io Pointer to HW IO.
3330 ocs_hw_init_free_io(ocs_hw_io_t *io)
3333 * Set io->done to NULL, to avoid any callbacks, should
3334 * a completion be received for one of these IOs
3337 io->abort_done = NULL;
3338 io->status_saved = 0;
3339 io->abort_in_progress = FALSE;
3340 io->port_owned_abort_count = 0;
3345 io->tgt_wqe_timeout = 0;
3350 * @brief Lockless allocate a HW IO object.
3353 * Assume that hw->ocs_lock is held. This function is only used if
3354 * use_dif_sec_xri workaround is being used.
3356 * @param hw Hardware context.
3358 * @return Returns a pointer to an object on success, or NULL on failure.
3360 static inline ocs_hw_io_t *
3361 _ocs_hw_io_alloc(ocs_hw_t *hw)
3363 ocs_hw_io_t *io = NULL;
3365 if (NULL != (io = ocs_list_remove_head(&hw->io_free))) {
3366 ocs_list_add_tail(&hw->io_inuse, io);
3367 io->state = OCS_HW_IO_STATE_INUSE;
3368 io->quarantine = FALSE;
3369 io->quarantine_first_phase = TRUE;
3370 io->abort_reqtag = UINT32_MAX;
3371 ocs_ref_init(&io->ref, ocs_hw_io_free_internal, io);
3373 ocs_atomic_add_return(&hw->io_alloc_failed_count, 1);
3380 * @brief Allocate a HW IO object.
3383 * @n @b Note: This function applies to non-port owned XRIs
3386 * @param hw Hardware context.
3388 * @return Returns a pointer to an object on success, or NULL on failure.
3391 ocs_hw_io_alloc(ocs_hw_t *hw)
3393 ocs_hw_io_t *io = NULL;
3395 ocs_lock(&hw->io_lock);
3396 io = _ocs_hw_io_alloc(hw);
3397 ocs_unlock(&hw->io_lock);
3404 * @brief Allocate/Activate a port owned HW IO object.
3407 * This function is called by the transport layer when an XRI is
3408 * allocated by the SLI-Port. This will "activate" the HW IO
3409 * associated with the XRI received from the SLI-Port to mirror
3410 * the state of the XRI.
3411 * @n @n @b Note: This function applies to port owned XRIs only.
3413 * @param hw Hardware context.
3414 * @param io Pointer HW IO to activate/allocate.
3416 * @return Returns a pointer to an object on success, or NULL on failure.
3419 ocs_hw_io_activate_port_owned(ocs_hw_t *hw, ocs_hw_io_t *io)
3421 if (ocs_ref_read_count(&io->ref) > 0) {
3422 ocs_log_err(hw->os, "Bad parameter: refcount > 0\n");
3426 if (io->wq != NULL) {
3427 ocs_log_err(hw->os, "XRI %x already in use\n", io->indicator);
3431 ocs_ref_init(&io->ref, ocs_hw_io_free_port_owned, io);
3439 * @brief When an IO is freed, depending on the exchange busy flag, and other
3440 * workarounds, move it to the correct list.
3443 * @n @b Note: Assumes that the hw->io_lock is held and the item has been removed
3444 * from the busy or wait_free list.
3446 * @param hw Hardware context.
3447 * @param io Pointer to the IO object to move.
3450 ocs_hw_io_free_move_correct_list(ocs_hw_t *hw, ocs_hw_io_t *io)
3453 /* add to wait_free list and wait for XRI_ABORTED CQEs to clean up */
3454 ocs_list_add_tail(&hw->io_wait_free, io);
3455 io->state = OCS_HW_IO_STATE_WAIT_FREE;
3457 /* IO not busy, add to free list */
3458 ocs_list_add_tail(&hw->io_free, io);
3459 io->state = OCS_HW_IO_STATE_FREE;
3462 /* BZ 161832 workaround */
3463 if (hw->workaround.use_dif_sec_xri) {
3464 ocs_hw_check_sec_hio_list(hw);
3470 * @brief Free a HW IO object. Perform cleanup common to
3471 * port and host-owned IOs.
3473 * @param hw Hardware context.
3474 * @param io Pointer to the HW IO object.
3477 ocs_hw_io_free_common(ocs_hw_t *hw, ocs_hw_io_t *io)
3479 /* initialize IO fields */
3480 ocs_hw_init_free_io(io);
3482 /* Restore default SGL */
3483 ocs_hw_io_restore_sgl(hw, io);
3488 * @brief Free a HW IO object associated with a port-owned XRI.
3490 * @param arg Pointer to the HW IO object.
3493 ocs_hw_io_free_port_owned(void *arg)
3495 ocs_hw_io_t *io = (ocs_hw_io_t *)arg;
3496 ocs_hw_t *hw = io->hw;
3499 * For auto xfer rdy, if the dnrx bit is set, then add it to the list of XRIs
3500 * waiting for buffers.
3502 if (io->auto_xfer_rdy_dnrx) {
3503 ocs_lock(&hw->io_lock);
3504 /* take a reference count because we still own the IO until the buffer is posted */
3505 ocs_ref_init(&io->ref, ocs_hw_io_free_port_owned, io);
3506 ocs_list_add_tail(&hw->io_port_dnrx, io);
3507 ocs_unlock(&hw->io_lock);
3510 /* perform common cleanup */
3511 ocs_hw_io_free_common(hw, io);
3516 * @brief Free a previously-allocated HW IO object. Called when
3517 * IO refcount goes to zero (host-owned IOs only).
3519 * @param arg Pointer to the HW IO object.
3522 ocs_hw_io_free_internal(void *arg)
3524 ocs_hw_io_t *io = (ocs_hw_io_t *)arg;
3525 ocs_hw_t *hw = io->hw;
3527 /* perform common cleanup */
3528 ocs_hw_io_free_common(hw, io);
3530 ocs_lock(&hw->io_lock);
3531 /* remove from in-use list */
3532 ocs_list_remove(&hw->io_inuse, io);
3533 ocs_hw_io_free_move_correct_list(hw, io);
3534 ocs_unlock(&hw->io_lock);
3539 * @brief Free a previously-allocated HW IO object.
3542 * @n @b Note: This function applies to port and host owned XRIs.
3544 * @param hw Hardware context.
3545 * @param io Pointer to the HW IO object.
3547 * @return Returns a non-zero value if HW IO was freed, 0 if references
3548 * on the IO still exist, or a negative value if an error occurred.
3551 ocs_hw_io_free(ocs_hw_t *hw, ocs_hw_io_t *io)
3553 /* just put refcount */
3554 if (ocs_ref_read_count(&io->ref) <= 0) {
3555 ocs_log_err(hw->os, "Bad parameter: refcount <= 0 xri=%x tag=%x\n",
3556 io->indicator, io->reqtag);
3560 return ocs_ref_put(&io->ref); /* ocs_ref_get(): ocs_hw_io_alloc() */
3565 * @brief Check if given HW IO is in-use
3568 * This function returns TRUE if the given HW IO has been
3569 * allocated and is in-use, and FALSE otherwise. It applies to
3570 * port and host owned XRIs.
3572 * @param hw Hardware context.
3573 * @param io Pointer to the HW IO object.
3575 * @return TRUE if an IO is in use, or FALSE otherwise.
3578 ocs_hw_io_inuse(ocs_hw_t *hw, ocs_hw_io_t *io)
3580 return (ocs_ref_read_count(&io->ref) > 0);
3584 * @brief Write a HW IO to a work queue.
3587 * A HW IO is written to a work queue.
3589 * @param wq Pointer to work queue.
3590 * @param wqe Pointer to WQ entry.
3592 * @n @b Note: Assumes the SLI-4 queue lock is held.
3594 * @return Returns 0 on success, or a negative error code value on failure.
3597 _hw_wq_write(hw_wq_t *wq, ocs_hw_wqe_t *wqe)
3602 /* Every so often, set the wqec bit to generate comsummed completions */
3603 if (wq->wqec_count) {
3606 if (wq->wqec_count == 0) {
3607 sli4_generic_wqe_t *genwqe = (void*)wqe->wqebuf;
3609 wq->wqec_count = wq->wqec_set_count;
3612 /* Decrement WQ free count */
3615 queue_rc = _sli_queue_write(&wq->hw->sli, wq->queue, wqe->wqebuf);
3621 ocs_queue_history_wq(&wq->hw->q_hist, (void *) wqe->wqebuf, wq->queue->id, queue_rc);
3628 * @brief Write a HW IO to a work queue.
3631 * A HW IO is written to a work queue.
3633 * @param wq Pointer to work queue.
3634 * @param wqe Pointer to WQE entry.
3636 * @n @b Note: Takes the SLI-4 queue lock.
3638 * @return Returns 0 on success, or a negative error code value on failure.
3641 hw_wq_write(hw_wq_t *wq, ocs_hw_wqe_t *wqe)
3645 sli_queue_lock(wq->queue);
3646 if ( ! ocs_list_empty(&wq->pending_list)) {
3647 ocs_list_add_tail(&wq->pending_list, wqe);
3648 OCS_STAT(wq->wq_pending_count++;)
3649 while ((wq->free_count > 0) && ((wqe = ocs_list_remove_head(&wq->pending_list)) != NULL)) {
3650 rc = _hw_wq_write(wq, wqe);
3654 if (wqe->abort_wqe_submit_needed) {
3655 wqe->abort_wqe_submit_needed = 0;
3656 sli_abort_wqe(&wq->hw->sli, wqe->wqebuf, wq->hw->sli.config.wqe_size, SLI_ABORT_XRI,
3657 wqe->send_abts, wqe->id, 0, wqe->abort_reqtag, SLI4_CQ_DEFAULT );
3658 ocs_list_add_tail(&wq->pending_list, wqe);
3659 OCS_STAT(wq->wq_pending_count++;)
3663 if (wq->free_count > 0) {
3664 rc = _hw_wq_write(wq, wqe);
3666 ocs_list_add_tail(&wq->pending_list, wqe);
3667 OCS_STAT(wq->wq_pending_count++;)
3671 sli_queue_unlock(wq->queue);
3678 * @brief Update free count and submit any pending HW IOs
3681 * The WQ free count is updated, and any pending HW IOs are submitted that
3682 * will fit in the queue.
3684 * @param wq Pointer to work queue.
3685 * @param update_free_count Value added to WQs free count.
3690 hw_wq_submit_pending(hw_wq_t *wq, uint32_t update_free_count)
/* Credit the WQ with newly-freed slots (typically from completion processing)
 * and flush as many pending WQEs as now fit, under the SLI-4 queue lock. */
3694 sli_queue_lock(wq->queue);
3696 /* Update free count with value passed in */
3697 wq->free_count += update_free_count;
/* Submit pending WQEs in FIFO order until slots run out or the list empties. */
3699 while ((wq->free_count > 0) && ((wqe = ocs_list_remove_head(&wq->pending_list)) != NULL)) {
3700 _hw_wq_write(wq, wqe);
/* Deferred abort requested while the WQE waited: convert it to an ABORT WQE
 * and re-queue it (same pattern as hw_wq_write()). */
3702 if (wqe->abort_wqe_submit_needed) {
3703 wqe->abort_wqe_submit_needed = 0;
3704 sli_abort_wqe(&wq->hw->sli, wqe->wqebuf, wq->hw->sli.config.wqe_size, SLI_ABORT_XRI,
3705 wqe->send_abts, wqe->id, 0, wqe->abort_reqtag, SLI4_CQ_DEFAULT);
3706 ocs_list_add_tail(&wq->pending_list, wqe);
3707 OCS_STAT(wq->wq_pending_count++;)
3711 sli_queue_unlock(wq->queue);
3715 * @brief Check to see if there are any BZ 161832 workaround waiting IOs
3718 * Checks hw->sec_hio_wait_list, if an IO is waiting for a HW IO, then try
3719 * to allocate a secondary HW io, and dispatch it.
3721 * @n @b Note: hw->io_lock MUST be taken when called.
3723 * @param hw pointer to HW object
3728 ocs_hw_check_sec_hio_list(ocs_hw_t *hw)
/* For each IO parked on sec_hio_wait_list (BZ 161832 workaround), try to
 * allocate a secondary HW IO and re-dispatch the saved TRECEIVE as a
 * continuation. Caller must hold hw->io_lock (see function header). */
3731 ocs_hw_io_t *sec_io;
3734 while (!ocs_list_empty(&hw->sec_hio_wait_list)) {
3737 sec_io = _ocs_hw_io_alloc(hw);
3738 if (sec_io == NULL) {
/* Secondary XRI acquired: move the IO back to the in-use list and bind it. */
3742 io = ocs_list_remove_head(&hw->sec_hio_wait_list);
3743 ocs_list_add_tail(&hw->io_inuse, io);
3744 io->state = OCS_HW_IO_STATE_INUSE;
3745 io->sec_hio = sec_io;
3747 /* mark secondary XRI for second and subsequent data phase as quarantine */
3749 sec_io->quarantine = TRUE;
/* Re-build the target-receive flags from the parameters saved when the IO
 * was deferred in ocs_hw_io_send(). */
3752 flags = io->sec_iparam.fcp_tgt.flags;
3754 flags |= SLI4_IO_CONTINUATION;
3756 flags &= ~SLI4_IO_CONTINUATION;
3759 io->tgt_wqe_timeout = io->sec_iparam.fcp_tgt.timeout;
3761 /* Complete (continue) TRECV IO */
3763 if (sli_fcp_cont_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl,
3765 io->sec_iparam.fcp_tgt.offset, io->sec_len, io->indicator, io->sec_hio->indicator,
3766 io->reqtag, SLI4_CQ_DEFAULT,
3767 io->sec_iparam.fcp_tgt.ox_id, io->rnode->indicator, io->rnode,
3769 io->sec_iparam.fcp_tgt.dif_oper, io->sec_iparam.fcp_tgt.blk_size, io->sec_iparam.fcp_tgt.cs_ctl, io->sec_iparam.fcp_tgt.app_id)) {
3770 ocs_log_test(hw->os, "TRECEIVE WQE error\n");
/* Non-continuation path: plain FCP_TRECEIVE64 without a secondary XRI. */
3774 if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl,
3776 io->sec_iparam.fcp_tgt.offset, io->sec_len, io->indicator,
3777 io->reqtag, SLI4_CQ_DEFAULT,
3778 io->sec_iparam.fcp_tgt.ox_id, io->rnode->indicator, io->rnode,
3780 io->sec_iparam.fcp_tgt.dif_oper, io->sec_iparam.fcp_tgt.blk_size,
3781 io->sec_iparam.fcp_tgt.cs_ctl, io->sec_iparam.fcp_tgt.app_id)) {
3782 ocs_log_test(hw->os, "TRECEIVE WQE error\n");
/* Lazily bind the IO to a WQ on first submission. */
3787 if (io->wq == NULL) {
3788 io->wq = ocs_hw_queue_next_wq(hw, io);
3789 ocs_hw_assert(io->wq != NULL);
3794 * Add IO to active io wqe list before submitting, in case the
3795 * wcqe processing preempts this thread.
3797 ocs_hw_add_io_timed_wqe(hw, io);
3798 rc = hw_wq_write(io->wq, &io->wqe);
3800 /* non-negative return is success */
3803 /* failed to write wqe, remove from active wqe list */
3804 ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
3806 ocs_hw_remove_io_timed_wqe(hw, io);
3813 * @brief Send a Single Request/Response Sequence (SRRS).
3816 * This routine supports communication sequences consisting of a single
3817 * request and single response between two endpoints. Examples include:
3818 * - Sending an ELS request.
3819 * - Sending an ELS response - To send an ELS response, the caller must provide
3820 * the OX_ID from the received request.
3821 * - Sending a FC Common Transport (FC-CT) request - To send a FC-CT request,
3822 * the caller must provide the R_CTL, TYPE, and DF_CTL
3823 * values to place in the FC frame header.
3825 * @n @b Note: The caller is expected to provide both send and receive
3826 * buffers for requests. In the case of sending a response, no receive buffer
3827 * is necessary and the caller may pass in a NULL pointer.
3829 * @param hw Hardware context.
3830 * @param type Type of sequence (ELS request/response, FC-CT).
3831 * @param io Previously-allocated HW IO object.
3832 * @param send DMA memory holding data to send (for example, ELS request, BLS response).
3833 * @param len Length, in bytes, of data to send.
3834 * @param receive Optional DMA memory to hold a response.
3835 * @param rnode Destination of data (that is, a remote node).
3836 * @param iparam IO parameters (ELS response and FC-CT).
3837 * @param cb Function call upon completion of sending the data (may be NULL).
3838 * @param arg Argument to pass to IO completion function.
3840 * @return Returns 0 on success, or a non-zero on failure.
3843 ocs_hw_srrs_send(ocs_hw_t *hw, ocs_hw_io_type_e type, ocs_hw_io_t *io,
3844 ocs_dma_t *send, uint32_t len, ocs_dma_t *receive,
3845 ocs_remote_node_t *rnode, ocs_hw_io_param_t *iparam,
3846 ocs_hw_srrs_cb_t cb, void *arg)
/* Build and submit the WQE for a single request/response sequence (ELS,
 * FC-CT, BLS); see the function header above for parameter semantics. */
3848 sli4_sge_t *sge = NULL;
3849 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
3850 uint16_t local_flags = 0;
/* Note: send/receive are intentionally NOT required here; responses need no
 * receive buffer. Each case below validates what it actually uses. */
3852 if (!hw || !io || !rnode || !iparam) {
3853 ocs_log_err(NULL, "bad parm hw=%p io=%p send=%p receive=%p rnode=%p iparam=%p\n",
3854 hw, io, send, receive, rnode, iparam);
3855 return OCS_HW_RTN_ERROR;
3858 if (hw->state != OCS_HW_STATE_ACTIVE) {
3859 ocs_log_test(hw->os, "cannot send SRRS, HW state=%d\n", hw->state);
3860 return OCS_HW_RTN_ERROR;
3863 if (ocs_hw_is_xri_port_owned(hw, io->indicator)) {
3864 /* We must set the XC bit for port owned XRIs */
3865 local_flags |= SLI4_IO_CONTINUATION;
/* Set up a two-entry SGL: sge[0] = send buffer, sge[1] = optional receive. */
3872 sge = io->sgl->virt;
3874 /* clear both SGE */
3875 ocs_memset(io->sgl->virt, 0, 2 * sizeof(sli4_sge_t));
3878 sge[0].buffer_address_high = ocs_addr32_hi(send->phys);
3879 sge[0].buffer_address_low = ocs_addr32_lo(send->phys);
3880 sge[0].sge_type = SLI4_SGE_TYPE_DATA;
3881 sge[0].buffer_length = len;
/* Only request types expect a response payload in sge[1]. */
3884 if ((OCS_HW_ELS_REQ == type) || (OCS_HW_FC_CT == type)) {
3885 sge[1].buffer_address_high = ocs_addr32_hi(receive->phys);
3886 sge[1].buffer_address_low = ocs_addr32_lo(receive->phys);
3887 sge[1].sge_type = SLI4_SGE_TYPE_DATA;
3888 sge[1].buffer_length = receive->size;
/* Build the sequence-specific WQE. Each case bails with OCS_HW_RTN_ERROR if
 * the SLI layer rejects the WQE parameters. */
3895 case OCS_HW_ELS_REQ:
3896 if ( (!send) || sli_els_request64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl,
3897 *((uint8_t *)(send->virt)), /* req_type */
3899 iparam->els.timeout, io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rnode)) {
3900 ocs_log_err(hw->os, "REQ WQE error\n");
3901 rc = OCS_HW_RTN_ERROR;
3904 case OCS_HW_ELS_RSP:
3905 if ( (!send) || sli_xmit_els_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len,
3906 io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
3908 rnode, local_flags, UINT32_MAX)) {
3909 ocs_log_err(hw->os, "RSP WQE error\n");
3910 rc = OCS_HW_RTN_ERROR;
/* Same as OCS_HW_ELS_RSP but with an explicit source S_ID from the caller. */
3913 case OCS_HW_ELS_RSP_SID:
3914 if ( (!send) || sli_xmit_els_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len,
3915 io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
3916 iparam->els_sid.ox_id,
3917 rnode, local_flags, iparam->els_sid.s_id)) {
3918 ocs_log_err(hw->os, "RSP (SID) WQE error\n");
3919 rc = OCS_HW_RTN_ERROR;
3923 if ( (!send) || sli_gen_request64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl, len,
3924 receive->size, iparam->fc_ct.timeout, io->indicator,
3925 io->reqtag, SLI4_CQ_DEFAULT, rnode, iparam->fc_ct.r_ctl,
3926 iparam->fc_ct.type, iparam->fc_ct.df_ctl)) {
3927 ocs_log_err(hw->os, "GEN WQE error\n");
3928 rc = OCS_HW_RTN_ERROR;
3931 case OCS_HW_FC_CT_RSP:
3932 if ( (!send) || sli_xmit_sequence64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl, len,
3933 iparam->fc_ct_rsp.timeout, iparam->fc_ct_rsp.ox_id, io->indicator,
3934 io->reqtag, rnode, iparam->fc_ct_rsp.r_ctl,
3935 iparam->fc_ct_rsp.type, iparam->fc_ct_rsp.df_ctl)) {
3936 ocs_log_err(hw->os, "XMIT SEQ WQE error\n");
3937 rc = OCS_HW_RTN_ERROR;
/* BLS accept/reject: payload is copied from the caller into a local
 * sli_bls_payload_t rather than DMA'd from the send buffer. */
3940 case OCS_HW_BLS_ACC:
3941 case OCS_HW_BLS_RJT:
3943 sli_bls_payload_t bls;
3945 if (OCS_HW_BLS_ACC == type) {
3946 bls.type = SLI_BLS_ACC;
3947 ocs_memcpy(&bls.u.acc, iparam->bls.payload, sizeof(bls.u.acc));
3949 bls.type = SLI_BLS_RJT;
3950 ocs_memcpy(&bls.u.rjt, iparam->bls.payload, sizeof(bls.u.rjt));
3953 bls.ox_id = iparam->bls.ox_id;
3954 bls.rx_id = iparam->bls.rx_id;
3956 if (sli_xmit_bls_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &bls,
3957 io->indicator, io->reqtag,
3959 rnode, UINT32_MAX)) {
3960 ocs_log_err(hw->os, "XMIT_BLS_RSP64 WQE error\n");
3961 rc = OCS_HW_RTN_ERROR;
/* BLS accept with an explicit source S_ID (UINT32_MAX above means default). */
3965 case OCS_HW_BLS_ACC_SID:
3967 sli_bls_payload_t bls;
3969 bls.type = SLI_BLS_ACC;
3970 ocs_memcpy(&bls.u.acc, iparam->bls_sid.payload, sizeof(bls.u.acc));
3972 bls.ox_id = iparam->bls_sid.ox_id;
3973 bls.rx_id = iparam->bls_sid.rx_id;
3975 if (sli_xmit_bls_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &bls,
3976 io->indicator, io->reqtag,
3978 rnode, iparam->bls_sid.s_id)) {
3979 ocs_log_err(hw->os, "XMIT_BLS_RSP64 WQE SID error\n");
3980 rc = OCS_HW_RTN_ERROR;
3985 if ( (!send) || sli_xmit_bcast64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len,
3986 iparam->bcast.timeout, io->indicator, io->reqtag,
3987 SLI4_CQ_DEFAULT, rnode,
3988 iparam->bcast.r_ctl, iparam->bcast.type, iparam->bcast.df_ctl)) {
3989 ocs_log_err(hw->os, "XMIT_BCAST64 WQE error\n");
3990 rc = OCS_HW_RTN_ERROR;
3994 ocs_log_err(hw->os, "bad SRRS type %#x\n", type);
3995 rc = OCS_HW_RTN_ERROR;
/* WQE built successfully: pick a WQ (lazily, on first submission) and write. */
3998 if (OCS_HW_RTN_SUCCESS == rc) {
3999 if (io->wq == NULL) {
4000 io->wq = ocs_hw_queue_next_wq(hw, io);
4001 ocs_hw_assert(io->wq != NULL);
4006 * Add IO to active io wqe list before submitting, in case the
4007 * wcqe processing preempts this thread.
4009 OCS_STAT(io->wq->use_count++);
4010 ocs_hw_add_io_timed_wqe(hw, io);
4011 rc = hw_wq_write(io->wq, &io->wqe);
4013 /* non-negative return is success */
4016 /* failed to write wqe, remove from active wqe list */
4017 ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
4019 ocs_hw_remove_io_timed_wqe(hw, io);
4028 * @brief Send a read, write, or response IO.
4031 * This routine supports sending a higher-level IO (for example, FCP) between two endpoints
4032 * as a target or initiator. Examples include:
4033 * - Sending read data and good response (target).
4034 * - Sending a response (target with no data or after receiving write data).
4036 * This routine assumes all IOs use the SGL associated with the HW IO. Prior to
4037 * calling this routine, the data should be loaded using ocs_hw_io_add_sge().
4039 * @param hw Hardware context.
4040 * @param type Type of IO (target read, target response, and so on).
4041 * @param io Previously-allocated HW IO object.
4042 * @param len Length, in bytes, of data to send.
4043 * @param iparam IO parameters.
4044 * @param rnode Destination of data (that is, a remote node).
4045 * @param cb Function call upon completion of sending data (may be NULL).
4046 * @param arg Argument to pass to IO completion function.
4048 * @return Returns 0 on success, or a non-zero value on failure.
4051 * - Support specifying relative offset.
4052 * - Use a WQ other than 0.
4055 ocs_hw_io_send(ocs_hw_t *hw, ocs_hw_io_type_e type, ocs_hw_io_t *io,
4056 uint32_t len, ocs_hw_io_param_t *iparam, ocs_remote_node_t *rnode,
4057 void *cb, void *arg)
/* Build and submit the WQE for an FCP initiator or target IO; see the
 * function header above for parameter semantics. */
4059 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
4061 uint8_t send_wqe = TRUE;
4065 if (!hw || !io || !rnode || !iparam) {
4066 ocs_log_err(NULL, "bad parm hw=%p io=%p iparam=%p rnode=%p\n",
4067 hw, io, iparam, rnode);
4068 return OCS_HW_RTN_ERROR;
4071 if (hw->state != OCS_HW_STATE_ACTIVE) {
4072 ocs_log_err(hw->os, "cannot send IO, HW state=%d\n", hw->state);
4073 return OCS_HW_RTN_ERROR;
4076 rpi = rnode->indicator;
/* An unregistered node (UINT32_MAX) can still be targeted via the
 * workaround RPI when the workaround is enabled. */
4078 if (hw->workaround.use_unregistered_rpi && (rpi == UINT32_MAX)) {
4079 rpi = hw->workaround.unregistered_rid;
4080 ocs_log_test(hw->os, "using unregistered RPI: %d\n", rpi);
4084 * Save state needed during later stages
4092 * Format the work queue entry used to send the IO
4095 case OCS_HW_IO_INITIATOR_READ:
4097 * If use_dif_quarantine workaround is in effect, and dif_separates then mark the
4098 * initiator read IO for quarantine
4100 if (hw->workaround.use_dif_quarantine && (hw->config.dif_mode == OCS_HW_DIF_MODE_SEPARATE) &&
4101 (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) {
4102 io->quarantine = TRUE;
4105 ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
4106 iparam->fcp_ini.rsp);
4108 if (sli_fcp_iread64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge, len,
4109 io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rpi, rnode,
4110 iparam->fcp_ini.dif_oper, iparam->fcp_ini.blk_size,
4111 iparam->fcp_ini.timeout)) {
4112 ocs_log_err(hw->os, "IREAD WQE error\n");
4113 rc = OCS_HW_RTN_ERROR;
4116 case OCS_HW_IO_INITIATOR_WRITE:
4117 ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
4118 iparam->fcp_ini.rsp);
4120 if (sli_fcp_iwrite64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4121 len, iparam->fcp_ini.first_burst,
4122 io->indicator, io->reqtag,
4123 SLI4_CQ_DEFAULT, rpi, rnode,
4124 iparam->fcp_ini.dif_oper, iparam->fcp_ini.blk_size,
4125 iparam->fcp_ini.timeout)) {
4126 ocs_log_err(hw->os, "IWRITE WQE error\n");
4127 rc = OCS_HW_RTN_ERROR;
/* Command with no data phase: FCP_ICMND64. */
4130 case OCS_HW_IO_INITIATOR_NODATA:
4131 ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
4132 iparam->fcp_ini.rsp);
4134 if (sli_fcp_icmnd64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl,
4135 io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
4136 rpi, rnode, iparam->fcp_ini.timeout)) {
4137 ocs_log_err(hw->os, "ICMND WQE error\n");
4138 rc = OCS_HW_RTN_ERROR;
4141 case OCS_HW_IO_TARGET_WRITE: {
4142 uint16_t flags = iparam->fcp_tgt.flags;
4143 fcp_xfer_rdy_iu_t *xfer = io->xfer_rdy.virt;
4146 * Fill in the XFER_RDY for IF_TYPE 0 devices
4148 *((uint32_t *)xfer->fcp_data_ro) = ocs_htobe32(iparam->fcp_tgt.offset);
4149 *((uint32_t *)xfer->fcp_burst_len) = ocs_htobe32(len);
4150 *((uint32_t *)xfer->rsvd) = 0;
4153 flags |= SLI4_IO_CONTINUATION;
4155 flags &= ~SLI4_IO_CONTINUATION;
4158 io->tgt_wqe_timeout = iparam->fcp_tgt.timeout;
4161 * If use_dif_quarantine workaround is in effect, and this is a DIF enabled IO
4162 * then mark the target write IO for quarantine
4164 if (hw->workaround.use_dif_quarantine && (hw->config.dif_mode == OCS_HW_DIF_MODE_SEPARATE) &&
4165 (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) {
4166 io->quarantine = TRUE;
4170 * BZ 161832 Workaround:
4171 * Check for use_dif_sec_xri workaround. Note, even though the first dataphase
4172 * doesn't really need a secondary XRI, we allocate one anyway, as this avoids the
4173 * potential for deadlock where all XRI's are allocated as primaries to IOs that
4174 * are on hw->sec_hio_wait_list. If this secondary XRI is not for the first
4175 * data phase, it is marked for quarantine.
4177 if (hw->workaround.use_dif_sec_xri && (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) {
4180 * If we have allocated a chained SGL for skyhawk, then
4181 * we can re-use this for the sec_hio.
4183 if (io->ovfl_io != NULL) {
4184 io->sec_hio = io->ovfl_io;
4185 io->sec_hio->quarantine = TRUE;
4187 io->sec_hio = ocs_hw_io_alloc(hw);
4189 if (io->sec_hio == NULL) {
4190 /* Failed to allocate, so save full request context and put
4191 * this IO on the wait list
4193 io->sec_iparam = *iparam;
/* Park the IO on sec_hio_wait_list; ocs_hw_check_sec_hio_list() will
 * re-dispatch it once a secondary XRI becomes available. */
4195 ocs_lock(&hw->io_lock);
4196 ocs_list_remove(&hw->io_inuse, io);
4197 ocs_list_add_tail(&hw->sec_hio_wait_list, io);
4198 io->state = OCS_HW_IO_STATE_WAIT_SEC_HIO;
4199 hw->sec_hio_wait_count++;
4200 ocs_unlock(&hw->io_lock);
4205 /* We quarantine the secondary IO if this is the second or subsequent data phase */
4207 io->sec_hio->quarantine = TRUE;
4212 * If not the first data phase, and io->sec_hio has been allocated, then issue
4213 * FCP_CONT_TRECEIVE64 WQE, otherwise use the usual FCP_TRECEIVE64 WQE
4215 if (io->xbusy && (io->sec_hio != NULL)) {
4216 if (sli_fcp_cont_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4217 iparam->fcp_tgt.offset, len, io->indicator, io->sec_hio->indicator,
4218 io->reqtag, SLI4_CQ_DEFAULT,
4219 iparam->fcp_tgt.ox_id, rpi, rnode,
4221 iparam->fcp_tgt.dif_oper, iparam->fcp_tgt.blk_size,
4222 iparam->fcp_tgt.cs_ctl, iparam->fcp_tgt.app_id)) {
4223 ocs_log_err(hw->os, "TRECEIVE WQE error\n");
4224 rc = OCS_HW_RTN_ERROR;
4227 if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4228 iparam->fcp_tgt.offset, len, io->indicator, io->reqtag,
4230 iparam->fcp_tgt.ox_id, rpi, rnode,
4232 iparam->fcp_tgt.dif_oper, iparam->fcp_tgt.blk_size,
4233 iparam->fcp_tgt.cs_ctl, iparam->fcp_tgt.app_id)) {
4234 ocs_log_err(hw->os, "TRECEIVE WQE error\n");
4235 rc = OCS_HW_RTN_ERROR;
4240 case OCS_HW_IO_TARGET_READ: {
4241 uint16_t flags = iparam->fcp_tgt.flags;
4244 flags |= SLI4_IO_CONTINUATION;
4246 flags &= ~SLI4_IO_CONTINUATION;
4249 io->tgt_wqe_timeout = iparam->fcp_tgt.timeout;
4250 if (sli_fcp_tsend64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4251 iparam->fcp_tgt.offset, len, io->indicator, io->reqtag,
4253 iparam->fcp_tgt.ox_id, rpi, rnode,
4255 iparam->fcp_tgt.dif_oper,
4256 iparam->fcp_tgt.blk_size,
4257 iparam->fcp_tgt.cs_ctl,
4258 iparam->fcp_tgt.app_id)) {
4259 ocs_log_err(hw->os, "TSEND WQE error\n");
4260 rc = OCS_HW_RTN_ERROR;
4261 } else if (hw->workaround.retain_tsend_io_length) {
4266 case OCS_HW_IO_TARGET_RSP: {
4267 uint16_t flags = iparam->fcp_tgt.flags;
4270 flags |= SLI4_IO_CONTINUATION;
4272 flags &= ~SLI4_IO_CONTINUATION;
4275 /* post a new auto xfer ready buffer */
4276 if (hw->auto_xfer_rdy_enabled && io->is_port_owned) {
4277 if ((io->auto_xfer_rdy_dnrx = ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 1))) {
4278 flags |= SLI4_IO_DNRX;
4282 io->tgt_wqe_timeout = iparam->fcp_tgt.timeout;
4283 if (sli_fcp_trsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size,
4286 io->indicator, io->reqtag,
4288 iparam->fcp_tgt.ox_id,
4290 flags, iparam->fcp_tgt.cs_ctl,
4292 iparam->fcp_tgt.app_id)) {
4293 ocs_log_err(hw->os, "TRSP WQE error\n");
4294 rc = OCS_HW_RTN_ERROR;
4300 ocs_log_err(hw->os, "unsupported IO type %#x\n", type);
4301 rc = OCS_HW_RTN_ERROR;
/* WQE built successfully and not deferred: pick a WQ lazily and submit. */
4304 if (send_wqe && (OCS_HW_RTN_SUCCESS == rc)) {
4305 if (io->wq == NULL) {
4306 io->wq = ocs_hw_queue_next_wq(hw, io);
4307 ocs_hw_assert(io->wq != NULL);
4313 * Add IO to active io wqe list before submitting, in case the
4314 * wcqe processing preempts this thread.
4316 OCS_STAT(hw->tcmd_wq_submit[io->wq->instance]++);
4317 OCS_STAT(io->wq->use_count++);
4318 ocs_hw_add_io_timed_wqe(hw, io);
4319 rc = hw_wq_write(io->wq, &io->wqe);
4321 /* non-negative return is success */
4324 /* failed to write wqe, remove from active wqe list */
4325 ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
4327 ocs_hw_remove_io_timed_wqe(hw, io);
4335 * @brief Send a raw frame
4338 * Using the SEND_FRAME_WQE, a frame consisting of header and payload is sent.
4340 * @param hw Pointer to HW object.
4341 * @param hdr Pointer to a little endian formatted FC header.
4342 * @param sof Value to use as the frame SOF.
4343 * @param eof Value to use as the frame EOF.
4344 * @param payload Pointer to payload DMA buffer.
4345 * @param ctx Pointer to caller provided send frame context.
4346 * @param callback Callback function.
4347 * @param arg Callback function argument.
4349 * @return Returns 0 on success, or a negative error code value on failure.
4352 ocs_hw_send_frame(ocs_hw_t *hw, fc_header_le_t *hdr, uint8_t sof, uint8_t eof, ocs_dma_t *payload,
4353 ocs_hw_send_frame_context_t *ctx, void (*callback)(void *arg, uint8_t *cqe, int32_t status), void *arg)
/* Send a raw FC frame via SEND_FRAME_WQE; see the function header above. */
4362 /* populate the callback object */
4365 /* Fetch and populate request tag */
4366 ctx->wqcb = ocs_hw_reqtag_alloc(hw, callback, arg);
4367 if (ctx->wqcb == NULL) {
4368 ocs_log_err(hw->os, "can't allocate request tag\n");
4369 return OCS_HW_RTN_NO_RESOURCES;
4372 /* Choose a work queue, first look for a class[1] wq, otherwise just use wq[0] */
4373 wq = ocs_varray_iter_next(hw->wq_class_array[1]);
/* NOTE(review): the wq[0] fallback for a NULL iterator result is in lines not
 * shown here — confirm it precedes the dereference below. */
4378 /* Set XRI and RX_ID in the header based on which WQ, and which send_frame_io we are using */
4379 xri = wq->send_frame_io->indicator;
4381 /* Build the send frame WQE */
4382 rc = sli_send_frame_wqe(&hw->sli, wqe->wqebuf, hw->sli.config.wqe_size, sof, eof, (uint32_t*) hdr, payload,
4383 payload->len, OCS_HW_SEND_FRAME_TIMEOUT, xri, ctx->wqcb->instance_index);
4385 ocs_log_err(hw->os, "sli_send_frame_wqe failed: %d\n", rc);
4386 return OCS_HW_RTN_ERROR;
/* Submit the WQE through the normal WQ path. */
4390 rc = hw_wq_write(wq, wqe);
4392 ocs_log_err(hw->os, "hw_wq_write failed: %d\n", rc);
4393 return OCS_HW_RTN_ERROR;
4396 OCS_STAT(wq->use_count++);
4398 return rc ? OCS_HW_RTN_ERROR : OCS_HW_RTN_SUCCESS;
4402 ocs_hw_io_register_sgl(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_dma_t *sgl, uint32_t sgl_count)
/* Register a caller-provided DMA buffer as the IO's overflow SGL. Rejected
 * when SGLs are pre-registered (the hardware then owns SGL placement). */
4404 if (sli_get_sgl_preregister(&hw->sli)) {
4405 ocs_log_err(hw->os, "can't use temporary SGL with pre-registered SGLs\n");
4406 return OCS_HW_RTN_ERROR;
4409 io->ovfl_sgl_count = sgl_count;
4412 return OCS_HW_RTN_SUCCESS;
4416 ocs_hw_io_restore_sgl(ocs_hw_t *hw, ocs_hw_io_t *io)
/* Undo any overflow-SGL switch: point the IO back at its default SGL and
 * release the chained-SGL helper IO if one was allocated. */
4418 /* Restore the default */
4419 io->sgl = &io->def_sgl;
4420 io->sgl_count = io->def_sgl_count;
4423 * For skyhawk, we need to free the IO allocated for the chained
4424 * SGL. For all devices, clear the overflow fields on the IO.
4426 * Note: For DIF IOs, we may be using the same XRI for the sec_hio and
4427 * the chained SGLs. If so, then we clear the ovfl_io field
4428 * when the sec_hio is freed.
4430 if (io->ovfl_io != NULL) {
4431 ocs_hw_io_free(hw, io->ovfl_io);
4435 /* Clear the overflow SGL */
4436 io->ovfl_sgl = NULL;
4437 io->ovfl_sgl_count = 0;
4438 io->ovfl_lsp = NULL;
4443 * @brief Initialize the scatter gather list entries of an IO.
4445 * @param hw Hardware context.
4446 * @param io Previously-allocated HW IO object.
4447 * @param type Type of IO (target read, target response, and so on).
4449 * @return Returns 0 on success, or a non-zero value on failure.
4452 ocs_hw_io_init_sges(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_hw_io_type_e type)
/* Reset the IO's SGL to the default and write the type-specific leading
 * entries (command/response pointers, XFER_RDY, or SKIP placeholders)
 * required by the SLI-4 WQE formats. */
4454 sli4_sge_t *data = NULL;
4459 ocs_log_err(hw ? hw->os : NULL, "bad parameter hw=%p io=%p\n",
4461 return OCS_HW_RTN_ERROR;
4464 /* Clear / reset the scatter-gather list */
4465 io->sgl = &io->def_sgl;
4466 io->sgl_count = io->def_sgl_count;
4467 io->first_data_sge = 0;
4469 ocs_memset(io->sgl->virt, 0, 2 * sizeof(sli4_sge_t));
4475 data = io->sgl->virt;
4478 * Some IO types have underlying hardware requirements on the order
4479 * of SGEs. Process all special entries here.
4482 case OCS_HW_IO_INITIATOR_READ:
4483 case OCS_HW_IO_INITIATOR_WRITE:
4484 case OCS_HW_IO_INITIATOR_NODATA:
4486 * No skips, 2 special for initiator I/Os
4487 * The addresses and length are written later
4489 /* setup command pointer */
4490 data->sge_type = SLI4_SGE_TYPE_DATA;
4493 /* setup response pointer */
4494 data->sge_type = SLI4_SGE_TYPE_DATA;
4496 if (OCS_HW_IO_INITIATOR_NODATA == type) {
4503 case OCS_HW_IO_TARGET_WRITE:
4504 #define OCS_TARGET_WRITE_SKIPS 2
4505 skips = OCS_TARGET_WRITE_SKIPS;
4507 /* populate host resident XFER_RDY buffer */
4508 data->sge_type = SLI4_SGE_TYPE_DATA;
4509 data->buffer_address_high = ocs_addr32_hi(io->xfer_rdy.phys);
4510 data->buffer_address_low = ocs_addr32_lo(io->xfer_rdy.phys);
4511 data->buffer_length = io->xfer_rdy.size;
4518 case OCS_HW_IO_TARGET_READ:
4520 * For FCP_TSEND64, the first 2 entries are SKIP SGE's
4522 #define OCS_TARGET_READ_SKIPS 2
4523 skips = OCS_TARGET_READ_SKIPS;
4525 case OCS_HW_IO_TARGET_RSP:
4527 * No skips, etc. for FCP_TRSP64
4531 ocs_log_err(hw->os, "unsupported IO type %#x\n", type);
4532 return OCS_HW_RTN_ERROR;
4536 * Write skip entries
4538 for (i = 0; i < skips; i++) {
4539 data->sge_type = SLI4_SGE_TYPE_SKIP;
4550 return OCS_HW_RTN_SUCCESS;
4555 * @brief Add a T10 PI seed scatter gather list entry.
4557 * @param hw Hardware context.
4558 * @param io Previously-allocated HW IO object.
4559 * @param dif_info Pointer to T10 DIF fields, or NULL if no DIF.
4561 * @return Returns 0 on success, or a non-zero value on failure.
4564 ocs_hw_io_add_seed_sge(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_hw_dif_info_t *dif_info)
/* Append a T10 DIF seed (DISEED) SGE describing protection-information
 * checking/replacement for the IO. No-op when DIF is not in use. */
4566 sli4_sge_t *data = NULL;
4567 sli4_diseed_sge_t *dif_seed;
4569 /* If no dif_info, or dif_oper is disabled, then just return success */
4570 if ((dif_info == NULL) || (dif_info->dif_oper == OCS_HW_DIF_OPER_DISABLED)) {
4571 return OCS_HW_RTN_SUCCESS;
4575 ocs_log_err(hw ? hw->os : NULL, "bad parameter hw=%p io=%p dif_info=%p\n",
4577 return OCS_HW_RTN_ERROR;
4580 data = io->sgl->virt;
4583 /* If we are doing T10 DIF add the DIF Seed SGE */
4584 ocs_memset(data, 0, sizeof(sli4_diseed_sge_t));
4585 dif_seed = (sli4_diseed_sge_t *)data;
4586 dif_seed->ref_tag_cmp = dif_info->ref_tag_cmp;
4587 dif_seed->ref_tag_repl = dif_info->ref_tag_repl;
4588 dif_seed->app_tag_repl = dif_info->app_tag_repl;
4589 dif_seed->repl_app_tag = dif_info->repl_app_tag;
/* The app/ref-tag 0xFFFF escape bits exist only on non-Lancer devices. */
4590 if (SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type) {
4591 dif_seed->atrt = dif_info->disable_app_ref_ffff;
4592 dif_seed->at = dif_info->disable_app_ffff;
4594 dif_seed->sge_type = SLI4_SGE_TYPE_DISEED;
4595 /* Workaround for SKH (BZ157233) */
4596 if (((io->type == OCS_HW_IO_TARGET_WRITE) || (io->type == OCS_HW_IO_INITIATOR_READ)) &&
4597 (SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type) && dif_info->dif_separate) {
4598 dif_seed->sge_type = SLI4_SGE_TYPE_SKIP;
4601 dif_seed->app_tag_cmp = dif_info->app_tag_cmp;
4602 dif_seed->dif_blk_size = dif_info->blk_size;
4603 dif_seed->auto_incr_ref_tag = dif_info->auto_incr_ref_tag;
4604 dif_seed->check_app_tag = dif_info->check_app_tag;
4605 dif_seed->check_ref_tag = dif_info->check_ref_tag;
4606 dif_seed->check_crc = dif_info->check_guard;
4607 dif_seed->new_ref_tag = dif_info->repl_ref_tag;
/* Map the HW-layer DIF operation to the SLI-4 rx/tx operation pair. */
4609 switch(dif_info->dif_oper) {
4610 case OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CRC:
4611 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CRC;
4612 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CRC;
4614 case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_NODIF:
4615 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_NODIF;
4616 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_NODIF;
4618 case OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM:
4619 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM;
4620 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM;
4622 case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF:
4623 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF;
4624 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF;
4626 case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CRC:
4627 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CRC;
4628 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CRC;
4630 case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM:
4631 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM;
4632 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM;
4634 case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CHKSUM:
4635 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CHKSUM;
4636 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CHKSUM;
4638 case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CRC:
4639 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CRC;
4640 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CRC;
4642 case OCS_HW_SGE_DIF_OP_IN_RAW_OUT_RAW:
4643 dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_RAW_OUT_RAW;
4644 dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_RAW_OUT_RAW;
4647 ocs_log_err(hw->os, "unsupported DIF operation %#x\n",
4648 dif_info->dif_oper);
4649 return OCS_HW_RTN_ERROR;
4653 * Set last, clear previous last
4657 data[-1].last = FALSE;
4662 return OCS_HW_RTN_SUCCESS;
4666 ocs_hw_io_overflow_sgl(ocs_hw_t *hw, ocs_hw_io_t *io)
/* Extend a full SGL by chaining to an overflow SGL: the current SGL's last
 * entry is moved to the overflow list and replaced with a link (LSP/chain)
 * SGE. On Skyhawk an extra HW IO may be allocated to supply the SGL. */
4668 sli4_lsp_sge_t *lsp;
4670 /* fail if we're already pointing to the overflow SGL */
4671 if (io->sgl == io->ovfl_sgl) {
4672 return OCS_HW_RTN_ERROR;
4676 * For skyhawk, we can use another SGL to extend the SGL list. The
4677 * Chained entry must not be in the first 4 entries.
4679 * Note: For DIF enabled IOs, we will use the ovfl_io for the sec_hio.
4681 if (sli_get_sgl_preregister(&hw->sli) &&
4682 io->def_sgl_count > 4 &&
4683 io->ovfl_io == NULL &&
4684 ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
4685 (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli)))) {
4686 io->ovfl_io = ocs_hw_io_alloc(hw);
4687 if (io->ovfl_io != NULL) {
4689 * Note: We can't call ocs_hw_io_register_sgl() here
4690 * because it checks that SGLs are not pre-registered
4691 * and for skyhawk, preregistered SGLs are required.
4693 io->ovfl_sgl = &io->ovfl_io->def_sgl;
4694 io->ovfl_sgl_count = io->ovfl_io->def_sgl_count;
4698 /* fail if we don't have an overflow SGL registered */
4699 if (io->ovfl_sgl == NULL) {
4700 return OCS_HW_RTN_ERROR;
4704 * Overflow, we need to put a link SGE in the last location of the current SGL, after
4705 * copying the last SGE to the overflow SGL
4708 ((sli4_sge_t*)io->ovfl_sgl->virt)[0] = ((sli4_sge_t*)io->sgl->virt)[io->n_sge - 1];
4710 lsp = &((sli4_lsp_sge_t*)io->sgl->virt)[io->n_sge - 1];
4711 ocs_memset(lsp, 0, sizeof(*lsp));
/* Skyhawk uses a device-specific chain SGE; others use a standard LSP SGE. */
4713 if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
4714 (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) {
4715 sli_skh_chain_sge_build(&hw->sli,
4717 io->ovfl_io->indicator,
4721 lsp->buffer_address_high = ocs_addr32_hi(io->ovfl_sgl->phys);
4722 lsp->buffer_address_low = ocs_addr32_lo(io->ovfl_sgl->phys);
4723 lsp->sge_type = SLI4_SGE_TYPE_LSP;
/* One entry (the relocated last SGE) currently lives in the overflow list. */
4726 io->ovfl_lsp->segment_length = sizeof(sli4_sge_t);
4729 /* Update the current SGL pointer, and n_sgl */
4730 io->sgl = io->ovfl_sgl;
4731 io->sgl_count = io->ovfl_sgl_count;
4734 return OCS_HW_RTN_SUCCESS;
4739 * @brief Add a scatter gather list entry to an IO.
4741 * @param hw Hardware context.
4742 * @param io Previously-allocated HW IO object.
4743 * @param addr Physical address.
4744 * @param length Length of memory pointed to by @c addr.
4746 * @return Returns 0 on success, or a non-zero value on failure.
4749 ocs_hw_io_add_sge(ocs_hw_t *hw, ocs_hw_io_t *io, uintptr_t addr, uint32_t length)
/* Append one data SGE to the IO's SGL, chaining to an overflow SGL when the
 * current one is full; see the function header above for parameters. */
4751 sli4_sge_t *data = NULL;
4753 if (!hw || !io || !addr || !length) {
4754 ocs_log_err(hw ? hw->os : NULL,
4755 "bad parameter hw=%p io=%p addr=%lx length=%u\n",
4756 hw, io, addr, length);
4757 return OCS_HW_RTN_ERROR;
/* SGL is full: attempt to extend via the overflow SGL before appending. */
4760 if ((length != 0) && (io->n_sge + 1) > io->sgl_count) {
4761 if (ocs_hw_io_overflow_sgl(hw, io) != OCS_HW_RTN_SUCCESS) {
4762 ocs_log_err(hw->os, "SGL full (%d)\n", io->n_sge);
4763 return OCS_HW_RTN_ERROR;
4767 if (length > sli_get_max_sge(&hw->sli)) {
4768 ocs_log_err(hw->os, "length of SGE %d bigger than allowed %d\n",
4769 length, sli_get_max_sge(&hw->sli));
4770 return OCS_HW_RTN_ERROR;
4773 data = io->sgl->virt;
4776 data->sge_type = SLI4_SGE_TYPE_DATA;
4777 data->buffer_address_high = ocs_addr32_hi(addr);
4778 data->buffer_address_low = ocs_addr32_lo(addr);
4779 data->buffer_length = length;
4780 data->data_offset = io->sge_offset;
4782 * Always assume this is the last entry and mark as such.
4783 * If this is not the first entry unset the "last SGE"
4784 * indication for the previous entry
4788 data[-1].last = FALSE;
4791 /* Set first_data_bde if not previously set */
4792 if (io->first_data_sge == 0) {
4793 io->first_data_sge = io->n_sge;
4796 io->sge_offset += length;
4799 /* Update the linked segment length (only executed after overflow has begun) */
4800 if (io->ovfl_lsp != NULL) {
4801 io->ovfl_lsp->segment_length = io->n_sge * sizeof(sli4_sge_t);
4804 return OCS_HW_RTN_SUCCESS;
4809 * @brief Add a T10 DIF scatter gather list entry to an IO.
4811 * @param hw Hardware context.
4812 * @param io Previously-allocated HW IO object.
4813 * @param addr DIF physical address.
4815 * @return Returns 0 on success, or a non-zero value on failure.
4818 ocs_hw_io_add_dif_sge(ocs_hw_t *hw, ocs_hw_io_t *io, uintptr_t addr)
4820 sli4_dif_sge_t *data = NULL;
4822 if (!hw || !io || !addr) {
4823 ocs_log_err(hw ? hw->os : NULL,
4824 "bad parameter hw=%p io=%p addr=%lx\n",
4826 return OCS_HW_RTN_ERROR;
4829 if ((io->n_sge + 1) > hw->config.n_sgl) {
4830 if (ocs_hw_io_overflow_sgl(hw, io) != OCS_HW_RTN_ERROR) {
4831 ocs_log_err(hw->os, "SGL full (%d)\n", io->n_sge);
4832 return OCS_HW_RTN_ERROR;
4836 data = io->sgl->virt;
4839 data->sge_type = SLI4_SGE_TYPE_DIF;
4840 /* Workaround for SKH (BZ157233) */
4841 if (((io->type == OCS_HW_IO_TARGET_WRITE) || (io->type == OCS_HW_IO_INITIATOR_READ)) &&
4842 (SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type)) {
4843 data->sge_type = SLI4_SGE_TYPE_SKIP;
4846 data->buffer_address_high = ocs_addr32_hi(addr);
4847 data->buffer_address_low = ocs_addr32_lo(addr);
4850 * Always assume this is the last entry and mark as such.
4851 * If this is not the first entry unset the "last SGE"
4852 * indication for the previous entry
4856 data[-1].last = FALSE;
4861 return OCS_HW_RTN_SUCCESS;
4866 * @brief Abort a previously-started IO.
4868 * @param hw Hardware context.
4869 * @param io_to_abort The IO to abort.
4870 * @param send_abts Boolean to have the hardware automatically
4872 * @param cb Function call upon completion of the abort (may be NULL).
4873 * @param arg Argument to pass to abort completion function.
4875 * @return Returns 0 on success, or a non-zero value on failure.
4878 ocs_hw_io_abort(ocs_hw_t *hw, ocs_hw_io_t *io_to_abort, uint32_t send_abts, void *cb, void *arg)
4880 sli4_abort_type_e atype = SLI_ABORT_MAX;
4881 uint32_t id = 0, mask = 0;
4882 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
4883 hw_wq_callback_t *wqcb;
4885 if (!hw || !io_to_abort) {
4886 ocs_log_err(hw ? hw->os : NULL,
4887 "bad parameter hw=%p io=%p\n",
4889 return OCS_HW_RTN_ERROR;
4892 if (hw->state != OCS_HW_STATE_ACTIVE) {
4893 ocs_log_err(hw->os, "cannot send IO abort, HW state=%d\n",
4895 return OCS_HW_RTN_ERROR;
4898 /* take a reference on IO being aborted */
4899 if (ocs_ref_get_unless_zero(&io_to_abort->ref) == 0) {
4900 /* command no longer active */
4901 ocs_log_test(hw ? hw->os : NULL,
4902 "io not active xri=0x%x tag=0x%x\n",
4903 io_to_abort->indicator, io_to_abort->reqtag);
4904 return OCS_HW_RTN_IO_NOT_ACTIVE;
4907 /* non-port owned XRI checks */
4908 /* Must have a valid WQ reference */
4909 if (io_to_abort->wq == NULL) {
4910 ocs_log_test(hw->os, "io_to_abort xri=0x%x not active on WQ\n",
4911 io_to_abort->indicator);
4912 ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */
4913 return OCS_HW_RTN_IO_NOT_ACTIVE;
4916 /* Validation checks complete; now check to see if already being aborted */
4917 ocs_lock(&hw->io_abort_lock);
4918 if (io_to_abort->abort_in_progress) {
4919 ocs_unlock(&hw->io_abort_lock);
4920 ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */
4921 ocs_log_debug(hw ? hw->os : NULL,
4922 "io already being aborted xri=0x%x tag=0x%x\n",
4923 io_to_abort->indicator, io_to_abort->reqtag);
4924 return OCS_HW_RTN_IO_ABORT_IN_PROGRESS;
4928 * This IO is not already being aborted. Set flag so we won't try to
4929 * abort it again. After all, we only have one abort_done callback.
4931 io_to_abort->abort_in_progress = 1;
4932 ocs_unlock(&hw->io_abort_lock);
4935 * If we got here, the possibilities are:
4937 * - io_to_abort->wq_index != UINT32_MAX
4938 * - submit ABORT_WQE to same WQ
4940 * - rxri: io_to_abort->wq_index == UINT32_MAX
4941 * - submit ABORT_WQE to any WQ
4943 * - io_to_abort->index != UINT32_MAX
4944 * - submit ABORT_WQE to same WQ
4945 * - io_to_abort->index == UINT32_MAX
4946 * - submit ABORT_WQE to any WQ
4948 io_to_abort->abort_done = cb;
4949 io_to_abort->abort_arg = arg;
4951 atype = SLI_ABORT_XRI;
4952 id = io_to_abort->indicator;
4954 /* Allocate a request tag for the abort portion of this IO */
4955 wqcb = ocs_hw_reqtag_alloc(hw, ocs_hw_wq_process_abort, io_to_abort);
4957 ocs_log_err(hw->os, "can't allocate request tag\n");
4958 return OCS_HW_RTN_NO_RESOURCES;
4960 io_to_abort->abort_reqtag = wqcb->instance_index;
4963 * If the wqe is on the pending list, then set this wqe to be
4964 * aborted when the IO's wqe is removed from the list.
4966 if (io_to_abort->wq != NULL) {
4967 sli_queue_lock(io_to_abort->wq->queue);
4968 if (ocs_list_on_list(&io_to_abort->wqe.link)) {
4969 io_to_abort->wqe.abort_wqe_submit_needed = 1;
4970 io_to_abort->wqe.send_abts = send_abts;
4971 io_to_abort->wqe.id = id;
4972 io_to_abort->wqe.abort_reqtag = io_to_abort->abort_reqtag;
4973 sli_queue_unlock(io_to_abort->wq->queue);
4976 sli_queue_unlock(io_to_abort->wq->queue);
4979 if (sli_abort_wqe(&hw->sli, io_to_abort->wqe.wqebuf, hw->sli.config.wqe_size, atype, send_abts, id, mask,
4980 io_to_abort->abort_reqtag, SLI4_CQ_DEFAULT)) {
4981 ocs_log_err(hw->os, "ABORT WQE error\n");
4982 io_to_abort->abort_reqtag = UINT32_MAX;
4983 ocs_hw_reqtag_free(hw, wqcb);
4984 rc = OCS_HW_RTN_ERROR;
4987 if (OCS_HW_RTN_SUCCESS == rc) {
4988 if (io_to_abort->wq == NULL) {
4989 io_to_abort->wq = ocs_hw_queue_next_wq(hw, io_to_abort);
4990 ocs_hw_assert(io_to_abort->wq != NULL);
4992 /* ABORT_WQE does not actually utilize an XRI on the Port,
4993 * therefore, keep xbusy as-is to track the exchange's state,
4994 * not the ABORT_WQE's state
4996 rc = hw_wq_write(io_to_abort->wq, &io_to_abort->wqe);
4998 /* non-negative return is success */
5000 /* can't abort an abort so skip adding to timed wqe list */
5004 if (OCS_HW_RTN_SUCCESS != rc) {
5005 ocs_lock(&hw->io_abort_lock);
5006 io_to_abort->abort_in_progress = 0;
5007 ocs_unlock(&hw->io_abort_lock);
5008 ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */
5015 * @brief Return the OX_ID/RX_ID of the IO.
5017 * @param hw Hardware context.
5018 * @param io HW IO object.
5020 * @return Returns X_ID on success, or -1 on failure.
5023 ocs_hw_io_get_xid(ocs_hw_t *hw, ocs_hw_io_t *io)
5026 ocs_log_err(hw ? hw->os : NULL,
5027 "bad parameter hw=%p io=%p\n", hw, io);
5031 return io->indicator;
5035 typedef struct ocs_hw_fw_write_cb_arg {
5038 } ocs_hw_fw_write_cb_arg_t;
5040 typedef struct ocs_hw_sfp_cb_arg {
5044 } ocs_hw_sfp_cb_arg_t;
5046 typedef struct ocs_hw_temp_cb_arg {
5047 ocs_hw_temp_cb_t cb;
5049 } ocs_hw_temp_cb_arg_t;
5051 typedef struct ocs_hw_link_stat_cb_arg {
5052 ocs_hw_link_stat_cb_t cb;
5054 } ocs_hw_link_stat_cb_arg_t;
5056 typedef struct ocs_hw_host_stat_cb_arg {
5057 ocs_hw_host_stat_cb_t cb;
5059 } ocs_hw_host_stat_cb_arg_t;
5061 typedef struct ocs_hw_dump_get_cb_arg {
5062 ocs_hw_dump_get_cb_t cb;
5065 } ocs_hw_dump_get_cb_arg_t;
5067 typedef struct ocs_hw_dump_clear_cb_arg {
5068 ocs_hw_dump_clear_cb_t cb;
5071 } ocs_hw_dump_clear_cb_arg_t;
5074 * @brief Write a portion of a firmware image to the device.
5077 * Calls the correct firmware write function based on the device type.
5079 * @param hw Hardware context.
5080 * @param dma DMA structure containing the firmware image chunk.
5081 * @param size Size of the firmware image chunk.
5082 * @param offset Offset, in bytes, from the beginning of the firmware image.
5083 * @param last True if this is the last chunk of the image.
5084 * Causes the image to be committed to flash.
5085 * @param cb Pointer to a callback function that is called when the command completes.
5086 * The callback function prototype is
5087 * <tt>void cb(int32_t status, uint32_t bytes_written, void *arg)</tt>.
5088 * @param arg Pointer to be passed to the callback function.
5090 * @return Returns 0 on success, or a non-zero value on failure.
5093 ocs_hw_firmware_write(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg)
5095 if (hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) {
5096 return ocs_hw_firmware_write_lancer(hw, dma, size, offset, last, cb, arg);
5098 /* Write firmware_write for BE3/Skyhawk not supported */
5104 * @brief Write a portion of a firmware image to the Emulex XE201 ASIC (Lancer).
5107 * Creates a SLI_CONFIG mailbox command, fills it with the correct values to write a
5108 * firmware image chunk, and then sends the command with ocs_hw_command(). On completion,
5109 * the callback function ocs_hw_fw_write_cb() gets called to free the mailbox
5110 * and to signal the caller that the write has completed.
5112 * @param hw Hardware context.
5113 * @param dma DMA structure containing the firmware image chunk.
5114 * @param size Size of the firmware image chunk.
5115 * @param offset Offset, in bytes, from the beginning of the firmware image.
5116 * @param last True if this is the last chunk of the image. Causes the image to be committed to flash.
5117 * @param cb Pointer to a callback function that is called when the command completes.
5118 * The callback function prototype is
5119 * <tt>void cb(int32_t status, uint32_t bytes_written, void *arg)</tt>.
5120 * @param arg Pointer to be passed to the callback function.
5122 * @return Returns 0 on success, or a non-zero value on failure.
5125 ocs_hw_firmware_write_lancer(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg)
5127 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5129 ocs_hw_fw_write_cb_arg_t *cb_arg;
5130 int noc=0; /* No Commit bit - set to 1 for testing */
5132 if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
5133 ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
5134 return OCS_HW_RTN_ERROR;
5137 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5138 if (mbxdata == NULL) {
5139 ocs_log_err(hw->os, "failed to malloc mbox\n");
5140 return OCS_HW_RTN_NO_MEMORY;
5143 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_fw_write_cb_arg_t), OCS_M_NOWAIT);
5144 if (cb_arg == NULL) {
5145 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
5146 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5147 return OCS_HW_RTN_NO_MEMORY;
5153 if (sli_cmd_common_write_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE, noc, last,
5154 size, offset, "/prg/", dma)) {
5155 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_fw_write, cb_arg);
5158 if (rc != OCS_HW_RTN_SUCCESS) {
5159 ocs_log_test(hw->os, "COMMON_WRITE_OBJECT failed\n");
5160 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5161 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t));
5169 * @brief Called when the WRITE OBJECT command completes.
5172 * Get the number of bytes actually written out of the response, free the mailbox
5173 * that was malloc'd by ocs_hw_firmware_write(),
5174 * then call the callback and pass the status and bytes written.
5176 * @param hw Hardware context.
5177 * @param status Status field from the mbox completion.
5178 * @param mqe Mailbox response structure.
5179 * @param arg Pointer to a callback function that signals the caller that the command is done.
5180 * The callback function prototype is <tt>void cb(int32_t status, uint32_t bytes_written)</tt>.
5182 * @return Returns 0.
5185 ocs_hw_cb_fw_write(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
5188 sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
5189 sli4_res_common_write_object_t* wr_obj_rsp = (sli4_res_common_write_object_t*) &(mbox_rsp->payload.embed);
5190 ocs_hw_fw_write_cb_arg_t *cb_arg = arg;
5191 uint32_t bytes_written;
5192 uint16_t mbox_status;
5193 uint32_t change_status;
5195 bytes_written = wr_obj_rsp->actual_write_length;
5196 mbox_status = mbox_rsp->hdr.status;
5197 change_status = wr_obj_rsp->change_status;
5199 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5203 if ((status == 0) && mbox_status) {
5204 status = mbox_status;
5206 cb_arg->cb(status, bytes_written, change_status, cb_arg->arg);
5209 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t));
5217 * @brief Called when the READ_TRANSCEIVER_DATA command completes.
5220 * Get the number of bytes read out of the response, free the mailbox that was malloc'd
5221 * by ocs_hw_get_sfp(), then call the callback and pass the status and bytes written.
5223 * @param hw Hardware context.
5224 * @param status Status field from the mbox completion.
5225 * @param mqe Mailbox response structure.
5226 * @param arg Pointer to a callback function that signals the caller that the command is done.
5227 * The callback function prototype is
5228 * <tt>void cb(int32_t status, uint32_t bytes_written, uint32_t *data, void *arg)</tt>.
5230 * @return Returns 0.
5233 ocs_hw_cb_sfp(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
5236 ocs_hw_sfp_cb_arg_t *cb_arg = arg;
5237 ocs_dma_t *payload = NULL;
5238 sli4_res_common_read_transceiver_data_t* mbox_rsp = NULL;
5239 uint32_t bytes_written;
5242 payload = &(cb_arg->payload);
5244 mbox_rsp = (sli4_res_common_read_transceiver_data_t*) payload->virt;
5245 bytes_written = mbox_rsp->hdr.response_length;
5246 if ((status == 0) && mbox_rsp->hdr.status) {
5247 status = mbox_rsp->hdr.status;
5249 cb_arg->cb(hw->os, status, bytes_written, mbox_rsp->page_data, cb_arg->arg);
5252 ocs_dma_free(hw->os, &cb_arg->payload);
5253 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t));
5256 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5262 * @brief Function to retrieve the SFP information.
5264 * @param hw Hardware context.
5265 * @param page The page of SFP data to retrieve (0xa0 or 0xa2).
5266 * @param cb Function call upon completion of sending the data (may be NULL).
5267 * @param arg Argument to pass to IO completion function.
5269 * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
5272 ocs_hw_get_sfp(ocs_hw_t *hw, uint16_t page, ocs_hw_sfp_cb_t cb, void *arg)
5274 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5275 ocs_hw_sfp_cb_arg_t *cb_arg;
5278 /* mbxdata holds the header of the command */
5279 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5280 if (mbxdata == NULL) {
5281 ocs_log_err(hw->os, "failed to malloc mbox\n");
5282 return OCS_HW_RTN_NO_MEMORY;
5285 /* cb_arg holds the data that will be passed to the callback on completion */
5286 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_sfp_cb_arg_t), OCS_M_NOWAIT);
5287 if (cb_arg == NULL) {
5288 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
5289 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5290 return OCS_HW_RTN_NO_MEMORY;
5296 /* payload holds the non-embedded portion */
5297 if (ocs_dma_alloc(hw->os, &cb_arg->payload, sizeof(sli4_res_common_read_transceiver_data_t),
5298 OCS_MIN_DMA_ALIGNMENT)) {
5299 ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
5300 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t));
5301 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5302 return OCS_HW_RTN_NO_MEMORY;
5305 /* Send the HW command */
5306 if (sli_cmd_common_read_transceiver_data(&hw->sli, mbxdata, SLI4_BMBX_SIZE, page,
5307 &cb_arg->payload)) {
5308 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_sfp, cb_arg);
5311 if (rc != OCS_HW_RTN_SUCCESS) {
5312 ocs_log_test(hw->os, "READ_TRANSCEIVER_DATA failed with status %d\n",
5314 ocs_dma_free(hw->os, &cb_arg->payload);
5315 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t));
5316 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5323 * @brief Function to retrieve the temperature information.
5325 * @param hw Hardware context.
5326 * @param cb Function call upon completion of sending the data (may be NULL).
5327 * @param arg Argument to pass to IO completion function.
5329 * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
5332 ocs_hw_get_temperature(ocs_hw_t *hw, ocs_hw_temp_cb_t cb, void *arg)
5334 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5335 ocs_hw_temp_cb_arg_t *cb_arg;
5338 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5339 if (mbxdata == NULL) {
5340 ocs_log_err(hw->os, "failed to malloc mbox");
5341 return OCS_HW_RTN_NO_MEMORY;
5344 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_temp_cb_arg_t), OCS_M_NOWAIT);
5345 if (cb_arg == NULL) {
5346 ocs_log_err(hw->os, "failed to malloc cb_arg");
5347 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5348 return OCS_HW_RTN_NO_MEMORY;
5354 if (sli_cmd_dump_type4(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
5355 SLI4_WKI_TAG_SAT_TEM)) {
5356 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_temp, cb_arg);
5359 if (rc != OCS_HW_RTN_SUCCESS) {
5360 ocs_log_test(hw->os, "DUMP_TYPE4 failed\n");
5361 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5362 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_temp_cb_arg_t));
5369 * @brief Called when the DUMP command completes.
5372 * Get the temperature data out of the response, free the mailbox that was malloc'd
5373 * by ocs_hw_get_temperature(), then call the callback and pass the status and data.
5375 * @param hw Hardware context.
5376 * @param status Status field from the mbox completion.
5377 * @param mqe Mailbox response structure.
5378 * @param arg Pointer to a callback function that signals the caller that the command is done.
5379 * The callback function prototype is defined by ocs_hw_temp_cb_t.
5381 * @return Returns 0.
5384 ocs_hw_cb_temp(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
5387 sli4_cmd_dump4_t* mbox_rsp = (sli4_cmd_dump4_t*) mqe;
5388 ocs_hw_temp_cb_arg_t *cb_arg = arg;
5389 uint32_t curr_temp = mbox_rsp->resp_data[0]; /* word 5 */
5390 uint32_t crit_temp_thrshld = mbox_rsp->resp_data[1]; /* word 6*/
5391 uint32_t warn_temp_thrshld = mbox_rsp->resp_data[2]; /* word 7 */
5392 uint32_t norm_temp_thrshld = mbox_rsp->resp_data[3]; /* word 8 */
5393 uint32_t fan_off_thrshld = mbox_rsp->resp_data[4]; /* word 9 */
5394 uint32_t fan_on_thrshld = mbox_rsp->resp_data[5]; /* word 10 */
5398 if ((status == 0) && mbox_rsp->hdr.status) {
5399 status = mbox_rsp->hdr.status;
5411 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_temp_cb_arg_t));
5413 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5419 * @brief Function to retrieve the link statistics.
5421 * @param hw Hardware context.
5422 * @param req_ext_counters If TRUE, then the extended counters will be requested.
5423 * @param clear_overflow_flags If TRUE, then overflow flags will be cleared.
5424 * @param clear_all_counters If TRUE, the counters will be cleared.
5425 * @param cb Function call upon completion of sending the data (may be NULL).
5426 * @param arg Argument to pass to IO completion function.
5428 * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
5431 ocs_hw_get_link_stats(ocs_hw_t *hw,
5432 uint8_t req_ext_counters,
5433 uint8_t clear_overflow_flags,
5434 uint8_t clear_all_counters,
5435 ocs_hw_link_stat_cb_t cb,
5438 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5439 ocs_hw_link_stat_cb_arg_t *cb_arg;
5442 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5443 if (mbxdata == NULL) {
5444 ocs_log_err(hw->os, "failed to malloc mbox");
5445 return OCS_HW_RTN_NO_MEMORY;
5448 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_link_stat_cb_arg_t), OCS_M_NOWAIT);
5449 if (cb_arg == NULL) {
5450 ocs_log_err(hw->os, "failed to malloc cb_arg");
5451 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5452 return OCS_HW_RTN_NO_MEMORY;
5458 if (sli_cmd_read_link_stats(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
5460 clear_overflow_flags,
5461 clear_all_counters)) {
5462 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_link_stat, cb_arg);
5465 if (rc != OCS_HW_RTN_SUCCESS) {
5466 ocs_log_test(hw->os, "READ_LINK_STATS failed\n");
5467 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5468 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_link_stat_cb_arg_t));
5475 * @brief Called when the READ_LINK_STAT command completes.
5478 * Get the counters out of the response, free the mailbox that was malloc'd
5479 * by ocs_hw_get_link_stats(), then call the callback and pass the status and data.
5481 * @param hw Hardware context.
5482 * @param status Status field from the mbox completion.
5483 * @param mqe Mailbox response structure.
5484 * @param arg Pointer to a callback function that signals the caller that the command is done.
5485 * The callback function prototype is defined by ocs_hw_link_stat_cb_t.
5487 * @return Returns 0.
5490 ocs_hw_cb_link_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
5493 sli4_cmd_read_link_stats_t* mbox_rsp = (sli4_cmd_read_link_stats_t*) mqe;
5494 ocs_hw_link_stat_cb_arg_t *cb_arg = arg;
5495 ocs_hw_link_stat_counts_t counts[OCS_HW_LINK_STAT_MAX];
5496 uint32_t num_counters = (mbox_rsp->gec ? 20 : 13);
5498 ocs_memset(counts, 0, sizeof(ocs_hw_link_stat_counts_t) *
5499 OCS_HW_LINK_STAT_MAX);
5501 counts[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].overflow = mbox_rsp->w02of;
5502 counts[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].overflow = mbox_rsp->w03of;
5503 counts[OCS_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].overflow = mbox_rsp->w04of;
5504 counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].overflow = mbox_rsp->w05of;
5505 counts[OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].overflow = mbox_rsp->w06of;
5506 counts[OCS_HW_LINK_STAT_CRC_COUNT].overflow = mbox_rsp->w07of;
5507 counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].overflow = mbox_rsp->w08of;
5508 counts[OCS_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].overflow = mbox_rsp->w09of;
5509 counts[OCS_HW_LINK_STAT_ARB_TIMEOUT_COUNT].overflow = mbox_rsp->w10of;
5510 counts[OCS_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].overflow = mbox_rsp->w11of;
5511 counts[OCS_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].overflow = mbox_rsp->w12of;
5512 counts[OCS_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].overflow = mbox_rsp->w13of;
5513 counts[OCS_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].overflow = mbox_rsp->w14of;
5514 counts[OCS_HW_LINK_STAT_RCV_EOFA_COUNT].overflow = mbox_rsp->w15of;
5515 counts[OCS_HW_LINK_STAT_RCV_EOFDTI_COUNT].overflow = mbox_rsp->w16of;
5516 counts[OCS_HW_LINK_STAT_RCV_EOFNI_COUNT].overflow = mbox_rsp->w17of;
5517 counts[OCS_HW_LINK_STAT_RCV_SOFF_COUNT].overflow = mbox_rsp->w18of;
5518 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].overflow = mbox_rsp->w19of;
5519 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].overflow = mbox_rsp->w20of;
5520 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].overflow = mbox_rsp->w21of;
5522 counts[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].counter = mbox_rsp->link_failure_error_count;
5523 counts[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter = mbox_rsp->loss_of_sync_error_count;
5524 counts[OCS_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].counter = mbox_rsp->loss_of_signal_error_count;
5525 counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter = mbox_rsp->primitive_sequence_error_count;
5526 counts[OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter = mbox_rsp->invalid_transmission_word_error_count;
5527 counts[OCS_HW_LINK_STAT_CRC_COUNT].counter = mbox_rsp->crc_error_count;
5528 counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].counter = mbox_rsp->primitive_sequence_event_timeout_count;
5529 counts[OCS_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].counter = mbox_rsp->elastic_buffer_overrun_error_count;
5530 counts[OCS_HW_LINK_STAT_ARB_TIMEOUT_COUNT].counter = mbox_rsp->arbitration_fc_al_timout_count;
5531 counts[OCS_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].counter = mbox_rsp->advertised_receive_bufftor_to_buffer_credit;
5532 counts[OCS_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].counter = mbox_rsp->current_receive_buffer_to_buffer_credit;
5533 counts[OCS_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].counter = mbox_rsp->advertised_transmit_buffer_to_buffer_credit;
5534 counts[OCS_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].counter = mbox_rsp->current_transmit_buffer_to_buffer_credit;
5535 counts[OCS_HW_LINK_STAT_RCV_EOFA_COUNT].counter = mbox_rsp->received_eofa_count;
5536 counts[OCS_HW_LINK_STAT_RCV_EOFDTI_COUNT].counter = mbox_rsp->received_eofdti_count;
5537 counts[OCS_HW_LINK_STAT_RCV_EOFNI_COUNT].counter = mbox_rsp->received_eofni_count;
5538 counts[OCS_HW_LINK_STAT_RCV_SOFF_COUNT].counter = mbox_rsp->received_soff_count;
5539 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].counter = mbox_rsp->received_dropped_no_aer_count;
5540 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].counter = mbox_rsp->received_dropped_no_available_rpi_resources_count;
5541 counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].counter = mbox_rsp->received_dropped_no_available_xri_resources_count;
5545 if ((status == 0) && mbox_rsp->hdr.status) {
5546 status = mbox_rsp->hdr.status;
5554 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_link_stat_cb_arg_t));
5556 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5562 * @brief Function to retrieve the link and host statistics.
5564 * @param hw Hardware context.
5565 * @param cc clear counters, if TRUE all counters will be cleared.
5566 * @param cb Function call upon completion of receiving the data.
5567 * @param arg Argument to pass to pointer fc hosts statistics structure.
5569 * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
5572 ocs_hw_get_host_stats(ocs_hw_t *hw, uint8_t cc, ocs_hw_host_stat_cb_t cb, void *arg)
5574 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5575 ocs_hw_host_stat_cb_arg_t *cb_arg;
5578 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO);
5579 if (mbxdata == NULL) {
5580 ocs_log_err(hw->os, "failed to malloc mbox");
5581 return OCS_HW_RTN_NO_MEMORY;
5584 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_host_stat_cb_arg_t), 0);
5585 if (cb_arg == NULL) {
5586 ocs_log_err(hw->os, "failed to malloc cb_arg");
5587 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5588 return OCS_HW_RTN_NO_MEMORY;
5594 /* Send the HW command to get the host stats */
5595 if (sli_cmd_read_status(&hw->sli, mbxdata, SLI4_BMBX_SIZE, cc)) {
5596 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_host_stat, cb_arg);
5599 if (rc != OCS_HW_RTN_SUCCESS) {
5600 ocs_log_test(hw->os, "READ_HOST_STATS failed\n");
5601 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5602 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_host_stat_cb_arg_t));
5610 * @brief Called when the READ_STATUS command completes.
5613 * Get the counters out of the response, free the mailbox that was malloc'd
5614 * by ocs_hw_get_host_stats(), then call the callback and pass
5615 * the status and data.
5617 * @param hw Hardware context.
5618 * @param status Status field from the mbox completion.
5619 * @param mqe Mailbox response structure.
5620 * @param arg Pointer to a callback function that signals the caller that the command is done.
5621 * The callback function prototype is defined by
5622 * ocs_hw_host_stat_cb_t.
5624 * @return Returns 0.
5627 ocs_hw_cb_host_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
5630 sli4_cmd_read_status_t* mbox_rsp = (sli4_cmd_read_status_t*) mqe;
5631 ocs_hw_host_stat_cb_arg_t *cb_arg = arg;
5632 ocs_hw_host_stat_counts_t counts[OCS_HW_HOST_STAT_MAX];
5633 uint32_t num_counters = OCS_HW_HOST_STAT_MAX;
5635 ocs_memset(counts, 0, sizeof(ocs_hw_host_stat_counts_t) *
5636 OCS_HW_HOST_STAT_MAX);
5638 counts[OCS_HW_HOST_STAT_TX_KBYTE_COUNT].counter = mbox_rsp->transmit_kbyte_count;
5639 counts[OCS_HW_HOST_STAT_RX_KBYTE_COUNT].counter = mbox_rsp->receive_kbyte_count;
5640 counts[OCS_HW_HOST_STAT_TX_FRAME_COUNT].counter = mbox_rsp->transmit_frame_count;
5641 counts[OCS_HW_HOST_STAT_RX_FRAME_COUNT].counter = mbox_rsp->receive_frame_count;
5642 counts[OCS_HW_HOST_STAT_TX_SEQ_COUNT].counter = mbox_rsp->transmit_sequence_count;
5643 counts[OCS_HW_HOST_STAT_RX_SEQ_COUNT].counter = mbox_rsp->receive_sequence_count;
5644 counts[OCS_HW_HOST_STAT_TOTAL_EXCH_ORIG].counter = mbox_rsp->total_exchanges_originator;
5645 counts[OCS_HW_HOST_STAT_TOTAL_EXCH_RESP].counter = mbox_rsp->total_exchanges_responder;
5646 counts[OCS_HW_HOSY_STAT_RX_P_BSY_COUNT].counter = mbox_rsp->receive_p_bsy_count;
5647 counts[OCS_HW_HOST_STAT_RX_F_BSY_COUNT].counter = mbox_rsp->receive_f_bsy_count;
5648 counts[OCS_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT].counter = mbox_rsp->dropped_frames_due_to_no_rq_buffer_count;
5649 counts[OCS_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT].counter = mbox_rsp->empty_rq_timeout_count;
5650 counts[OCS_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT].counter = mbox_rsp->dropped_frames_due_to_no_xri_count;
5651 counts[OCS_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT].counter = mbox_rsp->empty_xri_pool_count;
5656 if ((status == 0) && mbox_rsp->hdr.status) {
5657 status = mbox_rsp->hdr.status;
5665 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_host_stat_cb_arg_t));
5667 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5673 * @brief HW link configuration enum to the CLP string value mapping.
5675 * This structure provides a mapping from the ocs_hw_linkcfg_e
5676 * enum (enum exposed for the OCS_HW_PORT_SET_LINK_CONFIG port
5677 * control) to the CLP string that is used
5678 * in the DMTF_CLP_CMD mailbox command.
5680 typedef struct ocs_hw_linkcfg_map_s {
5681 ocs_hw_linkcfg_e linkcfg;
5682 const char *clp_str;
5683 } ocs_hw_linkcfg_map_t;
5686 * @brief Mapping from the HW linkcfg enum to the CLP command value
5689 static ocs_hw_linkcfg_map_t linkcfg_map[] = {
5690 {OCS_HW_LINKCFG_4X10G, "ELX_4x10G"},
5691 {OCS_HW_LINKCFG_1X40G, "ELX_1x40G"},
5692 {OCS_HW_LINKCFG_2X16G, "ELX_2x16G"},
5693 {OCS_HW_LINKCFG_4X8G, "ELX_4x8G"},
5694 {OCS_HW_LINKCFG_4X1G, "ELX_4x1G"},
5695 {OCS_HW_LINKCFG_2X10G, "ELX_2x10G"},
5696 {OCS_HW_LINKCFG_2X10G_2X8G, "ELX_2x10G_2x8G"}};
5699 * @brief HW link configuration enum to Skyhawk link config ID mapping.
5701 * This structure provides a mapping from the ocs_hw_linkcfg_e
5702 * enum (enum exposed for the OCS_HW_PORT_SET_LINK_CONFIG port
5703 * control) to the link config ID numbers used by Skyhawk
5705 typedef struct ocs_hw_skyhawk_linkcfg_map_s {
5706 ocs_hw_linkcfg_e linkcfg;
5708 } ocs_hw_skyhawk_linkcfg_map_t;
5711 * @brief Mapping from the HW linkcfg enum to the Skyhawk link config IDs
5713 static ocs_hw_skyhawk_linkcfg_map_t skyhawk_linkcfg_map[] = {
5714 {OCS_HW_LINKCFG_4X10G, 0x0a},
5715 {OCS_HW_LINKCFG_1X40G, 0x09},
5719 * @brief Helper function for getting the HW linkcfg enum from the CLP
5722 * @param clp_str CLP string value from OEMELX_LinkConfig.
5724 * @return Returns the HW linkcfg enum corresponding to clp_str.
5726 static ocs_hw_linkcfg_e
5727 ocs_hw_linkcfg_from_clp(const char *clp_str)
5730 for (i = 0; i < ARRAY_SIZE(linkcfg_map); i++) {
5731 if (ocs_strncmp(linkcfg_map[i].clp_str, clp_str, ocs_strlen(clp_str)) == 0) {
5732 return linkcfg_map[i].linkcfg;
5735 return OCS_HW_LINKCFG_NA;
5739 * @brief Helper function for getting the CLP string value from the HW
5742 * @param linkcfg HW linkcfg enum.
5744 * @return Returns the OEMELX_LinkConfig CLP string value corresponding to
5748 ocs_hw_clp_from_linkcfg(ocs_hw_linkcfg_e linkcfg)
5751 for (i = 0; i < ARRAY_SIZE(linkcfg_map); i++) {
5752 if (linkcfg_map[i].linkcfg == linkcfg) {
5753 return linkcfg_map[i].clp_str;
5760 * @brief Helper function for getting a Skyhawk link config ID from the HW
5763 * @param linkcfg HW linkcfg enum.
5765 * @return Returns the Skyhawk link config ID corresponding to
5769 ocs_hw_config_id_from_linkcfg(ocs_hw_linkcfg_e linkcfg)
5772 for (i = 0; i < ARRAY_SIZE(skyhawk_linkcfg_map); i++) {
5773 if (skyhawk_linkcfg_map[i].linkcfg == linkcfg) {
5774 return skyhawk_linkcfg_map[i].config_id;
5781 * @brief Helper function for getting the HW linkcfg enum from a
5782 * Skyhawk config ID.
5784 * @param config_id Skyhawk link config ID.
5786 * @return Returns the HW linkcfg enum corresponding to config_id.
5788 static ocs_hw_linkcfg_e
5789 ocs_hw_linkcfg_from_config_id(const uint32_t config_id)
5792 for (i = 0; i < ARRAY_SIZE(skyhawk_linkcfg_map); i++) {
5793 if (skyhawk_linkcfg_map[i].config_id == config_id) {
5794 return skyhawk_linkcfg_map[i].linkcfg;
5797 return OCS_HW_LINKCFG_NA;
5801 * @brief Link configuration callback argument.
5803 typedef struct ocs_hw_linkcfg_cb_arg_s {
5804 ocs_hw_port_control_cb_t cb;
5810 uint32_t result_len;
5811 } ocs_hw_linkcfg_cb_arg_t;
5814 * @brief Set link configuration.
5816 * @param hw Hardware context.
5817 * @param value Link configuration enum to which the link configuration is
5819 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
5820 * @param cb Callback function to invoke following mbx command.
5821 * @param arg Callback argument.
5823 * @return Returns OCS_HW_RTN_SUCCESS on success.
5826 ocs_hw_set_linkcfg(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
5828 if (!sli_link_is_configurable(&hw->sli)) {
5829 ocs_log_debug(hw->os, "Function not supported\n");
5830 return OCS_HW_RTN_ERROR;
5833 if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
5834 return ocs_hw_set_linkcfg_lancer(hw, value, opts, cb, arg);
5835 } else if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
5836 (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) {
5837 return ocs_hw_set_linkcfg_skyhawk(hw, value, opts, cb, arg);
5839 ocs_log_test(hw->os, "Function not supported for this IF_TYPE\n");
5840 return OCS_HW_RTN_ERROR;
5845 * @brief Set link configuration for Lancer
5847 * @param hw Hardware context.
5848 * @param value Link configuration enum to which the link configuration is
5850 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
5851 * @param cb Callback function to invoke following mbx command.
5852 * @param arg Callback argument.
5854 * @return Returns OCS_HW_RTN_SUCCESS on success.
/*
 * Set the Lancer link configuration via a DMTF CLP "set" command.
 * Builds the CLP string "set / OEMELX_LinkConfig=<value>", stages it in a
 * DMA buffer together with a response buffer, and hands both to
 * ocs_hw_exec_dmtf_clp_cmd().  Ownership of cb_arg and the DMA buffers
 * passes to ocs_hw_linkcfg_dmtf_clp_cb() on a successful non-polled
 * submission; otherwise they are freed here.
 */
5857 ocs_hw_set_linkcfg_lancer(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
5859 char cmd[OCS_HW_DMTF_CLP_CMD_MAX];
5860 ocs_hw_linkcfg_cb_arg_t *cb_arg;
5861 const char *value_str = NULL;
5862 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
5864 /* translate ocs_hw_linkcfg_e to CLP string */
5865 value_str = ocs_hw_clp_from_linkcfg(value);
5867 /* allocate memory for callback argument */
5868 cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT);
5869 if (cb_arg == NULL) {
5870 ocs_log_err(hw->os, "failed to malloc cb_arg");
5871 return OCS_HW_RTN_NO_MEMORY;
5874 ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "set / OEMELX_LinkConfig=%s", value_str);
5875 /* allocate DMA for command */
5876 if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, ocs_strlen(cmd)+1, 4096)) {
5877 ocs_log_err(hw->os, "malloc failed\n");
5878 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5879 return OCS_HW_RTN_NO_MEMORY;
/* copy the NUL-terminated CLP command into the DMA buffer */
5881 ocs_memset(cb_arg->dma_cmd.virt, 0, ocs_strlen(cmd)+1);
5882 ocs_memcpy(cb_arg->dma_cmd.virt, cmd, ocs_strlen(cmd));
5884 /* allocate DMA for response */
5885 if (ocs_dma_alloc(hw->os, &cb_arg->dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) {
5886 ocs_log_err(hw->os, "malloc failed\n");
5887 ocs_dma_free(hw->os, &cb_arg->dma_cmd);
5888 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5889 return OCS_HW_RTN_NO_MEMORY;
5893 cb_arg->opts = opts;
5895 rc = ocs_hw_exec_dmtf_clp_cmd(hw, &cb_arg->dma_cmd, &cb_arg->dma_resp,
5896 opts, ocs_hw_linkcfg_dmtf_clp_cb, cb_arg);
5898 if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) {
5899 /* if failed, or polling, free memory here; if success and not
5900 * polling, will free in callback function
5903 ocs_log_test(hw->os, "CLP cmd=\"%s\" failed\n",
5904 (char *)cb_arg->dma_cmd.virt);
5906 ocs_dma_free(hw->os, &cb_arg->dma_cmd);
5907 ocs_dma_free(hw->os, &cb_arg->dma_resp);
5908 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5914 * @brief Callback for ocs_hw_set_linkcfg_skyhawk
5916 * @param hw Hardware context.
5917 * @param status Status from the RECONFIG_GET_LINK_INFO command.
5918 * @param mqe Mailbox response structure.
5919 * @param arg Pointer to a callback argument.
/*
 * Mailbox completion for the Skyhawk SET_RECONFIG_LINK_ID command: log a
 * failure, forward the status to the caller's callback, and release cb_arg
 * unless the command was polled (the issuing function frees it then).
 */
5924 ocs_hw_set_active_link_config_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
5926 ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;
5929 ocs_log_test(hw->os, "SET_RECONFIG_LINK_ID failed, status=%d\n", status);
5932 /* invoke callback */
5934 cb_arg->cb(status, 0, cb_arg->arg);
5937 /* if polling, will free memory in calling function */
5938 if (cb_arg->opts != OCS_CMD_POLL) {
5939 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5944 * @brief Set link configuration for a Skyhawk
5946 * @param hw Hardware context.
5947 * @param value Link configuration enum to which the link configuration is
5949 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
5950 * @param cb Callback function to invoke following mbx command.
5951 * @param arg Callback argument.
5953 * @return Returns OCS_HW_RTN_SUCCESS on success.
/*
 * Set the Skyhawk link configuration: map the linkcfg enum to a Skyhawk
 * config ID and issue COMMON_SET_RECONFIG_LINK_ID.  When polling, the
 * completion callback is invoked synchronously here and all memory is
 * freed before returning; when not polling, cb_arg is freed by
 * ocs_hw_set_active_link_config_cb().
 */
5956 ocs_hw_set_linkcfg_skyhawk(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
5959 ocs_hw_linkcfg_cb_arg_t *cb_arg;
5960 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
5963 config_id = ocs_hw_config_id_from_linkcfg(value);
/* config_id 0 means the map has no entry for this linkcfg */
5965 if (config_id == 0) {
5966 ocs_log_test(hw->os, "Link config %d not supported by Skyhawk\n", value);
5967 return OCS_HW_RTN_ERROR;
5970 /* mbxdata holds the header of the command */
5971 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5972 if (mbxdata == NULL) {
5973 ocs_log_err(hw->os, "failed to malloc mbox\n");
5974 return OCS_HW_RTN_NO_MEMORY;
5977 /* cb_arg holds the data that will be passed to the callback on completion */
5978 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_linkcfg_cb_arg_t), OCS_M_NOWAIT);
5979 if (cb_arg == NULL) {
5980 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
5981 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5982 return OCS_HW_RTN_NO_MEMORY;
5988 if (sli_cmd_common_set_reconfig_link_id(&hw->sli, mbxdata, SLI4_BMBX_SIZE, NULL, 0, config_id)) {
5989 rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_set_active_link_config_cb, cb_arg);
5992 if (rc != OCS_HW_RTN_SUCCESS) {
5993 ocs_log_err(hw->os, "SET_RECONFIG_LINK_ID failed\n");
5994 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5995 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
5996 } else if (opts == OCS_CMD_POLL) {
5997 /* if we're polling we have to call the callback here. */
5998 ocs_hw_set_active_link_config_cb(hw, 0, mbxdata, cb_arg);
5999 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6000 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
6002 /* We weren't polling, so the callback freed cb_arg; only the mailbox remains */
6003 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6010 * @brief Get link configuration.
6012 * @param hw Hardware context.
6013 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
6014 * @param cb Callback function to invoke following mbx command.
6015 * @param arg Callback argument.
6017 * @return Returns OCS_HW_RTN_SUCCESS on success.
/*
 * Get the current link configuration: dispatch to the chip-specific
 * implementation (Lancer via DMTF CLP, BE3/Skyhawk via the
 * GET_RECONFIG_LINK_INFO mailbox command).
 */
6020 ocs_hw_get_linkcfg(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
6022 if (!sli_link_is_configurable(&hw->sli)) {
6023 ocs_log_debug(hw->os, "Function not supported\n");
6024 return OCS_HW_RTN_ERROR;
/* select implementation by SLI interface type */
6027 if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
6028 return ocs_hw_get_linkcfg_lancer(hw, opts, cb, arg);
6029 } else if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
6030 (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) {
6031 return ocs_hw_get_linkcfg_skyhawk(hw, opts, cb, arg);
6033 ocs_log_test(hw->os, "Function not supported for this IF_TYPE\n");
6034 return OCS_HW_RTN_ERROR;
6039 * @brief Get link configuration for a Lancer
6041 * @param hw Hardware context.
6042 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
6043 * @param cb Callback function to invoke following mbx command.
6044 * @param arg Callback argument.
6046 * @return Returns OCS_HW_RTN_SUCCESS on success.
/*
 * Get the Lancer link configuration via the DMTF CLP command
 * "show / OEMELX_LinkConfig".  Mirrors ocs_hw_set_linkcfg_lancer():
 * command and response live in DMA buffers, and cb_arg/DMA ownership
 * passes to ocs_hw_linkcfg_dmtf_clp_cb() only on a successful
 * non-polled submission.
 */
6049 ocs_hw_get_linkcfg_lancer(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
6051 char cmd[OCS_HW_DMTF_CLP_CMD_MAX];
6052 ocs_hw_linkcfg_cb_arg_t *cb_arg;
6053 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6055 /* allocate memory for callback argument */
6056 cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT);
6057 if (cb_arg == NULL) {
6058 ocs_log_err(hw->os, "failed to malloc cb_arg");
6059 return OCS_HW_RTN_NO_MEMORY;
6062 ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "show / OEMELX_LinkConfig");
6064 /* allocate DMA for command */
6065 if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, ocs_strlen(cmd)+1, 4096)) {
6066 ocs_log_err(hw->os, "malloc failed\n");
6067 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6068 return OCS_HW_RTN_NO_MEMORY;
6071 /* copy CLP command to DMA command */
6072 ocs_memset(cb_arg->dma_cmd.virt, 0, ocs_strlen(cmd)+1);
6073 ocs_memcpy(cb_arg->dma_cmd.virt, cmd, ocs_strlen(cmd));
6075 /* allocate DMA for response */
6076 if (ocs_dma_alloc(hw->os, &cb_arg->dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) {
6077 ocs_log_err(hw->os, "malloc failed\n");
6078 ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6079 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6080 return OCS_HW_RTN_NO_MEMORY;
6084 cb_arg->opts = opts;
6086 rc = ocs_hw_exec_dmtf_clp_cmd(hw, &cb_arg->dma_cmd, &cb_arg->dma_resp,
6087 opts, ocs_hw_linkcfg_dmtf_clp_cb, cb_arg);
6089 if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) {
6090 /* if failed or polling, free memory here; if not polling and success,
6091 * will free in callback function
6094 ocs_log_test(hw->os, "CLP cmd=\"%s\" failed\n",
6095 (char *)cb_arg->dma_cmd.virt);
6097 ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6098 ocs_dma_free(hw->os, &cb_arg->dma_resp);
6099 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6106 * @brief Get the link configuration callback.
6108 * @param hw Hardware context.
6109 * @param status Status from the RECONFIG_GET_LINK_INFO command.
6110 * @param mqe Mailbox response structure.
6111 * @param arg Pointer to a callback argument.
/*
 * Mailbox completion for GET_RECONFIG_LINK_INFO: translate the returned
 * active_link_config_id into the HW linkcfg enum, hand it to the caller's
 * callback, and free the DMA buffer and cb_arg unless the command was
 * polled (the issuing function frees them then).
 */
6116 ocs_hw_get_active_link_config_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
6118 ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;
/* non-embedded response lives in the dma_cmd buffer set up by the caller */
6119 sli4_res_common_get_reconfig_link_info_t *rsp = cb_arg->dma_cmd.virt;
6120 ocs_hw_linkcfg_e value = OCS_HW_LINKCFG_NA;
6123 ocs_log_test(hw->os, "GET_RECONFIG_LINK_INFO failed, status=%d\n", status);
6125 /* Call was successful */
6126 value = ocs_hw_linkcfg_from_config_id(rsp->active_link_config_id);
6129 /* invoke callback */
6131 cb_arg->cb(status, value, cb_arg->arg);
6134 /* if polling, will free memory in calling function */
6135 if (cb_arg->opts != OCS_CMD_POLL) {
6136 ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6137 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6142 * @brief Get link configuration for a Skyhawk.
6144 * @param hw Hardware context.
6145 * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
6146 * @param cb Callback function to invoke following mbx command.
6147 * @param arg Callback argument.
6149 * @return Returns OCS_HW_RTN_SUCCESS on success.
/*
 * Get the Skyhawk link configuration via COMMON_GET_RECONFIG_LINK_INFO.
 * The non-embedded response is read into cb_arg->dma_cmd.  When polling,
 * the completion callback runs synchronously here and all memory is freed
 * before returning; when not polling, ocs_hw_get_active_link_config_cb()
 * frees the DMA buffer and cb_arg.
 */
6152 ocs_hw_get_linkcfg_skyhawk(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
6155 ocs_hw_linkcfg_cb_arg_t *cb_arg;
6156 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6158 /* mbxdata holds the header of the command */
6159 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
6160 if (mbxdata == NULL) {
6161 ocs_log_err(hw->os, "failed to malloc mbox\n");
6162 return OCS_HW_RTN_NO_MEMORY;
6165 /* cb_arg holds the data that will be passed to the callback on completion */
6166 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_linkcfg_cb_arg_t), OCS_M_NOWAIT);
6167 if (cb_arg == NULL) {
6168 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
6169 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6170 return OCS_HW_RTN_NO_MEMORY;
6175 cb_arg->opts = opts;
6177 /* dma_mem holds the non-embedded portion */
6178 if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, sizeof(sli4_res_common_get_reconfig_link_info_t), 4)) {
6179 ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
6180 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6181 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
6182 return OCS_HW_RTN_NO_MEMORY;
6185 if (sli_cmd_common_get_reconfig_link_info(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->dma_cmd)) {
6186 rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_get_active_link_config_cb, cb_arg);
6189 if (rc != OCS_HW_RTN_SUCCESS) {
6190 ocs_log_err(hw->os, "GET_RECONFIG_LINK_INFO failed\n");
6191 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6192 ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6193 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
6194 } else if (opts == OCS_CMD_POLL) {
6195 /* if we're polling we have to call the callback here. */
6196 ocs_hw_get_active_link_config_cb(hw, 0, mbxdata, cb_arg);
6197 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6198 ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6199 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
6201 /* We weren't polling, so the callback freed cb_arg; only the mailbox remains */
6202 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6209 * @brief Sets the DIF seed value.
6211 * @param hw Hardware context.
6213 * @return Returns OCS_HW_RTN_SUCCESS on success.
/*
 * Program the T10 DIF seed (hw->config.dif_seed) into the chip with a
 * polled COMMON_SET_FEATURES(DIF_SEED) mailbox command issued through
 * the bootstrap mailbox.
 */
6216 ocs_hw_set_dif_seed(ocs_hw_t *hw)
6218 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6219 uint8_t buf[SLI4_BMBX_SIZE];
6220 sli4_req_common_set_features_dif_seed_t seed_param;
6222 ocs_memset(&seed_param, 0, sizeof(seed_param));
6223 seed_param.seed = hw->config.dif_seed;
6225 /* send set_features command */
6226 if (sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6227 SLI4_SET_FEATURES_DIF_SEED,
6229 (uint32_t*)&seed_param)) {
6230 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6232 ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
6234 ocs_log_debug(hw->os, "DIF seed set to 0x%x\n",
6235 hw->config.dif_seed);
6238 ocs_log_err(hw->os, "sli_cmd_common_set_features failed\n");
6239 rc = OCS_HW_RTN_ERROR;
6246 * @brief Sets the DIF mode value.
6248 * @param hw Hardware context.
6250 * @return Returns OCS_HW_RTN_SUCCESS on success.
/*
 * Program the T10 DIF memory model with a polled
 * COMMON_SET_FEATURES(DIF_MEMORY_MODE) command: tmm=0 selects the
 * inline model, tmm=1 the separate model, per hw->config.dif_mode.
 */
6253 ocs_hw_set_dif_mode(ocs_hw_t *hw)
6255 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6256 uint8_t buf[SLI4_BMBX_SIZE];
6257 sli4_req_common_set_features_t10_pi_mem_model_t mode_param;
6259 ocs_memset(&mode_param, 0, sizeof(mode_param));
6260 mode_param.tmm = (hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE ? 0 : 1);
6262 /* send set_features command */
6263 if (sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6264 SLI4_SET_FEATURES_DIF_MEMORY_MODE,
6266 (uint32_t*)&mode_param)) {
6267 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6269 ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
6271 ocs_log_test(hw->os, "DIF mode set to %s\n",
6272 (hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE ? "inline" : "separate"));
6275 ocs_log_err(hw->os, "sli_cmd_common_set_features failed\n");
6276 rc = OCS_HW_RTN_ERROR;
/*
 * Watchdog timer expiry handler: re-issue the SET_WATCHDOG mailbox
 * command via ocs_hw_config_watchdog_timer() to keep the firmware
 * heartbeat alive.
 */
6282 ocs_hw_watchdog_timer_cb(void *arg)
6284 ocs_hw_t *hw = (ocs_hw_t *)arg;
6286 ocs_hw_config_watchdog_timer(hw);
/*
 * Completion callback for the SET_WATCHDOG mailbox command: on success,
 * re-arm the software timer 500 ms before the firmware timeout so the
 * heartbeat never lapses; otherwise the timer is cancelled.  The mailbox
 * buffer allocated by ocs_hw_config_watchdog_timer() is freed here.
 * NOTE(review): the condition guarding the timer-vs-cancel branch is not
 * visible in this view — presumably status/timeout based; confirm.
 */
6291 ocs_hw_cb_cfg_watchdog(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
6293 uint16_t timeout = hw->watchdog_timeout;
6296 ocs_log_err(hw->os, "config watchdog timer failed, rc = %d\n", status);
6299 /* keeping callback 500ms before timeout to keep heartbeat alive */
6300 ocs_setup_timer(hw->os, &hw->watchdog_timer, ocs_hw_watchdog_timer_cb, hw, (timeout*1000 - 500) );
6302 ocs_del_timer(&hw->watchdog_timer);
6306 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
6311 * @brief Set configuration parameters for watchdog timer feature.
6313 * @param hw Hardware context.
6314 * @param timeout Timeout for watchdog timer in seconds
6316 * @return Returns OCS_HW_RTN_SUCCESS on success.
6319 ocs_hw_config_watchdog_timer(ocs_hw_t *hw)
6321 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6322 uint8_t *buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
6324 sli4_cmd_lowlevel_set_watchdog(&hw->sli, buf, SLI4_BMBX_SIZE, hw->watchdog_timeout);
6325 rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_cfg_watchdog, NULL);
6327 ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
6328 ocs_log_err(hw->os, "config watchdog timer failed, rc = %d\n", rc);
6334 * @brief Set configuration parameters for auto-generate xfer_rdy T10 PI feature.
6336 * @param hw Hardware context.
6337 * @param buf Pointer to a mailbox buffer area.
6339 * @return Returns OCS_HW_RTN_SUCCESS on success.
6342 ocs_hw_config_auto_xfer_rdy_t10pi(ocs_hw_t *hw, uint8_t *buf)
6344 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6345 sli4_req_common_set_features_xfer_rdy_t10pi_t param;
6347 ocs_memset(¶m, 0, sizeof(param));
6348 param.rtc = (hw->config.auto_xfer_rdy_ref_tag_is_lba ? 0 : 1);
6349 param.atv = (hw->config.auto_xfer_rdy_app_tag_valid ? 1 : 0);
6350 param.tmm = ((hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE) ? 0 : 1);
6351 param.app_tag = hw->config.auto_xfer_rdy_app_tag_value;
6352 param.blk_size = hw->config.auto_xfer_rdy_blk_size_chip;
6354 switch (hw->config.auto_xfer_rdy_p_type) {
6362 ocs_log_err(hw->os, "unsupported p_type %d\n",
6363 hw->config.auto_xfer_rdy_p_type);
6364 return OCS_HW_RTN_ERROR;
6367 /* build the set_features command */
6368 sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6369 SLI4_SET_FEATURES_SET_CONFIG_AUTO_XFER_RDY_T10PI,
6374 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6376 ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
6378 ocs_log_test(hw->os, "Auto XFER RDY T10 PI configured rtc:%d atv:%d p_type:%d app_tag:%x blk_size:%d\n",
6379 param.rtc, param.atv, param.p_type,
6380 param.app_tag, param.blk_size);
6388 * @brief enable sli port health check
6390 * @param hw Hardware context.
6392 * @param query current status of the health check feature enabled/disabled
6393 * @param enable if 1: enable 0: disable
6396 * @return Returns OCS_HW_RTN_SUCCESS on success.
6399 ocs_hw_config_sli_port_health_check(ocs_hw_t *hw, uint8_t query, uint8_t enable)
6401 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6402 uint8_t buf[SLI4_BMBX_SIZE];
6403 sli4_req_common_set_features_health_check_t param;
6405 ocs_memset(¶m, 0, sizeof(param));
6409 /* build the set_features command */
6410 sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6411 SLI4_SET_FEATURES_SLI_PORT_HEALTH_CHECK,
6415 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6417 ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
6419 ocs_log_test(hw->os, "SLI Port Health Check is enabled \n");
6426 * @brief Set FTD transfer hint feature
6428 * @param hw Hardware context.
6429 * @param fdt_xfer_hint size in bytes where read requests are segmented.
6431 * @return Returns OCS_HW_RTN_SUCCESS on success.
6434 ocs_hw_config_set_fdt_xfer_hint(ocs_hw_t *hw, uint32_t fdt_xfer_hint)
6436 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6437 uint8_t buf[SLI4_BMBX_SIZE];
6438 sli4_req_common_set_features_set_fdt_xfer_hint_t param;
6440 ocs_memset(¶m, 0, sizeof(param));
6441 param.fdt_xfer_hint = fdt_xfer_hint;
6442 /* build the set_features command */
6443 sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6444 SLI4_SET_FEATURES_SET_FTD_XFER_HINT,
6449 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6451 ocs_log_warn(hw->os, "set FDT hint %d failed: %d\n", fdt_xfer_hint, rc);
6453 ocs_log_debug(hw->os, "Set FTD transfer hint to %d\n", param.fdt_xfer_hint);
6460 * @brief DMTF CLP completion callback for the link configuration commands.
6462 * @param hw Hardware context.
6463 * @param status Status from the DMTF CLP command.
6464 * @param result_len Length, in bytes, of the DMTF CLP result.
6465 * @param arg Pointer to a callback argument.
6467 * @return Returns OCS_HW_RTN_SUCCESS on success.
/*
 * Completion callback for the Lancer DMTF CLP link-configuration
 * commands: pull the "retdata" keyword value out of the CLP response,
 * translate it to the HW linkcfg enum, invoke the caller's callback, and
 * free the DMA buffers and cb_arg unless the command was polled (the
 * issuing function frees them then).
 */
6470 ocs_hw_linkcfg_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg)
6473 char retdata_str[64];
6474 ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;
6475 ocs_hw_linkcfg_e linkcfg = OCS_HW_LINKCFG_NA;
6478 ocs_log_test(hw->os, "CLP cmd failed, status=%d\n", status);
6480 /* parse CLP response to get return data */
6481 rval = ocs_hw_clp_resp_get_value(hw, "retdata", retdata_str,
6482 sizeof(retdata_str),
6483 cb_arg->dma_resp.virt,
6487 ocs_log_err(hw->os, "failed to get retdata %d\n", result_len);
6489 /* translate string into hw enum */
6490 linkcfg = ocs_hw_linkcfg_from_clp(retdata_str);
6494 /* invoke callback */
6496 cb_arg->cb(status, linkcfg, cb_arg->arg);
6499 /* if polling, will free memory in calling function */
6500 if (cb_arg->opts != OCS_CMD_POLL) {
6501 ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6502 ocs_dma_free(hw->os, &cb_arg->dma_resp);
6503 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6508 * @brief Set the Lancer dump location
6510 * This function tells a Lancer chip to use a specific DMA
6511 * buffer as a dump location rather than the internal flash.
6513 * @param hw Hardware context.
6514 * @param num_buffers The number of DMA buffers to hold the dump (1..n).
6515 * @param dump_buffers DMA buffers to hold the dump.
6517 * @return Returns OCS_HW_RTN_SUCCESS on success.
/*
 * Tell a Lancer chip to dump into host DMA buffers instead of internal
 * flash.  A single buffer is passed to the chip directly; multiple
 * buffers are first described by an SGL (cached in hw->dump_sges and
 * grown on demand) whose address is passed instead.  Issued as a polled
 * COMMON_SET_DUMP_LOCATION mailbox command; only valid on PCI function 0
 * unless fdb is set.
 */
6520 ocs_hw_set_dump_location(ocs_hw_t *hw, uint32_t num_buffers, ocs_dma_t *dump_buffers, uint8_t fdb)
6522 uint8_t bus, dev, func;
6523 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6524 uint8_t buf[SLI4_BMBX_SIZE];
6527 * Make sure the FW is new enough to support this command. If the FW
6528 * is too old, the FW will UE.
6530 if (hw->workaround.disable_dump_loc) {
6531 ocs_log_test(hw->os, "FW version is too old for this feature\n");
6532 return OCS_HW_RTN_ERROR;
6535 /* This command is only valid for physical port 0 */
6536 ocs_get_bus_dev_func(hw->os, &bus, &dev, &func);
6537 if (fdb == 0 && func != 0) {
6538 ocs_log_test(hw->os, "function only valid for pci function 0, %d passed\n",
6540 return OCS_HW_RTN_ERROR;
6544 * If a single buffer is used, then it may be passed as is to the chip. For multiple buffers,
6545 * We must allocate a SGL list and then pass the address of the list to the chip.
6547 if (num_buffers > 1) {
6548 uint32_t sge_size = num_buffers * sizeof(sli4_sge_t);
/* grow the cached SGL only when the existing one is too small */
6552 if (hw->dump_sges.size < sge_size) {
6553 ocs_dma_free(hw->os, &hw->dump_sges);
6554 if (ocs_dma_alloc(hw->os, &hw->dump_sges, sge_size, OCS_MIN_DMA_ALIGNMENT)) {
6555 ocs_log_err(hw->os, "SGE DMA allocation failed\n");
6556 return OCS_HW_RTN_NO_MEMORY;
6559 /* build the SGE list */
6560 ocs_memset(hw->dump_sges.virt, 0, hw->dump_sges.size);
6561 hw->dump_sges.len = sge_size;
6562 sge = hw->dump_sges.virt;
6563 for (i = 0; i < num_buffers; i++) {
6564 sge[i].buffer_address_high = ocs_addr32_hi(dump_buffers[i].phys);
6565 sge[i].buffer_address_low = ocs_addr32_lo(dump_buffers[i].phys);
6566 sge[i].last = (i == num_buffers - 1 ? 1 : 0);
6567 sge[i].buffer_length = dump_buffers[i].size;
/* multiple buffers: pass the SGL (buffer_list=TRUE) */
6569 rc = sli_cmd_common_set_dump_location(&hw->sli, (void *)buf,
6570 SLI4_BMBX_SIZE, FALSE, TRUE,
6571 &hw->dump_sges, fdb);
/* single buffer: pass it to the chip directly */
6573 dump_buffers->len = dump_buffers->size;
6574 rc = sli_cmd_common_set_dump_location(&hw->sli, (void *)buf,
6575 SLI4_BMBX_SIZE, FALSE, FALSE,
6580 rc = ocs_hw_command(hw, buf, OCS_CMD_POLL,
6583 ocs_log_err(hw->os, "ocs_hw_command returns %d\n",
6588 "sli_cmd_common_set_dump_location failed\n");
6589 rc = OCS_HW_RTN_ERROR;
6597 * @brief Set the Ethernet license.
6600 * This function sends the appropriate mailbox command (DMTF
6601 * CLP) to set the Ethernet license to the given license value.
6602 * Since it is used during the time of ocs_hw_init(), the mailbox
6603 * command is sent via polling (the BMBX route).
6605 * @param hw Hardware context.
6606 * @param license 32-bit license value.
6608 * @return Returns OCS_HW_RTN_SUCCESS on success.
/*
 * Set the Ethernet license on a Lancer chip via the DMTF CLP command
 * "set / OEMELX_Ethernet_License=<hex>".  Runs during ocs_hw_init(), so
 * the command is executed by polling on the bootstrap mailbox; both DMA
 * buffers are freed before returning.
 */
6611 ocs_hw_set_eth_license(ocs_hw_t *hw, uint32_t license)
6613 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6614 char cmd[OCS_HW_DMTF_CLP_CMD_MAX];
6618 /* only for lancer right now */
6619 if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
6620 ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
6621 return OCS_HW_RTN_ERROR;
6624 ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "set / OEMELX_Ethernet_License=%X", license);
6625 /* allocate DMA for command */
6626 if (ocs_dma_alloc(hw->os, &dma_cmd, ocs_strlen(cmd)+1, 4096)) {
6627 ocs_log_err(hw->os, "malloc failed\n");
6628 return OCS_HW_RTN_NO_MEMORY;
/* copy the NUL-terminated CLP command into the DMA buffer */
6630 ocs_memset(dma_cmd.virt, 0, ocs_strlen(cmd)+1);
6631 ocs_memcpy(dma_cmd.virt, cmd, ocs_strlen(cmd));
6633 /* allocate DMA for response */
6634 if (ocs_dma_alloc(hw->os, &dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) {
6635 ocs_log_err(hw->os, "malloc failed\n");
6636 ocs_dma_free(hw->os, &dma_cmd);
6637 return OCS_HW_RTN_NO_MEMORY;
6640 /* send DMTF CLP command mbx and poll */
6641 if (ocs_hw_exec_dmtf_clp_cmd(hw, &dma_cmd, &dma_resp, OCS_CMD_POLL, NULL, NULL)) {
6642 ocs_log_err(hw->os, "CLP cmd=\"%s\" failed\n", (char *)dma_cmd.virt);
6643 rc = OCS_HW_RTN_ERROR;
6646 ocs_dma_free(hw->os, &dma_cmd);
6647 ocs_dma_free(hw->os, &dma_resp);
6652 * @brief Callback argument structure for the DMTF CLP commands.
/*
 * Argument carried through the DMTF CLP mailbox command and handed back
 * to ocs_hw_dmtf_clp_cb(); also conveys the condensed status back to
 * polled callers that pass no callback.
 */
6654 typedef struct ocs_hw_clp_cb_arg_s {
6655 ocs_hw_dmtf_clp_cb_t cb;	/* caller's completion callback (may be NULL when polling) */
6656 ocs_dma_t *dma_resp;	/* CLP response buffer, parsed by the callback */
6660 } ocs_hw_clp_cb_arg_t;
6663 * @brief Execute the DMTF CLP command.
6665 * @param hw Hardware context.
6666 * @param dma_cmd DMA buffer containing the CLP command.
6667 * @param dma_resp DMA buffer that will contain the response (if successful).
6668 * @param opts Mailbox command options (such as OCS_CMD_NOWAIT and POLL).
6669 * @param cb Callback function.
6670 * @param arg Callback argument.
6672 * @return Returns the number of bytes written to the response
6673 * buffer on success, or a negative value if failed.
/*
 * Execute a DMTF CLP command through the DMTF_EXEC_CLP_CMD mailbox
 * command.  For polled submissions the bootstrap-mailbox response is
 * copied back and ocs_hw_dmtf_clp_cb() is invoked synchronously, after
 * which rc is replaced by the parsed CLP status; mbxdata/cb_arg are then
 * freed here.  For successful non-polled submissions the callback owns
 * and frees both.
 */
6676 ocs_hw_exec_dmtf_clp_cmd(ocs_hw_t *hw, ocs_dma_t *dma_cmd, ocs_dma_t *dma_resp, uint32_t opts, ocs_hw_dmtf_clp_cb_t cb, void *arg)
6678 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
6679 ocs_hw_clp_cb_arg_t *cb_arg;
6682 /* allocate DMA for mailbox */
6683 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
6684 if (mbxdata == NULL) {
6685 ocs_log_err(hw->os, "failed to malloc mbox\n");
6686 return OCS_HW_RTN_NO_MEMORY;
6689 /* allocate memory for callback argument */
6690 cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT);
6691 if (cb_arg == NULL) {
6692 ocs_log_err(hw->os, "failed to malloc cb_arg");
6693 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6694 return OCS_HW_RTN_NO_MEMORY;
6699 cb_arg->dma_resp = dma_resp;
6700 cb_arg->opts = opts;
6702 /* Send the HW command */
6703 if (sli_cmd_dmtf_exec_clp_cmd(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
6704 dma_cmd, dma_resp)) {
6705 rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_dmtf_clp_cb, cb_arg);
6707 if (opts == OCS_CMD_POLL && rc == OCS_HW_RTN_SUCCESS) {
6708 /* if we're polling, copy response and invoke callback to
6710 ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE);
6711 ocs_hw_dmtf_clp_cb(hw, 0, mbxdata, cb_arg);
6713 /* set rc to resulting or "parsed" status */
6714 rc = cb_arg->status;
6717 /* if failed, or polling, free memory here */
6718 if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) {
6719 if (rc != OCS_HW_RTN_SUCCESS) {
6720 ocs_log_test(hw->os, "ocs_hw_command failed\n");
6722 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6723 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6726 ocs_log_test(hw->os, "sli_cmd_dmtf_exec_clp_cmd failed\n");
6727 rc = OCS_HW_RTN_ERROR;
6728 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6729 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6737 * @brief Called when the DMTF CLP command completes.
6739 * @param hw Hardware context.
6740 * @param status Status field from the mbox completion.
6741 * @param mqe Mailbox response structure.
6742 * @param arg Pointer to a callback argument.
/*
 * Completion handler for DMTF_EXEC_CLP_CMD: condense the mailbox status,
 * SLI header status, and CLP status into one cb_status; validate the
 * response length against the response DMA buffer; then parse the CLP
 * "status" keyword and fail if it is non-zero.  The condensed status is
 * stored in cb_arg->status for polled callers with NULL callbacks, the
 * caller's callback (if any) is invoked, and for non-polled commands
 * cb_arg and the mailbox are freed here.
 */
6748 ocs_hw_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
6750 int32_t cb_status = 0;
6751 sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
6752 sli4_res_dmtf_exec_clp_cmd_t *clp_rsp = (sli4_res_dmtf_exec_clp_cmd_t *) mbox_rsp->payload.embed;
6753 ocs_hw_clp_cb_arg_t *cb_arg = arg;
6754 uint32_t result_len = 0;
6758 /* there are several status codes here, check them all and condense
6759 * into a single callback status
6761 if (status || mbox_rsp->hdr.status || clp_rsp->clp_status) {
6762 ocs_log_debug(hw->os, "status=x%x/x%x/x%x addl=x%x clp=x%x detail=x%x\n",
6764 mbox_rsp->hdr.status,
6765 clp_rsp->hdr.status,
6766 clp_rsp->hdr.additional_status,
6767 clp_rsp->clp_status,
6768 clp_rsp->clp_detailed_status);
/* first non-zero status wins: mbox, then SLI header, then CLP */
6771 } else if (mbox_rsp->hdr.status) {
6772 cb_status = mbox_rsp->hdr.status;
6774 cb_status = clp_rsp->clp_status;
6777 result_len = clp_rsp->resp_length;
6781 goto ocs_hw_cb_dmtf_clp_done;
/* response must fit in the buffer the caller supplied */
6784 if ((result_len == 0) || (cb_arg->dma_resp->size < result_len)) {
6785 ocs_log_test(hw->os, "Invalid response length: resp_len=%zu result len=%d\n",
6786 cb_arg->dma_resp->size, result_len);
6788 goto ocs_hw_cb_dmtf_clp_done;
6791 /* parse CLP response to get status */
6792 stat_len = ocs_hw_clp_resp_get_value(hw, "status", stat_str,
6794 cb_arg->dma_resp->virt,
6797 if (stat_len <= 0) {
6798 ocs_log_test(hw->os, "failed to get status %d\n", stat_len);
6800 goto ocs_hw_cb_dmtf_clp_done;
6803 if (ocs_strcmp(stat_str, "0") != 0) {
6804 ocs_log_test(hw->os, "CLP status indicates failure=%s\n", stat_str);
6806 goto ocs_hw_cb_dmtf_clp_done;
6809 ocs_hw_cb_dmtf_clp_done:
6811 /* save status in cb_arg for callers with NULL cb's + polling */
6812 cb_arg->status = cb_status;
6814 cb_arg->cb(hw, cb_status, result_len, cb_arg->arg);
6816 /* if polling, caller will free memory */
6817 if (cb_arg->opts != OCS_CMD_POLL) {
6818 ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6819 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
6824 * @brief Parse the CLP result and get the value corresponding to the given
6827 * @param hw Hardware context.
6828 * @param keyword CLP keyword for which the value is returned.
6829 * @param value Location to which the resulting value is copied.
6830 * @param value_len Length of the value parameter.
6831 * @param resp Pointer to the response buffer that is searched
6832 * for the keyword and value.
6833 * @param resp_len Length of response buffer passed in.
6835 * @return Returns the number of bytes written to the value
6836 * buffer on success, or a negative vaue on failure.
/*
 * Parse a CLP response for "<keyword>=<value>\r\n" and copy the
 * NUL-terminated value into the caller's buffer.  Returns the number of
 * bytes written (value length + 1 for the terminator) on success, a
 * negative value on any parse failure or if the buffer is too small.
 */
6839 ocs_hw_clp_resp_get_value(ocs_hw_t *hw, const char *keyword, char *value, uint32_t value_len, const char *resp, uint32_t resp_len)
6844 /* look for specified keyword in string */
6845 start = ocs_strstr(resp, keyword);
6846 if (start == NULL) {
6847 ocs_log_test(hw->os, "could not find keyword=%s in CLP response\n",
6852 /* now look for '=' and go one past */
6853 start = ocs_strchr(start, '=');
6854 if (start == NULL) {
6855 ocs_log_test(hw->os, "could not find \'=\' in CLP response for keyword=%s\n",
6861 /* \r\n terminates value */
6862 end = ocs_strstr(start, "\r\n");
6864 ocs_log_test(hw->os, "could not find \\r\\n for keyword=%s in CLP response\n",
6869 /* make sure given result array is big enough */
6870 if ((end - start + 1) > value_len) {
6871 ocs_log_test(hw->os, "value len=%d not large enough for actual=%ld\n",
6872 value_len, (end-start))
6882 * @brief Cause chip to enter an unrecoverable error state.
6885 * Cause chip to enter an unrecoverable error state. This is
6886 * used when detecting unexpected FW behavior so that the FW can be
6887 * halted from the driver as soon as the error is detected.
6889 * @param hw Hardware context.
6890 * @param dump Generate dump as part of reset.
6892 * @return Returns 0 on success, or a non-zero value on failure.
/*
 * Force the chip into an unrecoverable error state (optionally taking a
 * dump first) and roll the HW state back to QUEUES_ALLOCATED so a
 * subsequent re-init is possible.
 */
6896 ocs_hw_raise_ue(ocs_hw_t *hw, uint8_t dump)
6898 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6900 if (sli_raise_ue(&hw->sli, dump) != 0) {
6901 rc = OCS_HW_RTN_ERROR;
/* only regress state if the HW had been brought up at all */
6903 if (hw->state != OCS_HW_STATE_UNINITIALIZED) {
6904 hw->state = OCS_HW_STATE_QUEUES_ALLOCATED;
6912 * @brief Called when the OBJECT_GET command completes.
6915 * Get the number of bytes actually written out of the response, free the mailbox
6916 * that was malloc'd by ocs_hw_dump_get(), then call the callback
6917 * and pass the status and bytes read.
6919 * @param hw Hardware context.
6920 * @param status Status field from the mbox completion.
6921 * @param mqe Mailbox response structure.
6922 * @param arg Pointer to a callback function that signals the caller that the command is done.
6923 * The callback function prototype is <tt>void cb(int32_t status, uint32_t bytes_read)</tt>.
6925 * @return Returns 0.
/*
 * Completion handler for the COMMON_READ_OBJECT dump-read command:
 * extract the actual byte count and EOF flag from the embedded response,
 * fold a non-zero SLI header status into the completion status, invoke
 * the caller's callback, and free the mailbox buffer and cb_arg
 * allocated by ocs_hw_dump_get().
 */
6928 ocs_hw_cb_dump_get(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
6930 sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
6931 sli4_res_common_read_object_t* rd_obj_rsp = (sli4_res_common_read_object_t*) mbox_rsp->payload.embed;
6932 ocs_hw_dump_get_cb_arg_t *cb_arg = arg;
6933 uint32_t bytes_read;
6936 bytes_read = rd_obj_rsp->actual_read_length;
6937 eof = rd_obj_rsp->eof;
/* surface an SLI header error when the mailbox itself succeeded */
6941 if ((status == 0) && mbox_rsp->hdr.status) {
6942 status = mbox_rsp->hdr.status;
6944 cb_arg->cb(status, bytes_read, eof, cb_arg->arg);
6947 ocs_free(hw->os, cb_arg->mbox_cmd, SLI4_BMBX_SIZE);
6948 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_get_cb_arg_t));
6956 * @brief Read a dump image to the host.
6959 * Creates a SLI_CONFIG mailbox command, fills in the correct values to read a
6960 * dump image chunk, then sends the command with the ocs_hw_command(). On completion,
6961 * the callback function ocs_hw_cb_dump_get() gets called to free the mailbox
6962 * and signal the caller that the read has completed.
6964 * @param hw Hardware context.
6965 * @param dma DMA structure to transfer the dump chunk into.
6966 * @param size Size of the dump chunk.
6967 * @param offset Offset, in bytes, from the beginning of the dump.
6968 * @param cb Pointer to a callback function that is called when the command completes.
6969 * The callback function prototype is
6970 * <tt>void cb(int32_t status, uint32_t bytes_read, uint8_t eof, void *arg)</tt>.
6971 * @param arg Pointer to be passed to the callback function.
6973 * @return Returns 0 on success, or a non-zero value on failure.
6976 ocs_hw_dump_get(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, ocs_hw_dump_get_cb_t cb, void *arg)
6978 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
6980 ocs_hw_dump_get_cb_arg_t *cb_arg;
6981 uint32_t opts = (hw->state == OCS_HW_STATE_ACTIVE ? OCS_CMD_NOWAIT : OCS_CMD_POLL);
6983 if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
6984 ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
6985 return OCS_HW_RTN_ERROR;
6988 if (1 != sli_dump_is_present(&hw->sli)) {
6989 ocs_log_test(hw->os, "No dump is present\n");
6990 return OCS_HW_RTN_ERROR;
6993 if (1 == sli_reset_required(&hw->sli)) {
6994 ocs_log_test(hw->os, "device reset required\n");
6995 return OCS_HW_RTN_ERROR;
6998 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
6999 if (mbxdata == NULL) {
7000 ocs_log_err(hw->os, "failed to malloc mbox\n");
7001 return OCS_HW_RTN_NO_MEMORY;
7004 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_dump_get_cb_arg_t), OCS_M_NOWAIT);
7005 if (cb_arg == NULL) {
7006 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7007 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7008 return OCS_HW_RTN_NO_MEMORY;
7013 cb_arg->mbox_cmd = mbxdata;
7015 if (sli_cmd_common_read_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
7016 size, offset, "/dbg/dump.bin", dma)) {
7017 rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_cb_dump_get, cb_arg);
7018 if (rc == 0 && opts == OCS_CMD_POLL) {
7019 ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE);
7020 rc = ocs_hw_cb_dump_get(hw, 0, mbxdata, cb_arg);
7024 if (rc != OCS_HW_RTN_SUCCESS) {
7025 ocs_log_test(hw->os, "COMMON_READ_OBJECT failed\n");
7026 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7027 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_get_cb_arg_t));
7034 * @brief Called when the OBJECT_DELETE command completes.
7037 * Free the mailbox that was malloc'd
7038 * by ocs_hw_dump_clear(), then call the callback and pass the status.
7040 * @param hw Hardware context.
7041 * @param status Status field from the mbox completion.
7042 * @param mqe Mailbox response structure.
7043 * @param arg Pointer to a callback function that signals the caller that the command is done.
7044 * The callback function prototype is <tt>void cb(int32_t status, void *arg)</tt>.
7046 * @return Returns 0.
7049 ocs_hw_cb_dump_clear(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7051 ocs_hw_dump_clear_cb_arg_t *cb_arg = arg;
7052 sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
7056 if ((status == 0) && mbox_rsp->hdr.status) {
7057 status = mbox_rsp->hdr.status;
7059 cb_arg->cb(status, cb_arg->arg);
7062 ocs_free(hw->os, cb_arg->mbox_cmd, SLI4_BMBX_SIZE);
7063 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_clear_cb_arg_t));
7070 * @brief Clear a dump image from the device.
7073 * Creates a SLI_CONFIG mailbox command, fills it with the correct values to clear
7074 * the dump, then sends the command with ocs_hw_command(). On completion,
7075 * the callback function ocs_hw_cb_dump_clear() gets called to free the mailbox
7076 * and to signal the caller that the write has completed.
7078 * @param hw Hardware context.
7079 * @param cb Pointer to a callback function that is called when the command completes.
7080 * The callback function prototype is
7081 * <tt>void cb(int32_t status, uint32_t bytes_written, void *arg)</tt>.
7082 * @param arg Pointer to be passed to the callback function.
7084 * @return Returns 0 on success, or a non-zero value on failure.
7087 ocs_hw_dump_clear(ocs_hw_t *hw, ocs_hw_dump_clear_cb_t cb, void *arg)
7089 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
7091 ocs_hw_dump_clear_cb_arg_t *cb_arg;
7092 uint32_t opts = (hw->state == OCS_HW_STATE_ACTIVE ? OCS_CMD_NOWAIT : OCS_CMD_POLL);
7094 if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
7095 ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
7096 return OCS_HW_RTN_ERROR;
7099 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7100 if (mbxdata == NULL) {
7101 ocs_log_err(hw->os, "failed to malloc mbox\n");
7102 return OCS_HW_RTN_NO_MEMORY;
7105 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_dump_clear_cb_arg_t), OCS_M_NOWAIT);
7106 if (cb_arg == NULL) {
7107 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7108 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7109 return OCS_HW_RTN_NO_MEMORY;
7114 cb_arg->mbox_cmd = mbxdata;
7116 if (sli_cmd_common_delete_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
7118 rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_cb_dump_clear, cb_arg);
7119 if (rc == 0 && opts == OCS_CMD_POLL) {
7120 ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE);
7121 rc = ocs_hw_cb_dump_clear(hw, 0, mbxdata, cb_arg);
7125 if (rc != OCS_HW_RTN_SUCCESS) {
7126 ocs_log_test(hw->os, "COMMON_DELETE_OBJECT failed\n");
7127 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7128 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_clear_cb_arg_t));
7134 typedef struct ocs_hw_get_port_protocol_cb_arg_s {
7135 ocs_get_port_protocol_cb_t cb;
7139 } ocs_hw_get_port_protocol_cb_arg_t;
7142 * @brief Called for the completion of get_port_profile for a
7145 * @param hw Hardware context.
7146 * @param status The status from the MQE.
7147 * @param mqe Pointer to mailbox command buffer.
7148 * @param arg Pointer to a callback argument.
7150 * @return Returns 0 on success, or a non-zero value on failure.
7153 ocs_hw_get_port_protocol_cb(ocs_hw_t *hw, int32_t status,
7154 uint8_t *mqe, void *arg)
7156 ocs_hw_get_port_protocol_cb_arg_t *cb_arg = arg;
7157 ocs_dma_t *payload = &(cb_arg->payload);
7158 sli4_res_common_get_profile_config_t* response = (sli4_res_common_get_profile_config_t*) payload->virt;
7159 ocs_hw_port_protocol_e port_protocol;
7160 int num_descriptors;
7161 sli4_resource_descriptor_v1_t *desc_p;
7162 sli4_pcie_resource_descriptor_v1_t *pcie_desc_p;
7165 port_protocol = OCS_HW_PORT_PROTOCOL_OTHER;
7167 num_descriptors = response->desc_count;
7168 desc_p = (sli4_resource_descriptor_v1_t *)response->desc;
7169 for (i=0; i<num_descriptors; i++) {
7170 if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) {
7171 pcie_desc_p = (sli4_pcie_resource_descriptor_v1_t*) desc_p;
7172 if (pcie_desc_p->pf_number == cb_arg->pci_func) {
7173 switch(pcie_desc_p->pf_type) {
7175 port_protocol = OCS_HW_PORT_PROTOCOL_ISCSI;
7178 port_protocol = OCS_HW_PORT_PROTOCOL_FCOE;
7181 port_protocol = OCS_HW_PORT_PROTOCOL_FC;
7184 port_protocol = OCS_HW_PORT_PROTOCOL_OTHER;
7190 desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length);
7194 cb_arg->cb(status, port_protocol, cb_arg->arg);
7198 ocs_dma_free(hw->os, &cb_arg->payload);
7199 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t));
7200 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7207 * @brief Get the current port protocol.
7209 * Issues a SLI4 COMMON_GET_PROFILE_CONFIG mailbox. When the
7210 * command completes the provided mgmt callback function is
7213 * @param hw Hardware context.
7214 * @param pci_func PCI function to query for current protocol.
7215 * @param cb Callback function to be called when the command completes.
7216 * @param ul_arg An argument that is passed to the callback function.
7219 * - OCS_HW_RTN_SUCCESS on success.
7220 * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7221 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7223 * - OCS_HW_RTN_ERROR on any other error.
7226 ocs_hw_get_port_protocol(ocs_hw_t *hw, uint32_t pci_func,
7227 ocs_get_port_protocol_cb_t cb, void* ul_arg)
7230 ocs_hw_get_port_protocol_cb_arg_t *cb_arg;
7231 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7233 /* Only supported on Skyhawk */
7234 if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
7235 return OCS_HW_RTN_ERROR;
7238 /* mbxdata holds the header of the command */
7239 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7240 if (mbxdata == NULL) {
7241 ocs_log_err(hw->os, "failed to malloc mbox\n");
7242 return OCS_HW_RTN_NO_MEMORY;
7246 /* cb_arg holds the data that will be passed to the callback on completion */
7247 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_port_protocol_cb_arg_t), OCS_M_NOWAIT);
7248 if (cb_arg == NULL) {
7249 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7250 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7251 return OCS_HW_RTN_NO_MEMORY;
7255 cb_arg->arg = ul_arg;
7256 cb_arg->pci_func = pci_func;
7258 /* dma_mem holds the non-embedded portion */
7259 if (ocs_dma_alloc(hw->os, &cb_arg->payload, 4096, 4)) {
7260 ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
7261 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7262 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t));
7263 return OCS_HW_RTN_NO_MEMORY;
7266 if (sli_cmd_common_get_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->payload)) {
7267 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_port_protocol_cb, cb_arg);
7270 if (rc != OCS_HW_RTN_SUCCESS) {
7271 ocs_log_test(hw->os, "GET_PROFILE_CONFIG failed\n");
7272 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7273 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t));
7274 ocs_dma_free(hw->os, &cb_arg->payload);
7281 typedef struct ocs_hw_set_port_protocol_cb_arg_s {
7282 ocs_set_port_protocol_cb_t cb;
7285 uint32_t new_protocol;
7287 } ocs_hw_set_port_protocol_cb_arg_t;
7290 * @brief Called for the completion of set_port_profile for a
7294 * This is the second of two callbacks for the set_port_protocol
7295 * function. The set operation is a read-modify-write. This
7296 * callback is called when the write (SET_PROFILE_CONFIG)
7299 * @param hw Hardware context.
7300 * @param status The status from the MQE.
7301 * @param mqe Pointer to mailbox command buffer.
7302 * @param arg Pointer to a callback argument.
7304 * @return 0 on success, non-zero otherwise
7307 ocs_hw_set_port_protocol_cb2(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7309 ocs_hw_set_port_protocol_cb_arg_t *cb_arg = arg;
7312 cb_arg->cb( status, cb_arg->arg);
7315 ocs_dma_free(hw->os, &(cb_arg->payload));
7316 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7317 ocs_free(hw->os, arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7323 * @brief Called for the completion of set_port_profile for a
7327 * This is the first of two callbacks for the set_port_protocol
7328 * function. The set operation is a read-modify-write. This
7329 * callback is called when the read completes
7330 * (GET_PROFILE_CONFG). It will updated the resource
7331 * descriptors, then queue the write (SET_PROFILE_CONFIG).
7333 * On entry there are three memory areas that were allocated by
7334 * ocs_hw_set_port_protocol. If a failure is detected in this
7335 * function those need to be freed. If this function succeeds
7336 * it allocates three more areas.
7338 * @param hw Hardware context.
7339 * @param status The status from the MQE
7340 * @param mqe Pointer to mailbox command buffer.
7341 * @param arg Pointer to a callback argument.
7343 * @return Returns 0 on success, or a non-zero value otherwise.
7346 ocs_hw_set_port_protocol_cb1(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7348 ocs_hw_set_port_protocol_cb_arg_t *cb_arg = arg;
7349 ocs_dma_t *payload = &(cb_arg->payload);
7350 sli4_res_common_get_profile_config_t* response = (sli4_res_common_get_profile_config_t*) payload->virt;
7351 int num_descriptors;
7352 sli4_resource_descriptor_v1_t *desc_p;
7353 sli4_pcie_resource_descriptor_v1_t *pcie_desc_p;
7355 ocs_hw_set_port_protocol_cb_arg_t *new_cb_arg;
7356 ocs_hw_port_protocol_e new_protocol;
7358 sli4_isap_resouce_descriptor_v1_t *isap_desc_p;
7360 int pci_descriptor_count;
7361 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7362 int num_fcoe_ports = 0;
7363 int num_iscsi_ports = 0;
7365 new_protocol = (ocs_hw_port_protocol_e)cb_arg->new_protocol;
7367 num_descriptors = response->desc_count;
7369 /* Count PCI descriptors */
7370 pci_descriptor_count = 0;
7371 desc_p = (sli4_resource_descriptor_v1_t *)response->desc;
7372 for (i=0; i<num_descriptors; i++) {
7373 if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) {
7374 ++pci_descriptor_count;
7376 desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length);
7379 /* mbxdata holds the header of the command */
7380 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7381 if (mbxdata == NULL) {
7382 ocs_log_err(hw->os, "failed to malloc mbox\n");
7383 return OCS_HW_RTN_NO_MEMORY;
7387 /* cb_arg holds the data that will be passed to the callback on completion */
7388 new_cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_port_protocol_cb_arg_t), OCS_M_NOWAIT);
7389 if (new_cb_arg == NULL) {
7390 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7391 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7392 return OCS_HW_RTN_NO_MEMORY;
7395 new_cb_arg->cb = cb_arg->cb;
7396 new_cb_arg->arg = cb_arg->arg;
7398 /* Allocate memory for the descriptors we're going to send. This is
7399 * one for each PCI descriptor plus one ISAP descriptor. */
7400 if (ocs_dma_alloc(hw->os, &new_cb_arg->payload, sizeof(sli4_req_common_set_profile_config_t) +
7401 (pci_descriptor_count * sizeof(sli4_pcie_resource_descriptor_v1_t)) +
7402 sizeof(sli4_isap_resouce_descriptor_v1_t), 4)) {
7403 ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
7404 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7405 ocs_free(hw->os, new_cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7406 return OCS_HW_RTN_NO_MEMORY;
7409 sli_cmd_common_set_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
7410 &new_cb_arg->payload,
7411 0, pci_descriptor_count+1, 1);
7413 /* Point dst to the first descriptor entry in the SET_PROFILE_CONFIG command */
7414 dst = (uint8_t *)&(((sli4_req_common_set_profile_config_t *) new_cb_arg->payload.virt)->desc);
7416 /* Loop over all descriptors. If the descriptor is a PCIe descriptor, copy it
7417 * to the SET_PROFILE_CONFIG command to be written back. If it's the descriptor
7418 * that we're trying to change also set its pf_type.
7420 desc_p = (sli4_resource_descriptor_v1_t *)response->desc;
7421 for (i=0; i<num_descriptors; i++) {
7422 if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) {
7423 pcie_desc_p = (sli4_pcie_resource_descriptor_v1_t*) desc_p;
7424 if (pcie_desc_p->pf_number == cb_arg->pci_func) {
7425 /* This is the PCIe descriptor for this OCS instance.
7426 * Update it with the new pf_type */
7427 switch(new_protocol) {
7428 case OCS_HW_PORT_PROTOCOL_FC:
7429 pcie_desc_p->pf_type = SLI4_PROTOCOL_FC;
7431 case OCS_HW_PORT_PROTOCOL_FCOE:
7432 pcie_desc_p->pf_type = SLI4_PROTOCOL_FCOE;
7434 case OCS_HW_PORT_PROTOCOL_ISCSI:
7435 pcie_desc_p->pf_type = SLI4_PROTOCOL_ISCSI;
7438 pcie_desc_p->pf_type = SLI4_PROTOCOL_DEFAULT;
7444 if (pcie_desc_p->pf_type == SLI4_PROTOCOL_FCOE) {
7447 if (pcie_desc_p->pf_type == SLI4_PROTOCOL_ISCSI) {
7450 ocs_memcpy(dst, pcie_desc_p, sizeof(sli4_pcie_resource_descriptor_v1_t));
7451 dst += sizeof(sli4_pcie_resource_descriptor_v1_t);
7454 desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length);
7457 /* Create an ISAP resource descriptor */
7458 isap_desc_p = (sli4_isap_resouce_descriptor_v1_t*)dst;
7459 isap_desc_p->descriptor_type = SLI4_RESOURCE_DESCRIPTOR_TYPE_ISAP;
7460 isap_desc_p->descriptor_length = sizeof(sli4_isap_resouce_descriptor_v1_t);
7461 if (num_iscsi_ports > 0) {
7462 isap_desc_p->iscsi_tgt = 1;
7463 isap_desc_p->iscsi_ini = 1;
7464 isap_desc_p->iscsi_dif = 1;
7466 if (num_fcoe_ports > 0) {
7467 isap_desc_p->fcoe_tgt = 1;
7468 isap_desc_p->fcoe_ini = 1;
7469 isap_desc_p->fcoe_dif = 1;
7472 /* At this point we're done with the memory allocated by ocs_port_set_protocol */
7473 ocs_dma_free(hw->os, &cb_arg->payload);
7474 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7475 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7478 /* Send a SET_PROFILE_CONFIG mailbox command with the new descriptors */
7479 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_port_protocol_cb2, new_cb_arg);
7481 ocs_log_err(hw->os, "Error posting COMMON_SET_PROFILE_CONFIG\n");
7482 /* Call the upper level callback to report a failure */
7483 if (new_cb_arg->cb) {
7484 new_cb_arg->cb( rc, new_cb_arg->arg);
7487 /* Free the memory allocated by this function */
7488 ocs_dma_free(hw->os, &new_cb_arg->payload);
7489 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7490 ocs_free(hw->os, new_cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7499 * @brief Set the port protocol.
7501 * Setting the port protocol is a read-modify-write operation.
7502 * This function submits a GET_PROFILE_CONFIG command to read
7503 * the current settings. The callback function will modify the
7504 * settings and issue the write.
7506 * On successful completion this function will have allocated
7507 * two regular memory areas and one dma area which will need to
7508 * get freed later in the callbacks.
7510 * @param hw Hardware context.
7511 * @param new_protocol New protocol to use.
7512 * @param pci_func PCI function to configure.
7513 * @param cb Callback function to be called when the command completes.
7514 * @param ul_arg An argument that is passed to the callback function.
7517 * - OCS_HW_RTN_SUCCESS on success.
7518 * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7519 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7521 * - OCS_HW_RTN_ERROR on any other error.
7524 ocs_hw_set_port_protocol(ocs_hw_t *hw, ocs_hw_port_protocol_e new_protocol,
7525 uint32_t pci_func, ocs_set_port_protocol_cb_t cb, void *ul_arg)
7528 ocs_hw_set_port_protocol_cb_arg_t *cb_arg;
7529 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
7531 /* Only supported on Skyhawk */
7532 if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
7533 return OCS_HW_RTN_ERROR;
7536 /* mbxdata holds the header of the command */
7537 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7538 if (mbxdata == NULL) {
7539 ocs_log_err(hw->os, "failed to malloc mbox\n");
7540 return OCS_HW_RTN_NO_MEMORY;
7544 /* cb_arg holds the data that will be passed to the callback on completion */
7545 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_port_protocol_cb_arg_t), OCS_M_NOWAIT);
7546 if (cb_arg == NULL) {
7547 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7548 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7549 return OCS_HW_RTN_NO_MEMORY;
7553 cb_arg->arg = ul_arg;
7554 cb_arg->new_protocol = new_protocol;
7555 cb_arg->pci_func = pci_func;
7557 /* dma_mem holds the non-embedded portion */
7558 if (ocs_dma_alloc(hw->os, &cb_arg->payload, 4096, 4)) {
7559 ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
7560 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7561 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t));
7562 return OCS_HW_RTN_NO_MEMORY;
7565 if (sli_cmd_common_get_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->payload)) {
7566 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_port_protocol_cb1, cb_arg);
7569 if (rc != OCS_HW_RTN_SUCCESS) {
7570 ocs_log_test(hw->os, "GET_PROFILE_CONFIG failed\n");
7571 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7572 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t));
7573 ocs_dma_free(hw->os, &cb_arg->payload);
7579 typedef struct ocs_hw_get_profile_list_cb_arg_s {
7580 ocs_get_profile_list_cb_t cb;
7583 } ocs_hw_get_profile_list_cb_arg_t;
7586 * @brief Called for the completion of get_profile_list for a
7589 * This function is called when the COMMMON_GET_PROFILE_LIST
7590 * mailbox completes. The response will be in
7591 * ctx->non_embedded_mem.virt. This function parses the
7592 * response and creates a ocs_hw_profile_list, then calls the
7593 * mgmt_cb callback function and passes that list to it.
7595 * @param hw Hardware context.
7596 * @param status The status from the MQE
7597 * @param mqe Pointer to mailbox command buffer.
7598 * @param arg Pointer to a callback argument.
7600 * @return Returns 0 on success, or a non-zero value on failure.
7603 ocs_hw_get_profile_list_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7605 ocs_hw_profile_list_t *list;
7606 ocs_hw_get_profile_list_cb_arg_t *cb_arg = arg;
7607 ocs_dma_t *payload = &(cb_arg->payload);
7608 sli4_res_common_get_profile_list_t *response = (sli4_res_common_get_profile_list_t *)payload->virt;
7610 int num_descriptors;
7612 list = ocs_malloc(hw->os, sizeof(ocs_hw_profile_list_t), OCS_M_ZERO);
7613 list->num_descriptors = response->profile_descriptor_count;
7615 num_descriptors = list->num_descriptors;
7616 if (num_descriptors > OCS_HW_MAX_PROFILES) {
7617 num_descriptors = OCS_HW_MAX_PROFILES;
7620 for (i=0; i<num_descriptors; i++) {
7621 list->descriptors[i].profile_id = response->profile_descriptor[i].profile_id;
7622 list->descriptors[i].profile_index = response->profile_descriptor[i].profile_index;
7623 ocs_strcpy(list->descriptors[i].profile_description, (char *)response->profile_descriptor[i].profile_description);
7627 cb_arg->cb(status, list, cb_arg->arg);
7629 ocs_free(hw->os, list, sizeof(*list));
7632 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7633 ocs_dma_free(hw->os, &cb_arg->payload);
7634 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
7641 * @brief Get a list of available profiles.
7643 * Issues a SLI-4 COMMON_GET_PROFILE_LIST mailbox. When the
7644 * command completes the provided mgmt callback function is
7647 * @param hw Hardware context.
7648 * @param cb Callback function to be called when the
7649 * command completes.
7650 * @param ul_arg An argument that is passed to the callback
7654 * - OCS_HW_RTN_SUCCESS on success.
7655 * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7656 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7658 * - OCS_HW_RTN_ERROR on any other error.
7661 ocs_hw_get_profile_list(ocs_hw_t *hw, ocs_get_profile_list_cb_t cb, void* ul_arg)
7664 ocs_hw_get_profile_list_cb_arg_t *cb_arg;
7665 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7667 /* Only supported on Skyhawk */
7668 if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
7669 return OCS_HW_RTN_ERROR;
7672 /* mbxdata holds the header of the command */
7673 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7674 if (mbxdata == NULL) {
7675 ocs_log_err(hw->os, "failed to malloc mbox\n");
7676 return OCS_HW_RTN_NO_MEMORY;
7680 /* cb_arg holds the data that will be passed to the callback on completion */
7681 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_profile_list_cb_arg_t), OCS_M_NOWAIT);
7682 if (cb_arg == NULL) {
7683 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7684 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7685 return OCS_HW_RTN_NO_MEMORY;
7689 cb_arg->arg = ul_arg;
7691 /* dma_mem holds the non-embedded portion */
7692 if (ocs_dma_alloc(hw->os, &cb_arg->payload, sizeof(sli4_res_common_get_profile_list_t), 4)) {
7693 ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
7694 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7695 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
7696 return OCS_HW_RTN_NO_MEMORY;
7699 if (sli_cmd_common_get_profile_list(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 0, &cb_arg->payload)) {
7700 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_profile_list_cb, cb_arg);
7703 if (rc != OCS_HW_RTN_SUCCESS) {
7704 ocs_log_test(hw->os, "GET_PROFILE_LIST failed\n");
7705 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7706 ocs_dma_free(hw->os, &cb_arg->payload);
7707 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
7713 typedef struct ocs_hw_get_active_profile_cb_arg_s {
7714 ocs_get_active_profile_cb_t cb;
7716 } ocs_hw_get_active_profile_cb_arg_t;
7719 * @brief Called for the completion of get_active_profile for a
7722 * @param hw Hardware context.
7723 * @param status The status from the MQE
7724 * @param mqe Pointer to mailbox command buffer.
7725 * @param arg Pointer to a callback argument.
7727 * @return Returns 0 on success, or a non-zero value on failure.
7730 ocs_hw_get_active_profile_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7732 ocs_hw_get_active_profile_cb_arg_t *cb_arg = arg;
7733 sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
7734 sli4_res_common_get_active_profile_t* response = (sli4_res_common_get_active_profile_t*) mbox_rsp->payload.embed;
7735 uint32_t active_profile;
7737 active_profile = response->active_profile_id;
7740 cb_arg->cb(status, active_profile, cb_arg->arg);
7743 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7744 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_active_profile_cb_arg_t));
7751 * @brief Get the currently active profile.
7753 * Issues a SLI-4 COMMON_GET_ACTIVE_PROFILE mailbox. When the
7754 * command completes the provided mgmt callback function is
7757 * @param hw Hardware context.
7758 * @param cb Callback function to be called when the
7759 * command completes.
7760 * @param ul_arg An argument that is passed to the callback
7764 * - OCS_HW_RTN_SUCCESS on success.
7765 * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7766 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7768 * - OCS_HW_RTN_ERROR on any other error.
7771 ocs_hw_get_active_profile(ocs_hw_t *hw, ocs_get_active_profile_cb_t cb, void* ul_arg)
7774 ocs_hw_get_active_profile_cb_arg_t *cb_arg;
7775 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7777 /* Only supported on Skyhawk */
7778 if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
7779 return OCS_HW_RTN_ERROR;
7782 /* mbxdata holds the header of the command */
7783 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7784 if (mbxdata == NULL) {
7785 ocs_log_err(hw->os, "failed to malloc mbox\n");
7786 return OCS_HW_RTN_NO_MEMORY;
7789 /* cb_arg holds the data that will be passed to the callback on completion */
7790 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_active_profile_cb_arg_t), OCS_M_NOWAIT);
7791 if (cb_arg == NULL) {
7792 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7793 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7794 return OCS_HW_RTN_NO_MEMORY;
7798 cb_arg->arg = ul_arg;
7800 if (sli_cmd_common_get_active_profile(&hw->sli, mbxdata, SLI4_BMBX_SIZE)) {
7801 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_active_profile_cb, cb_arg);
7804 if (rc != OCS_HW_RTN_SUCCESS) {
7805 ocs_log_test(hw->os, "GET_ACTIVE_PROFILE failed\n");
7806 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7807 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_active_profile_cb_arg_t));
7813 typedef struct ocs_hw_get_nvparms_cb_arg_s {
7814 ocs_get_nvparms_cb_t cb;
7816 } ocs_hw_get_nvparms_cb_arg_t;
7819 * @brief Called for the completion of get_nvparms for a
7822 * @param hw Hardware context.
7823 * @param status The status from the MQE.
7824 * @param mqe Pointer to mailbox command buffer.
7825 * @param arg Pointer to a callback argument.
7827 * @return 0 on success, non-zero otherwise
7830 ocs_hw_get_nvparms_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7832 ocs_hw_get_nvparms_cb_arg_t *cb_arg = arg;
7833 sli4_cmd_read_nvparms_t* mbox_rsp = (sli4_cmd_read_nvparms_t*) mqe;
7836 cb_arg->cb(status, mbox_rsp->wwpn, mbox_rsp->wwnn, mbox_rsp->hard_alpa,
7837 mbox_rsp->preferred_d_id, cb_arg->arg);
7840 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7841 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_nvparms_cb_arg_t));
7848 * @brief Read non-volatile parms.
7850 * Issues a SLI-4 READ_NVPARMS mailbox. When the
7851 * command completes the provided mgmt callback function is
7854 * @param hw Hardware context.
7855 * @param cb Callback function to be called when the
7856 * command completes.
7857 * @param ul_arg An argument that is passed to the callback
7861 * - OCS_HW_RTN_SUCCESS on success.
7862 * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7863 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7865 * - OCS_HW_RTN_ERROR on any other error.
7868 ocs_hw_get_nvparms(ocs_hw_t *hw, ocs_get_nvparms_cb_t cb, void* ul_arg)
7871 ocs_hw_get_nvparms_cb_arg_t *cb_arg;
7872 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7874 /* mbxdata holds the header of the command */
7875 mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7876 if (mbxdata == NULL) {
7877 ocs_log_err(hw->os, "failed to malloc mbox\n");
7878 return OCS_HW_RTN_NO_MEMORY;
7881 /* cb_arg holds the data that will be passed to the callback on completion */
7882 cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_nvparms_cb_arg_t), OCS_M_NOWAIT);
7883 if (cb_arg == NULL) {
7884 ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7885 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7886 return OCS_HW_RTN_NO_MEMORY;
7890 cb_arg->arg = ul_arg;
7892 if (sli_cmd_read_nvparms(&hw->sli, mbxdata, SLI4_BMBX_SIZE)) {
7893 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_nvparms_cb, cb_arg);
7896 if (rc != OCS_HW_RTN_SUCCESS) {
7897 ocs_log_test(hw->os, "READ_NVPARMS failed\n");
7898 ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7899 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_nvparms_cb_arg_t));
7905 typedef struct ocs_hw_set_nvparms_cb_arg_s {
7906 ocs_set_nvparms_cb_t cb;
7908 } ocs_hw_set_nvparms_cb_arg_t;
7911 * @brief Called for the completion of set_nvparms for a
7914 * @param hw Hardware context.
7915 * @param status The status from the MQE.
7916 * @param mqe Pointer to mailbox command buffer.
7917 * @param arg Pointer to a callback argument.
7919 * @return Returns 0 on success, or a non-zero value on failure.
7922 ocs_hw_set_nvparms_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7924 ocs_hw_set_nvparms_cb_arg_t *cb_arg = arg;
7927 cb_arg->cb(status, cb_arg->arg);
7930 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7931 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_nvparms_cb_arg_t));
7938 * @brief Write non-volatile parms.
7940 * Issues a SLI-4 WRITE_NVPARMS mailbox. When the
7941 * command completes the provided mgmt callback function is
7944 * @param hw Hardware context.
7945 * @param cb Callback function to be called when the
7946 * command completes.
7947 * @param wwpn Port's WWPN in big-endian order, or NULL to use default.
7948 * @param wwnn Port's WWNN in big-endian order, or NULL to use default.
7949 * @param hard_alpa A hard AL_PA address setting used during loop
7950 * initialization. If no hard AL_PA is required, set to 0.
7951 * @param preferred_d_id A preferred D_ID address setting
7952 * that may be overridden with the CONFIG_LINK mailbox command.
7953 * If there is no preference, set to 0.
7954 * @param ul_arg An argument that is passed to the callback
7958 * - OCS_HW_RTN_SUCCESS on success.
7959 * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7960 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7962 * - OCS_HW_RTN_ERROR on any other error.
ocs_hw_set_nvparms(ocs_hw_t *hw, ocs_set_nvparms_cb_t cb, uint8_t *wwpn,
		uint8_t *wwnn, uint8_t hard_alpa, uint32_t preferred_d_id, void* ul_arg)
	ocs_hw_set_nvparms_cb_arg_t *cb_arg;
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
	/* mbxdata holds the header of the command */
	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
	if (mbxdata == NULL) {
		ocs_log_err(hw->os, "failed to malloc mbox\n");
		return OCS_HW_RTN_NO_MEMORY;
	/* cb_arg holds the data that will be passed to the callback on completion */
	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_nvparms_cb_arg_t), OCS_M_NOWAIT);
	if (cb_arg == NULL) {
		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
		/* mbox buffer is still owned here; release it before bailing out */
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		return OCS_HW_RTN_NO_MEMORY;
	cb_arg->arg = ul_arg;
	/* Build the WRITE_NVPARMS MQE; submit it only if construction succeeded */
	if (sli_cmd_write_nvparms(&hw->sli, mbxdata, SLI4_BMBX_SIZE, wwpn, wwnn, hard_alpa, preferred_d_id)) {
		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_nvparms_cb, cb_arg);
	if (rc != OCS_HW_RTN_SUCCESS) {
		/* Command never queued: the completion callback will not run, so free both buffers here */
		ocs_log_test(hw->os, "SET_NVPARMS failed\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_nvparms_cb_arg_t));
8006 * @brief Called to obtain the count for the specified type.
8008 * @param hw Hardware context.
8009 * @param io_count_type IO count type (inuse, free, wait_free).
8011 * @return Returns the number of IOs on the specified list type.
ocs_hw_io_get_count(ocs_hw_t *hw, ocs_hw_io_count_type_e io_count_type)
	ocs_hw_io_t *io = NULL;
	/* Walk the requested IO list under io_lock so the snapshot is consistent */
	ocs_lock(&hw->io_lock);
	switch (io_count_type) {
	case OCS_HW_IO_INUSE_COUNT :
		ocs_list_foreach(&hw->io_inuse, io) {
	case OCS_HW_IO_FREE_COUNT :
		ocs_list_foreach(&hw->io_free, io) {
	case OCS_HW_IO_WAIT_FREE_COUNT :
		ocs_list_foreach(&hw->io_wait_free, io) {
	case OCS_HW_IO_PORT_OWNED_COUNT:
		ocs_list_foreach(&hw->io_port_owned, io) {
	case OCS_HW_IO_N_TOTAL_IO_COUNT :
		/* Total is a configured constant, not a list walk */
		count = hw->config.n_io;
	ocs_unlock(&hw->io_lock);
8053 * @brief Called to obtain the count of produced RQs.
8055 * @param hw Hardware context.
8057 * @return Returns the number of RQs produced.
ocs_hw_get_rqes_produced_count(ocs_hw_t *hw)
	/* Count RQ-tracker slots that currently hold a produced sequence buffer */
	for (i = 0; i < hw->hw_rq_count; i++) {
		hw_rq_t *rq = hw->hw_rq[i];
		if (rq->rq_tracker != NULL) {
			for (j = 0; j < rq->entry_count; j++) {
				if (rq->rq_tracker[j] != NULL) {
/* Callback context carried through the SET_ACTIVE_PROFILE mailbox command. */
typedef struct ocs_hw_set_active_profile_cb_arg_s {
	ocs_set_active_profile_cb_t cb;	/* user completion callback (may be NULL) */
} ocs_hw_set_active_profile_cb_arg_t;
8086 * @brief Called for the completion of set_active_profile for a
8089 * @param hw Hardware context.
8090 * @param status The status from the MQE
8091 * @param mqe Pointer to mailbox command buffer.
8092 * @param arg Pointer to a callback argument.
8094 * @return Returns 0 on success, or a non-zero value on failure.
8097 ocs_hw_set_active_profile_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
8099 ocs_hw_set_active_profile_cb_arg_t *cb_arg = arg;
8102 cb_arg->cb(status, cb_arg->arg);
8105 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
8106 ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_active_profile_cb_arg_t));
8113 * @brief Set the currently active profile.
 * Issues a SLI4 COMMON_SET_ACTIVE_PROFILE mailbox. When the
8116 * command completes the provided mgmt callback function is
8119 * @param hw Hardware context.
8120 * @param profile_id Profile ID to activate.
8121 * @param cb Callback function to be called when the command completes.
8122 * @param ul_arg An argument that is passed to the callback function.
8125 * - OCS_HW_RTN_SUCCESS on success.
8126 * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
8127 * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
8129 * - OCS_HW_RTN_ERROR on any other error.
ocs_hw_set_active_profile(ocs_hw_t *hw, ocs_set_active_profile_cb_t cb, uint32_t profile_id, void* ul_arg)
	ocs_hw_set_active_profile_cb_arg_t *cb_arg;
	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
	/* Only supported on Skyhawk */
	if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
		return OCS_HW_RTN_ERROR;
	/* mbxdata holds the header of the command */
	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
	if (mbxdata == NULL) {
		ocs_log_err(hw->os, "failed to malloc mbox\n");
		return OCS_HW_RTN_NO_MEMORY;
	/* cb_arg holds the data that will be passed to the callback on completion */
	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_active_profile_cb_arg_t), OCS_M_NOWAIT);
	if (cb_arg == NULL) {
		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		return OCS_HW_RTN_NO_MEMORY;
	cb_arg->arg = ul_arg;
	/* Build the SET_ACTIVE_PROFILE MQE; submit only on successful construction */
	if (sli_cmd_common_set_active_profile(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 0, profile_id)) {
		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_active_profile_cb, cb_arg);
	if (rc != OCS_HW_RTN_SUCCESS) {
		/* Not queued: completion callback will not run, so free both buffers here */
		ocs_log_test(hw->os, "SET_ACTIVE_PROFILE failed\n");
		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_active_profile_cb_arg_t));
8182 * @brief Update the queue hash with the ID and index.
8184 * @param hash Pointer to hash table.
8185 * @param id ID that was created.
8186 * @param index The index into the hash object.
8189 ocs_hw_queue_hash_add(ocs_queue_hash_t *hash, uint16_t id, uint16_t index)
8191 uint32_t hash_index = id & (OCS_HW_Q_HASH_SIZE - 1);
8194 * Since the hash is always bigger than the number of queues, then we
8195 * never have to worry about an infinite loop.
8197 while(hash[hash_index].in_use) {
8198 hash_index = (hash_index + 1) & (OCS_HW_Q_HASH_SIZE - 1);
8201 /* not used, claim the entry */
8202 hash[hash_index].id = id;
8203 hash[hash_index].in_use = 1;
8204 hash[hash_index].index = index;
8208 * @brief Find index given queue ID.
8210 * @param hash Pointer to hash table.
8211 * @param id ID to find.
8213 * @return Returns the index into the HW cq array or -1 if not found.
8216 ocs_hw_queue_hash_find(ocs_queue_hash_t *hash, uint16_t id)
8219 int32_t index = id & (OCS_HW_Q_HASH_SIZE - 1);
8222 * Since the hash is always bigger than the maximum number of Qs, then we
8223 * never have to worry about an infinite loop. We will always find an
8227 if (hash[index].in_use &&
8228 hash[index].id == id) {
8229 rc = hash[index].index;
8231 index = (index + 1) & (OCS_HW_Q_HASH_SIZE - 1);
8233 } while(rc == -1 && hash[index].in_use);
ocs_hw_domain_add(ocs_hw_t *hw, ocs_domain_t *domain)
	int32_t		rc = OCS_HW_RTN_ERROR;
	uint16_t	fcfi = UINT16_MAX;
	if ((hw == NULL) || (domain == NULL)) {
		ocs_log_err(NULL, "bad parameter hw=%p domain=%p\n",
		return OCS_HW_RTN_ERROR;
	fcfi = domain->fcf_indicator;
	if (fcfi < SLI4_MAX_FCFI) {
		uint16_t	fcf_index = UINT16_MAX;
		ocs_log_debug(hw->os, "adding domain %p @ %#x\n",
		/* Register the domain under its FCFI */
		hw->domains[fcfi] = domain;
		/* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
		if (hw->workaround.override_fcfi) {
			/* remember the first registered FCFI so RX frames can be steered to it */
			if (hw->first_domain_idx < 0) {
				hw->first_domain_idx = fcfi;
		fcf_index = domain->fcf;
		if (fcf_index < SLI4_MAX_FCF_INDEX) {
			/* also maintain the FCF index -> FCFI reverse map */
			ocs_log_debug(hw->os, "adding map of FCF index %d to FCFI %d\n",
			hw->fcf_index_fcfi[fcf_index] = fcfi;
			rc = OCS_HW_RTN_SUCCESS;
			ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n",
				     fcf_index, SLI4_MAX_FCF_INDEX);
			/* roll back the domain registration on a bad FCF index */
			hw->domains[fcfi] = NULL;
		ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n",
			     fcfi, SLI4_MAX_FCFI);
ocs_hw_domain_del(ocs_hw_t *hw, ocs_domain_t *domain)
	int32_t		rc = OCS_HW_RTN_ERROR;
	uint16_t	fcfi = UINT16_MAX;
	if ((hw == NULL) || (domain == NULL)) {
		ocs_log_err(NULL, "bad parameter hw=%p domain=%p\n",
		return OCS_HW_RTN_ERROR;
	fcfi = domain->fcf_indicator;
	if (fcfi < SLI4_MAX_FCFI) {
		uint16_t	fcf_index = UINT16_MAX;
		ocs_log_debug(hw->os, "deleting domain %p @ %#x\n",
		/* only remove the domain that is actually registered at this FCFI */
		if (domain != hw->domains[fcfi]) {
			ocs_log_test(hw->os, "provided domain %p does not match stored domain %p\n",
				     domain, hw->domains[fcfi]);
			return OCS_HW_RTN_ERROR;
		hw->domains[fcfi] = NULL;
		/* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
		if (hw->workaround.override_fcfi) {
			/* forget the steering FCFI if it was this domain's */
			if (hw->first_domain_idx == fcfi) {
				hw->first_domain_idx = -1;
		fcf_index = domain->fcf;
		if (fcf_index < SLI4_MAX_FCF_INDEX) {
			/* clear the FCF index -> FCFI reverse map entry */
			if (hw->fcf_index_fcfi[fcf_index] == fcfi) {
				hw->fcf_index_fcfi[fcf_index] = 0;
				rc = OCS_HW_RTN_SUCCESS;
				ocs_log_test(hw->os, "indexed FCFI %#x doesn't match provided %#x @ %d\n",
					     hw->fcf_index_fcfi[fcf_index], fcfi, fcf_index);
			ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n",
				     fcf_index, SLI4_MAX_FCF_INDEX);
		ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n",
			     fcfi, SLI4_MAX_FCFI);
/* Return the domain registered at the given FCFI, or NULL. */
ocs_hw_domain_get(ocs_hw_t *hw, uint16_t fcfi)
		ocs_log_err(NULL, "bad parameter hw=%p\n", hw);
	if (fcfi < SLI4_MAX_FCFI) {
		return hw->domains[fcfi];
		ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n",
			     fcfi, SLI4_MAX_FCFI);
/* Return the domain for a given FCF table index via the fcf_index -> FCFI map, or NULL. */
static ocs_domain_t *
ocs_hw_domain_get_indexed(ocs_hw_t *hw, uint16_t fcf_index)
		ocs_log_err(NULL, "bad parameter hw=%p\n", hw);
	if (fcf_index < SLI4_MAX_FCF_INDEX) {
		return ocs_hw_domain_get(hw, hw->fcf_index_fcfi[fcf_index]);
		ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n",
			     fcf_index, SLI4_MAX_FCF_INDEX);
 * @brief Quarantine an IO by taking a reference count and adding it to the
8381 * quarantine list. When the IO is popped from the list then the
8382 * count is released and the IO MAY be freed depending on whether
8383 * it is still referenced by the IO.
8385 * @n @b Note: BZ 160124 - If this is a target write or an initiator read using
8386 * DIF, then we must add the XRI to a quarantine list until we receive
8387 * 4 more completions of this same type.
8389 * @param hw Hardware context.
8390 * @param wq Pointer to the WQ associated with the IO object to quarantine.
8391 * @param io Pointer to the io object to quarantine.
ocs_hw_io_quarantine(ocs_hw_t *hw, hw_wq_t *wq, ocs_hw_io_t *io)
	ocs_quarantine_info_t *q_info = &wq->quarantine_info;
	ocs_hw_io_t *free_io = NULL;
	/* return if the QX bit was clear */
	if (!io->quarantine) {
	/* increment the IO refcount to prevent it from being freed before the quarantine is over */
	if (ocs_ref_get_unless_zero(&io->ref) == 0) {
		/* command no longer active */
		ocs_log_debug(hw ? hw->os : NULL,
			      "io not active xri=0x%x tag=0x%x\n",
			      io->indicator, io->reqtag);
	/* quarantine list is a fixed-depth ring: inserting the new IO evicts the oldest one */
	sli_queue_lock(wq->queue);
		index = q_info->quarantine_index;
		free_io = q_info->quarantine_ios[index];
		q_info->quarantine_ios[index] = io;
		q_info->quarantine_index = (index + 1) % OCS_HW_QUARANTINE_QUEUE_DEPTH;
	sli_queue_unlock(wq->queue);
	/* drop the reference taken when the evicted IO was quarantined */
	if (free_io != NULL) {
		ocs_ref_put(&free_io->ref); /* ocs_ref_get(): same function */
8427 * @brief Process entries on the given completion queue.
8429 * @param hw Hardware context.
8430 * @param cq Pointer to the HW completion queue object.
ocs_hw_cq_process(ocs_hw_t *hw, hw_cq_t *cq)
	uint8_t		cqe[sizeof(sli4_mcqe_t)];
	uint16_t	rid = UINT16_MAX;
	sli4_qentry_e	ctype;		/* completion type */
	uint32_t	n_processed = 0;
	tstart = ocs_msectime();
	/* Drain CQEs until the queue is empty or the per-call processing limit hits */
	while (!sli_queue_read(&hw->sli, cq->queue, cqe)) {
		status = sli_cq_parse(&hw->sli, cq->queue, cqe, &ctype, &rid);
		/*
		 * The sign of status is significant. If status is:
		 * == 0 : call completed correctly and the CQE indicated success
		 *  > 0 : call completed correctly and the CQE indicated an error
		 *  < 0 : call failed and no information is available about the CQE
		 */
			/* Notification that an entry was consumed, but not completed */
		case SLI_QENTRY_ASYNC:
			sli_cqe_async(&hw->sli, cqe);
			/*
			 * Process MQ entry. Note there is no way to determine
			 * the MQ_ID from the completion entry.
			 */
			ocs_hw_mq_process(hw, status, hw->mq);
		case SLI_QENTRY_OPT_WRITE_CMD:
			ocs_hw_rqpair_process_auto_xfr_rdy_cmd(hw, cq, cqe);
		case SLI_QENTRY_OPT_WRITE_DATA:
			ocs_hw_rqpair_process_auto_xfr_rdy_data(hw, cq, cqe);
			ocs_hw_wq_process(hw, cq, cqe, status, rid);
		case SLI_QENTRY_WQ_RELEASE: {
			uint32_t wq_id = rid;
			/* NOTE(review): ocs_hw_queue_hash_find() returns -1 on a miss;
			 * assigning that to uint32_t and indexing hw->hw_wq[] unchecked
			 * would read far out of bounds — confirm a miss is impossible here. */
			uint32_t index = ocs_hw_queue_hash_find(hw->wq_hash, wq_id);
			hw_wq_t *wq = hw->hw_wq[index];
			/* Submit any HW IOs that are on the WQ pending list */
			hw_wq_submit_pending(wq, wq->wqec_set_count);
			ocs_hw_rqpair_process_rq(hw, cq, cqe);
		case SLI_QENTRY_XABT: {
			ocs_hw_xabt_process(hw, cq, cqe, rid);
			ocs_log_test(hw->os, "unhandled ctype=%#x rid=%#x\n", ctype, rid);
		/* bound the work done per invocation */
		if (n_processed == cq->queue->proc_limit) {
	/* re-arm the CQ; only request another interrupt once enough entries were posted */
	if (cq->queue->n_posted >= (cq->queue->posted_limit)) {
		sli_queue_arm(&hw->sli, cq->queue, FALSE);
		sli_queue_arm(&hw->sli, cq->queue, TRUE);
	/* track high-water marks for diagnostics */
	if (n_processed > cq->queue->max_num_processed) {
		cq->queue->max_num_processed = n_processed;
	telapsed = ocs_msectime() - tstart;
	if (telapsed > cq->queue->max_process_time) {
		cq->queue->max_process_time = telapsed;
8535 * @brief Process WQ completion queue entries.
8537 * @param hw Hardware context.
8538 * @param cq Pointer to the HW completion queue object.
8539 * @param cqe Pointer to WQ completion queue.
8540 * @param status Completion status.
8541 * @param rid Resource ID (IO tag).
ocs_hw_wq_process(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe, int32_t status, uint16_t rid)
	hw_wq_callback_t *wqcb;
	/* record the WQ completion in the queue history for debugging */
	ocs_queue_history_cqe(&hw->q_hist, SLI_QENTRY_WQ, (void *)cqe, ((sli4_fc_wcqe_t *)cqe)->status, cq->queue->id,
			      ((cq->queue->index - 1) & (cq->queue->length - 1)));
	/* requeue-XRI completions carry a reserved tag and have no per-IO callback */
	if(rid == OCS_HW_REQUE_XRI_REGTAG) {
		ocs_log_err(hw->os, "reque xri failed, status = %d \n", status);
	/* map the request tag back to the registered callback and dispatch */
	wqcb = ocs_hw_reqtag_get_instance(hw, rid);
		ocs_log_err(hw->os, "invalid request tag: x%x\n", rid);
	if (wqcb->callback == NULL) {
		ocs_log_err(hw->os, "wqcb callback is NULL\n");
	(*wqcb->callback)(wqcb->arg, cqe, status);
8575 * @brief Process WQ completions for IO requests
8577 * @param arg Generic callback argument
8578 * @param cqe Pointer to completion queue entry
8579 * @param status Completion status
8582 * @n @b Note: Regarding io->reqtag, the reqtag is assigned once when HW IOs are initialized
8583 * in ocs_hw_setup_io(), and don't need to be returned to the hw->wq_reqtag_pool.
ocs_hw_wq_process_io(void *arg, uint8_t *cqe, int32_t status)
	ocs_hw_io_t *io = arg;
	ocs_hw_t *hw = io->hw;
	sli4_fc_wcqe_t *wcqe = (void *)cqe;
	uint8_t	out_of_order_axr_cmd = 0;
	uint8_t	out_of_order_axr_data = 0;
	uint8_t	lock_taken = 0;	/* set when axr_lock is acquired below; checked at exit before unlock */
#if defined(OCS_DISC_SPIN_DELAY)
	/*
	 * For the primary IO, this will also be used for the
	 * response. So it is important to only set/clear this
	 * flag on the first data phase of the IO because
	 * subsequent phases will be done on the secondary XRI.
	 */
	if (io->quarantine && io->quarantine_first_phase) {
		io->quarantine = (wcqe->qx == 1);
		ocs_hw_io_quarantine(hw, io->wq, io);
	io->quarantine_first_phase = FALSE;
	/* BZ 161832 - free secondary HW IO */
	if (io->sec_hio != NULL &&
	    io->sec_hio->quarantine) {
		/*
		 * If the quarantine flag is set on the
		 * IO, then set it on the secondary IO
		 * based on the quarantine XRI (QX) bit
		 */
		io->sec_hio->quarantine = (wcqe->qx == 1);
		/* use the primary io->wq because it is not set on the secondary IO. */
		ocs_hw_io_quarantine(hw, io->wq, io->sec_hio);
	ocs_hw_remove_io_timed_wqe(hw, io);
	/* clear xbusy flag if WCQE[XB] is clear */
	if (io->xbusy && wcqe->xb == 0) {
	/* get extended CQE status */
	case OCS_HW_BLS_ACC:
	case OCS_HW_BLS_ACC_SID:
	case OCS_HW_ELS_REQ:
		sli_fc_els_did(&hw->sli, cqe, &ext);
		len = sli_fc_response_length(&hw->sli, cqe);
	case OCS_HW_ELS_RSP:
	case OCS_HW_ELS_RSP_SID:
	case OCS_HW_FC_CT_RSP:
		len = sli_fc_response_length(&hw->sli, cqe);
	case OCS_HW_IO_TARGET_WRITE:
		len = sli_fc_io_length(&hw->sli, cqe);
#if defined(OCS_DISC_SPIN_DELAY)
		/* optional test hook: artificial delay configured via driver property */
		if (ocs_get_property("disk_spin_delay", prop_buf, sizeof(prop_buf)) == 0) {
			delay = ocs_strtoul(prop_buf, 0, 0);
	case OCS_HW_IO_TARGET_READ:
		len = sli_fc_io_length(&hw->sli, cqe);
		/*
		 * if_type == 2 seems to return 0 "total length placed" on
		 * FCP_TSEND64_WQE completions. If this appears to happen,
		 * use the CTIO data transfer length instead.
		 */
		if (hw->workaround.retain_tsend_io_length && !len && !status) {
	case OCS_HW_IO_TARGET_RSP:
		if(io->is_port_owned) {
			/* port-owned (auto-xfer-rdy) IO: check for sequences that arrived out of order */
			ocs_lock(&io->axr_lock);
			if(io->axr_buf->call_axr_cmd) {
				out_of_order_axr_cmd = 1;
			if(io->axr_buf->call_axr_data) {
				out_of_order_axr_data = 1;
	case OCS_HW_IO_INITIATOR_READ:
		len = sli_fc_io_length(&hw->sli, cqe);
	case OCS_HW_IO_INITIATOR_WRITE:
		len = sli_fc_io_length(&hw->sli, cqe);
	case OCS_HW_IO_INITIATOR_NODATA:
	case OCS_HW_IO_DNRX_REQUEUE:
		/* release the count for re-posting the buffer */
		//ocs_hw_io_free(hw, io);
		ocs_log_test(hw->os, "XXX unhandled io type %#x for XRI 0x%x\n",
			     io->type, io->indicator);
	ext = sli_fc_ext_status(&hw->sli, cqe);
	/* Emulate IAAB=0 for initiator WQEs only; i.e. automatically
	 * abort exchange if an error occurred and exchange is still busy.
	 */
	if (hw->config.i_only_aab &&
	    (ocs_hw_iotype_is_originator(io->type)) &&
	    (ocs_hw_wcqe_abort_needed(status, ext, wcqe->xb))) {
		ocs_log_debug(hw->os, "aborting xri=%#x tag=%#x\n",
			      io->indicator, io->reqtag);
		/*
		 * Because the initiator will not issue another IO phase, then it is OK to issue the
		 * callback on the abort completion, but for consistency with the target, wait for the
		 * XRI_ABORTED CQE to issue the IO callback.
		 */
		rc = ocs_hw_io_abort(hw, io, TRUE, NULL, NULL);
		if (rc == OCS_HW_RTN_SUCCESS) {
			/* latch status to return after abort is complete */
			io->status_saved = 1;
			io->saved_status = status;
			io->saved_ext = ext;
			io->saved_len = len;
			goto exit_ocs_hw_wq_process_io;
		} else if (rc == OCS_HW_RTN_IO_ABORT_IN_PROGRESS) {
			/*
			 * Already being aborted by someone else (ABTS
			 * perhaps). Just fall through and return original
			 */
			ocs_log_debug(hw->os, "abort in progress xri=%#x tag=%#x\n",
				      io->indicator, io->reqtag);
			/* Failed to abort for some other reason, log error */
			ocs_log_test(hw->os, "Failed to abort xri=%#x tag=%#x rc=%d\n",
				     io->indicator, io->reqtag, rc);
	/*
	 * If we're not an originator IO, and XB is set, then issue abort for the IO from within the HW
	 */
	if ( (! ocs_hw_iotype_is_originator(io->type)) && wcqe->xb) {
		ocs_log_debug(hw->os, "aborting xri=%#x tag=%#x\n", io->indicator, io->reqtag);
		/*
		 * Because targets may send a response when the IO completes using the same XRI, we must
		 * wait for the XRI_ABORTED CQE to issue the IO callback
		 */
		rc = ocs_hw_io_abort(hw, io, FALSE, NULL, NULL);
		if (rc == OCS_HW_RTN_SUCCESS) {
			/* latch status to return after abort is complete */
			io->status_saved = 1;
			io->saved_status = status;
			io->saved_ext = ext;
			io->saved_len = len;
			goto exit_ocs_hw_wq_process_io;
		} else if (rc == OCS_HW_RTN_IO_ABORT_IN_PROGRESS) {
			/*
			 * Already being aborted by someone else (ABTS
			 * perhaps). Just fall through and return original
			 */
			ocs_log_debug(hw->os, "abort in progress xri=%#x tag=%#x\n",
				      io->indicator, io->reqtag);
			/* Failed to abort for some other reason, log error */
			ocs_log_test(hw->os, "Failed to abort xri=%#x tag=%#x rc=%d\n",
				     io->indicator, io->reqtag, rc);
	/* BZ 161832 - free secondary HW IO */
	if (io->sec_hio != NULL) {
		ocs_hw_io_free(hw, io->sec_hio);
	/* invoke the upper-layer completion, preferring status latched by an earlier abort */
	if (io->done != NULL) {
		ocs_hw_done_t  done = io->done;
		void		*arg = io->arg;
		if (io->status_saved) {
			/* use latched status if exists */
			status = io->saved_status;
			len = io->saved_len;
			ext = io->saved_ext;
			io->status_saved = 0;
		/* Restore default SGL */
		ocs_hw_io_restore_sgl(hw, io);
		done(io, io->rnode, len, status, ext, arg);
	/* deliver any auto-xfer-rdy command sequence that arrived before this completion */
	if(out_of_order_axr_cmd) {
		/* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
		if (hw->config.bounce) {
			fc_header_t *hdr = io->axr_buf->cmd_seq->header->dma.virt;
			uint32_t s_id = fc_be24toh(hdr->s_id);
			uint32_t d_id = fc_be24toh(hdr->d_id);
			uint32_t ox_id =  ocs_be16toh(hdr->ox_id);
			if (hw->callback.bounce != NULL) {
				(*hw->callback.bounce)(ocs_hw_unsol_process_bounce, io->axr_buf->cmd_seq, s_id, d_id, ox_id);
			hw->callback.unsolicited(hw->args.unsolicited, io->axr_buf->cmd_seq);
	/* likewise for an out-of-order auto-xfer-rdy data sequence */
	if(out_of_order_axr_data) {
		/* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
		if (hw->config.bounce) {
			fc_header_t *hdr = io->axr_buf->seq.header->dma.virt;
			uint32_t s_id = fc_be24toh(hdr->s_id);
			uint32_t d_id = fc_be24toh(hdr->d_id);
			uint32_t ox_id =  ocs_be16toh(hdr->ox_id);
			if (hw->callback.bounce != NULL) {
				(*hw->callback.bounce)(ocs_hw_unsol_process_bounce, &io->axr_buf->seq, s_id, d_id, ox_id);
			hw->callback.unsolicited(hw->args.unsolicited, &io->axr_buf->seq);
exit_ocs_hw_wq_process_io:
	/* presumably guarded by lock_taken — axr_lock is only held for port-owned TARGET_RSP IOs */
	ocs_unlock(&io->axr_lock);
8842 * @brief Process WQ completions for abort requests.
8844 * @param arg Generic callback argument.
8845 * @param cqe Pointer to completion queue entry.
8846 * @param status Completion status.
ocs_hw_wq_process_abort(void *arg, uint8_t *cqe, int32_t status)
	ocs_hw_io_t *io = arg;
	ocs_hw_t *hw = io->hw;
	hw_wq_callback_t *wqcb;
	/*
	 * For IOs that were aborted internally, we may need to issue the callback here depending
	 * on whether a XRI_ABORTED CQE is expected or not. If the status is Local Reject/No XRI, then
	 * issue the callback now.
	 */
	ext = sli_fc_ext_status(&hw->sli, cqe);
	if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT &&
	    ext == SLI4_FC_LOCAL_REJECT_NO_XRI &&
		ocs_hw_done_t  done = io->done;
		void		*arg = io->arg;
		/*
		 * Use latched status as this is always saved for an internal abort
		 *
		 * Note: We wont have both a done and abort_done function, so don't worry about
		 *       clobbering the len, status and ext fields.
		 */
		status = io->saved_status;
		len = io->saved_len;
		ext = io->saved_ext;
		io->status_saved = 0;
		done(io, io->rnode, len, status, ext, arg);
	/* notify the party that requested the abort, if any */
	if (io->abort_done != NULL) {
		ocs_hw_done_t  done = io->abort_done;
		void		*arg = io->abort_arg;
		io->abort_done = NULL;
		done(io, io->rnode, len, status, ext, arg);
	ocs_lock(&hw->io_abort_lock);
	/* clear abort bit to indicate abort is complete */
	io->abort_in_progress = 0;
	ocs_unlock(&hw->io_abort_lock);
	/* Free the WQ callback */
	ocs_hw_assert(io->abort_reqtag != UINT32_MAX);
	wqcb = ocs_hw_reqtag_get_instance(hw, io->abort_reqtag);
	ocs_hw_reqtag_free(hw, wqcb);
	/*
	 * Call ocs_hw_io_free() because this releases the WQ reservation as
	 * well as doing the refcount put. Don't duplicate the code here.
	 */
	(void)ocs_hw_io_free(hw, io);
8912 * @brief Process XABT completions
8914 * @param hw Hardware context.
8915 * @param cq Pointer to the HW completion queue object.
8916 * @param cqe Pointer to WQ completion queue.
8917 * @param rid Resource ID (IO tag).
ocs_hw_xabt_process(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe, uint16_t rid)
	/* search IOs wait free list */
	ocs_hw_io_t *io = NULL;
	io = ocs_hw_io_lookup(hw, rid);
	ocs_queue_history_cqe(&hw->q_hist, SLI_QENTRY_XABT, (void *)cqe, 0, cq->queue->id,
			      ((cq->queue->index - 1) & (cq->queue->length - 1)));
	/* IO lookup failure should never happen */
	ocs_log_err(hw->os, "Error: xabt io lookup failed rid=%#x\n", rid);
	ocs_log_debug(hw->os, "xabt io not busy rid=%#x\n", rid);
	/* mark IO as no longer busy */
	if (io->is_port_owned) {
		ocs_lock(&hw->io_lock);
		/* Take reference so that below callback will not free io before reque */
		ocs_ref_get(&io->ref);
		ocs_unlock(&hw->io_lock);
	/* For IOs that were aborted internally, we need to issue any pending callback here. */
	if (io->done != NULL) {
		ocs_hw_done_t  done = io->done;
		void		*arg = io->arg;
		/* Use latched status as this is always saved for an internal abort */
		int32_t status = io->saved_status;
		uint32_t len = io->saved_len;
		uint32_t ext = io->saved_ext;
		io->status_saved = 0;
		done(io, io->rnode, len, status, ext, arg);
	/* Check to see if this is a port owned XRI */
	if (io->is_port_owned) {
		ocs_lock(&hw->io_lock);
		ocs_hw_reque_xri(hw, io);
		ocs_unlock(&hw->io_lock);
		/* Not handling reque xri completion, free io */
		ocs_hw_io_free(hw, io);
	ocs_lock(&hw->io_lock);
	if ((io->state == OCS_HW_IO_STATE_INUSE) || (io->state == OCS_HW_IO_STATE_WAIT_FREE)) {
		/* if on wait_free list, caller has already freed IO;
		 * remove from wait_free list and add to free list.
		 * if on in-use list, already marked as no longer busy;
		 * just leave there and wait for caller to free.
		 */
		if (io->state == OCS_HW_IO_STATE_WAIT_FREE) {
			io->state = OCS_HW_IO_STATE_FREE;
			ocs_list_remove(&hw->io_wait_free, io);
			ocs_hw_io_free_move_correct_list(hw, io);
	ocs_unlock(&hw->io_lock);
8997 * @brief Adjust the number of WQs and CQs within the HW.
9000 * Calculates the number of WQs and associated CQs needed in the HW based on
9001 * the number of IOs. Calculates the starting CQ index for each WQ, RQ and
9004 * @param hw Hardware context allocated by the caller.
ocs_hw_adjust_wqs(ocs_hw_t *hw)
	uint32_t max_wq_num = sli_get_max_queue(&hw->sli, SLI_QTYPE_WQ);
	uint32_t max_wq_entries = hw->num_qentries[SLI_QTYPE_WQ];
	uint32_t max_cq_entries = hw->num_qentries[SLI_QTYPE_CQ];
	/*
	 * possibly adjust the size of the WQs so that the CQ is twice as
	 * big as the WQ to allow for 2 completions per IO. This allows us to
	 * handle multi-phase as well as aborts.
	 */
	if (max_cq_entries < max_wq_entries * 2) {
		max_wq_entries = hw->num_qentries[SLI_QTYPE_WQ] = max_cq_entries / 2;
	/*
	 * Calculate the number of WQs to use base on the number of IOs.
	 *
	 * Note: We need to reserve room for aborts which must be sent down
	 *       the same WQ as the IO. So we allocate enough WQ space to
	 *       handle 2 times the number of IOs. Half of the space will be
	 *       used for normal IOs and the other half is reserved for aborts.
	 */
	hw->config.n_wq = ((hw->config.n_io * 2) + (max_wq_entries - 1)) / max_wq_entries;
	/*
	 * For performance reasons, it is best to use a minimum of 4 WQs
	 * for BE3 and Skyhawk.
	 */
	if (hw->config.n_wq < 4 &&
	    SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) {
		hw->config.n_wq = 4;
	/*
	 * For dual-chute support, we need to have at least one WQ per chute.
	 */
	if (hw->config.n_wq < 2 &&
	    ocs_hw_get_num_chutes(hw) > 1) {
		hw->config.n_wq = 2;
	/* make sure we haven't exceeded the max supported in the HW */
	if (hw->config.n_wq > OCS_HW_MAX_NUM_WQ) {
		hw->config.n_wq = OCS_HW_MAX_NUM_WQ;
	/* make sure we haven't exceeded the chip maximum */
	if (hw->config.n_wq > max_wq_num) {
		hw->config.n_wq = max_wq_num;
	/*
	 * Using Queue Topology string, we divide by number of chutes
	 */
	hw->config.n_wq /= ocs_hw_get_num_chutes(hw);
ocs_hw_command_process(ocs_hw_t *hw, int32_t status, uint8_t *mqe, size_t size)
	ocs_command_ctx_t *ctx = NULL;
	/* pop the context of the command that just completed (commands complete in order) */
	ocs_lock(&hw->cmd_lock);
	if (NULL == (ctx = ocs_list_remove_head(&hw->cmd_head))) {
		ocs_log_err(hw->os, "XXX no command context?!?\n");
		ocs_unlock(&hw->cmd_lock);
	hw->cmd_head_count--;
	/* Post any pending requests */
	ocs_hw_cmd_submit_pending(hw);
	ocs_unlock(&hw->cmd_lock);
	/* copy the completed MQE into the caller's buffer, then dispatch its callback */
	ocs_memcpy(ctx->buf, mqe, size);
	ctx->cb(hw, status, ctx->buf, ctx->arg);
	/* context is owned here; scrub and release it */
	ocs_memset(ctx, 0, sizeof(ocs_command_ctx_t));
	ocs_free(hw->os, ctx, sizeof(ocs_command_ctx_t));
9101 * @brief Process entries on the given mailbox queue.
9103 * @param hw Hardware context.
9104 * @param status CQE status.
9105 * @param mq Pointer to the mailbox queue object.
9107 * @return Returns 0 on success, or a non-zero value on failure.
ocs_hw_mq_process(ocs_hw_t *hw, int32_t status, sli4_queue_t *mq)
	uint8_t		mqe[SLI4_BMBX_SIZE];
	/* read one MQ entry; on success hand it to the queued command's completion path */
	if (!sli_queue_read(&hw->sli, mq, mqe)) {
		ocs_hw_command_process(hw, status, mqe, mq->size);
9122 * @brief Read a FCF table entry.
9124 * @param hw Hardware context.
9125 * @param index Table index to read. Use SLI4_FCOE_FCF_TABLE_FIRST for the first
9126 * read and the next_index field from the FCOE_READ_FCF_TABLE command
9127 * for subsequent reads.
9129 * @return Returns 0 on success, or a non-zero value on failure.
ocs_hw_read_fcf(ocs_hw_t *hw, uint32_t index)
	uint8_t *buf = NULL;
	int32_t rc = OCS_HW_RTN_ERROR;
	buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
		ocs_log_err(hw->os, "no buffer for command\n");
		return OCS_HW_RTN_NO_MEMORY;
	/* results land in hw->fcf_dmem, which is also passed as the callback argument */
	if (sli_cmd_fcoe_read_fcf_table(&hw->sli, buf, SLI4_BMBX_SIZE, &hw->fcf_dmem,
		rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_read_fcf, &hw->fcf_dmem);
	if (rc != OCS_HW_RTN_SUCCESS) {
		/* not queued: callback will not run, free the mailbox buffer here */
		ocs_log_test(hw->os, "FCOE_READ_FCF_TABLE failed\n");
		ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
9157 * @brief Callback function for the FCOE_READ_FCF_TABLE command.
9160 * Note that the caller has allocated:
9161 * - DMA memory to hold the table contents
9162 * - DMA memory structure
9163 * - Command/results buffer
9165 * Each of these must be freed here.
9167 * @param hw Hardware context.
9168 * @param status Hardware status.
9169 * @param mqe Pointer to the mailbox command/results buffer.
9170 * @param arg Pointer to the DMA memory structure.
9172 * @return Returns 0 on success, or a non-zero value on failure.
ocs_hw_cb_read_fcf(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
	ocs_dma_t *dma = arg;
	sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
	if (status || hdr->status) {
		ocs_log_test(hw->os, "bad status cqe=%#x mqe=%#x\n",
			     status, hdr->status);
	} else if (dma->virt) {
		sli4_res_fcoe_read_fcf_table_t *read_fcf = dma->virt;
		/* if FC or FCOE and FCF entry valid, process it */
		if (read_fcf->fcf_entry.fc ||
		    (read_fcf->fcf_entry.val && !read_fcf->fcf_entry.sol)) {
			if (hw->callback.domain != NULL) {
				ocs_domain_record_t drec = {0};
				if (read_fcf->fcf_entry.fc) {
					/*
					 * This is a pseudo FCF entry. Create a domain
					 * record based on the read topology information
					 */
					drec.speed = hw->link.speed;
					drec.fc_id = hw->link.fc_id;
					if (SLI_LINK_TOPO_LOOP == hw->link.topology) {
						drec.is_loop = TRUE;
						ocs_memcpy(drec.map.loop, hw->link.loop_map,
							   sizeof(drec.map.loop));
					} else if (SLI_LINK_TOPO_NPORT == hw->link.topology) {
						drec.is_nport = TRUE;
					/* real FCoE FCF entry: copy identifying fields from the table */
					drec.index = read_fcf->fcf_entry.fcf_index;
					drec.priority = read_fcf->fcf_entry.fip_priority;
					/* copy address, wwn and vlan_bitmap */
					ocs_memcpy(drec.address, read_fcf->fcf_entry.fcf_mac_address,
						   sizeof(drec.address));
					ocs_memcpy(drec.wwn, read_fcf->fcf_entry.fabric_name_id,
					ocs_memcpy(drec.map.vlan, read_fcf->fcf_entry.vlan_bitmap,
						   sizeof(drec.map.vlan));
					drec.is_ethernet = TRUE;
					drec.is_nport = TRUE;
				/* report the discovered domain to the upper layer */
				hw->callback.domain(hw->args.domain,
						    OCS_HW_DOMAIN_FOUND,
			/* if FCOE and FCF is not valid, ignore it */
			ocs_log_test(hw->os, "ignore invalid FCF entry\n");
		/* chain the next table read until the last entry is reached */
		if (SLI4_FCOE_FCF_TABLE_LAST != read_fcf->next_index) {
			ocs_hw_read_fcf(hw, read_fcf->next_index);
	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
	/* DMA buffer (hw->fcf_dmem) is long-lived and deliberately not freed here */
	//ocs_dma_free(hw->os, dma);
	//ocs_free(hw->os, dma, sizeof(ocs_dma_t));
9245 * @brief Callback function for the SLI link events.
9248 * This function allocates memory which must be freed in its callback.
9250 * @param ctx Hardware context pointer (that is, ocs_hw_t *).
9251 * @param e Event structure pointer (that is, sli4_link_event_t *).
9253 * @return Returns 0 on success, or a non-zero value on failure.
ocs_hw_cb_link(void *ctx, void *e)
	sli4_link_event_t *event = e;
	ocs_domain_t *d = NULL;
	int32_t rc = OCS_HW_RTN_ERROR;
	ocs_t *ocs = hw->os;
	ocs_hw_link_event_init(hw);
	switch (event->status) {
	case SLI_LINK_STATUS_UP:
		if (SLI_LINK_TOPO_NPORT == event->topology) {
			device_printf(ocs->dev, "Link Up, NPORT, speed is %d\n", event->speed);
			/* point-to-point/fabric: start FCF discovery from the top of the table */
			ocs_hw_read_fcf(hw, SLI4_FCOE_FCF_TABLE_FIRST);
		} else if (SLI_LINK_TOPO_LOOP == event->topology) {
			uint8_t	*buf = NULL;
			device_printf(ocs->dev, "Link Up, LOOP, speed is %d\n", event->speed);
			/* arbitrated loop: read the loop map before reporting the domain */
			buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
				ocs_log_err(hw->os, "no buffer for command\n");
			if (sli_cmd_read_topology(&hw->sli, buf, SLI4_BMBX_SIZE, &hw->loop_map)) {
				rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, __ocs_read_topology_cb, NULL);
			if (rc != OCS_HW_RTN_SUCCESS) {
				ocs_log_test(hw->os, "READ_TOPOLOGY failed\n");
				ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
			device_printf(ocs->dev, "Link Up, unsupported topology (%#x), speed is %d\n",
				      event->topology, event->speed);
	case SLI_LINK_STATUS_DOWN:
		device_printf(ocs->dev, "Link Down\n");
		hw->link.status = event->status;
		/* NOTE(review): the comma expression loads hw->domains[i] before the
		 * i < SLI4_MAX_FCFI test, so the terminating iteration reads one
		 * element past the array — confirm and consider reordering. */
		for (i = 0; d = hw->domains[i], i < SLI4_MAX_FCFI; i++) {
			    hw->callback.domain != NULL) {
				hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, d);
		ocs_log_test(hw->os, "unhandled link status %#x\n", event->status);
/*
 * @brief FIP event callback.
 *
 * First resolves the domain the event refers to: CLEAR_VLINK carries a
 * VPI (so all attached SLI ports are searched), while the other event
 * types carry an FCF index.  Then DISCOVERED/MODIFIED trigger an FCF
 * table read, and DEAD/CLEAR_VLINK report the domain as lost.
 */
9319 ocs_hw_cb_fip(void *ctx, void *e)
9322 ocs_domain_t *domain = NULL;
9323 sli4_fip_event_t *event = e;
9325 /* Find the associated domain object */
9326 if (event->type == SLI4_FCOE_FIP_FCF_CLEAR_VLINK) {
9327 ocs_domain_t *d = NULL;
9330 /* Clear VLINK is different from the other FIP events as it passes back
9331 * a VPI instead of a FCF index. Check all attached SLI ports for a
/* NOTE(review): same comma-expression bound quirk as ocs_hw_cb_link --
 * hw->domains[i] is read before i is range-checked. */
9333 for (i = 0; d = hw->domains[i], i < SLI4_MAX_FCFI; i++) {
9335 ocs_sport_t *sport = NULL;
9337 ocs_list_foreach(&d->sport_list, sport) {
9338 if (sport->indicator == event->index) {
9344 if (domain != NULL) {
9350 domain = ocs_hw_domain_get_indexed(hw, event->index);
9353 switch (event->type) {
9354 case SLI4_FCOE_FIP_FCF_DISCOVERED:
9355 ocs_hw_read_fcf(hw, event->index);
9357 case SLI4_FCOE_FIP_FCF_DEAD:
9358 if (domain != NULL &&
9359 hw->callback.domain != NULL) {
9360 hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain);
9363 case SLI4_FCOE_FIP_FCF_CLEAR_VLINK:
9364 if (domain != NULL &&
9365 hw->callback.domain != NULL) {
9367 * We will want to issue rediscover FCF when this domain is free'd in order
9368 * to invalidate the FCF table
9370 domain->req_rediscover_fcf = TRUE;
9371 hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain);
9374 case SLI4_FCOE_FIP_FCF_MODIFIED:
9375 if (domain != NULL &&
9376 hw->callback.domain != NULL) {
/* Drop the stale domain, then re-read the modified FCF entry. */
9377 hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain);
9380 ocs_hw_read_fcf(hw, event->index);
9383 ocs_log_test(hw->os, "unsupported event %#x\n", event->type);
/*
 * @brief Node attach (REG_RPI) mailbox completion callback.
 *
 * On a bad CQE or mailbox status the RPI reference taken at submit time
 * is dropped and the node is marked detached; otherwise the node is
 * marked attached.  The result event is forwarded to the registered
 * rnode callback and the mailbox buffer is freed.
 */
9390 ocs_hw_cb_node_attach(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9392 ocs_remote_node_t *rnode = arg;
9393 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
9394 ocs_hw_remote_node_event_e evt = 0;
9396 if (status || hdr->status) {
9397 ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
/* Undo the rpi_count reference taken when the attach was issued. */
9399 ocs_atomic_sub_return(&hw->rpi_ref[rnode->index].rpi_count, 1);
9400 rnode->attached = FALSE;
9401 ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 0);
9402 evt = OCS_HW_NODE_ATTACH_FAIL;
9404 rnode->attached = TRUE;
9405 ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 1);
9406 evt = OCS_HW_NODE_ATTACH_OK;
9409 if (hw->callback.rnode != NULL) {
9410 hw->callback.rnode(hw->args.rnode, evt, rnode);
9412 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
/*
 * @brief Node free (UNREG_RPI) mailbox completion callback.
 *
 * A non-zero mailbox status is tolerated when the node was attached,
 * (with HLM) belonged to a node group, and the status is the
 * "RPI not registered" code; any other failure keeps the
 * OCS_HW_NODE_FREE_FAIL event.  On success the node bookkeeping is
 * cleared and the rnode callback is notified.
 */
9418 ocs_hw_cb_node_free(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9420 ocs_remote_node_t *rnode = arg;
9421 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
9422 ocs_hw_remote_node_event_e evt = OCS_HW_NODE_FREE_FAIL;
9425 if (status || hdr->status) {
9426 ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
9430 * In certain cases, a non-zero MQE status is OK (all must be true):
9431 * - node is attached
9432 * - if High Login Mode is enabled, node is part of a node group
9433 * - status is 0x1400
9435 if (!rnode->attached || ((sli_get_hlm(&hw->sli) == TRUE) && !rnode->node_group) ||
9436 (hdr->status != SLI4_MBOX_STATUS_RPI_NOT_REG)) {
9442 rnode->node_group = FALSE;
9443 rnode->attached = FALSE;
/* Only clear the attached flag once no references remain on this RPI. */
9445 if (ocs_atomic_read(&hw->rpi_ref[rnode->index].rpi_count) == 0) {
9446 ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 0);
9449 evt = OCS_HW_NODE_FREE_OK;
9452 if (hw->callback.rnode != NULL) {
9453 hw->callback.rnode(hw->args.rnode, evt, rnode);
9456 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
/*
 * @brief Free-all-nodes (UNREG_RPI all) mailbox completion callback.
 *
 * On success, zeroes every RPI reference count and resets the FCOE_RPI
 * resource pool, then notifies the rnode callback with a NULL node.
 * The mailbox buffer is freed in all cases.
 */
9462 ocs_hw_cb_node_free_all(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9464 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
9465 ocs_hw_remote_node_event_e evt = OCS_HW_NODE_FREE_FAIL;
9469 if (status || hdr->status) {
9470 ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
9473 evt = OCS_HW_NODE_FREE_ALL_OK;
9476 if (evt == OCS_HW_NODE_FREE_ALL_OK) {
9477 for (i = 0; i < sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI); i++) {
9478 ocs_atomic_set(&hw->rpi_ref[i].rpi_count, 0);
/* Return every RPI to the SLI resource pool in one shot. */
9481 if (sli_resource_reset(&hw->sli, SLI_RSRC_FCOE_RPI)) {
9482 ocs_log_test(hw->os, "FCOE_RPI free all failure\n");
9487 if (hw->callback.rnode != NULL) {
9488 hw->callback.rnode(hw->args.rnode, evt, NULL);
9491 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9497 * @brief Initialize the pool of HW IO objects.
9499 * @param hw Hardware context.
9501 * @return Returns 0 on success, or a non-zero value on failure.
/*
 * @brief Initialize the pool of HW IO objects.
 *
 * Allocates (or on re-init reuses) the array of ocs_hw_io_t objects,
 * their WQE staging buffers, a shared XFER_RDY DMA region carved into
 * per-IO slices, and for each IO a request tag, an XRI, and a default
 * SGL.  Returns OCS_HW_RTN_SUCCESS, or a NO_MEMORY/NO_RESOURCES code
 * on the first failing allocation.
 *
 * @param hw Hardware context.
 * @return Returns 0 on success, or a non-zero value on failure.
 */
9504 ocs_hw_setup_io(ocs_hw_t *hw)
9507 ocs_hw_io_t *io = NULL;
9508 uintptr_t xfer_virt = 0;
9509 uintptr_t xfer_phys = 0;
/* TRUE for a first-time allocation; cleared when reusing existing IOs. */
9511 uint8_t new_alloc = TRUE;
9513 if (NULL == hw->io) {
9514 hw->io = ocs_malloc(hw->os, hw->config.n_io * sizeof(ocs_hw_io_t *), OCS_M_ZERO | OCS_M_NOWAIT);
9516 if (NULL == hw->io) {
9517 ocs_log_err(hw->os, "IO pointer memory allocation failed, %d Ios at size %zu\n",
9519 sizeof(ocs_hw_io_t *));
9520 return OCS_HW_RTN_NO_MEMORY;
9522 for (i = 0; i < hw->config.n_io; i++) {
9523 hw->io[i] = ocs_malloc(hw->os, sizeof(ocs_hw_io_t),
9524 OCS_M_ZERO | OCS_M_NOWAIT);
9525 if (hw->io[i] == NULL) {
9526 ocs_log_err(hw->os, "IO(%d) memory allocation failed\n", i);
9531 /* Create WQE buffs for IO */
9532 hw->wqe_buffs = ocs_malloc(hw->os, hw->config.n_io * hw->sli.config.wqe_size,
9533 OCS_M_ZERO | OCS_M_NOWAIT);
9534 if (NULL == hw->wqe_buffs) {
/* NOTE(review): hw->io was allocated with sizeof(ocs_hw_io_t *) per
 * entry but is freed here with sizeof(ocs_hw_io_t); the size arguments
 * disagree, and hw->io is left dangling (not set to NULL). */
9535 ocs_free(hw->os, hw->io, hw->config.n_io * sizeof(ocs_hw_io_t));
9536 ocs_log_err(hw->os, "%s: IO WQE buff allocation failed, %d Ios at size %zu\n",
9537 __func__, hw->config.n_io, hw->sli.config.wqe_size);
9538 return OCS_HW_RTN_NO_MEMORY;
9542 /* re-use existing IOs, including SGLs */
/* One shared XFER_RDY DMA buffer, sliced per IO further below. */
9547 if (ocs_dma_alloc(hw->os, &hw->xfer_rdy,
9548 sizeof(fcp_xfer_rdy_iu_t) * hw->config.n_io,
9549 4/*XXX what does this need to be? */)) {
9550 ocs_log_err(hw->os, "XFER_RDY buffer allocation failed\n");
9551 return OCS_HW_RTN_NO_MEMORY;
9554 xfer_virt = (uintptr_t)hw->xfer_rdy.virt;
9555 xfer_phys = hw->xfer_rdy.phys;
9557 for (i = 0; i < hw->config.n_io; i++) {
9558 hw_wq_callback_t *wqcb;
9562 /* initialize IO fields */
9565 /* Assign a WQE buff */
9566 io->wqe.wqebuf = &hw->wqe_buffs[i * hw->sli.config.wqe_size];
9568 /* Allocate the request tag for this IO */
9569 wqcb = ocs_hw_reqtag_alloc(hw, ocs_hw_wq_process_io, io);
9571 ocs_log_err(hw->os, "can't allocate request tag\n");
9572 return OCS_HW_RTN_NO_RESOURCES;
9574 io->reqtag = wqcb->instance_index;
9576 /* Now for the fields that are initialized on each free */
9577 ocs_hw_init_free_io(io);
9579 /* The XB flag isn't cleared on IO free, so initialize it to zero here */
9582 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_XRI, &io->indicator, &index)) {
9583 ocs_log_err(hw->os, "sli_resource_alloc failed @ %d\n", i);
9584 return OCS_HW_RTN_NO_MEMORY;
/* Default SGL is only allocated on first-time setup; reuse keeps it. */
9587 if (new_alloc && ocs_dma_alloc(hw->os, &io->def_sgl, hw->config.n_sgl * sizeof(sli4_sge_t), 64)) {
9588 ocs_log_err(hw->os, "ocs_dma_alloc failed @ %d\n", i);
9589 ocs_memset(&io->def_sgl, 0, sizeof(ocs_dma_t));
9590 return OCS_HW_RTN_NO_MEMORY;
9592 io->def_sgl_count = hw->config.n_sgl;
9593 io->sgl = &io->def_sgl;
9594 io->sgl_count = io->def_sgl_count;
9596 if (hw->xfer_rdy.size) {
9597 io->xfer_rdy.virt = (void *)xfer_virt;
9598 io->xfer_rdy.phys = xfer_phys;
9599 io->xfer_rdy.size = sizeof(fcp_xfer_rdy_iu_t);
9601 xfer_virt += sizeof(fcp_xfer_rdy_iu_t);
9602 xfer_phys += sizeof(fcp_xfer_rdy_iu_t);
9606 return OCS_HW_RTN_SUCCESS;
/* Error path: release any per-IO objects allocated before the failure. */
9608 for (i = 0; i < hw->config.n_io && hw->io[i]; i++) {
9609 ocs_free(hw->os, hw->io[i], sizeof(ocs_hw_io_t));
9613 return OCS_HW_RTN_NO_MEMORY;
/*
 * @brief Post the IO pool's SGLs to the hardware and build the free list.
 *
 * When SGL pre-registration is enabled, batches up to sgls_per_request
 * contiguous-XRI SGLs into FCOE_POST_SGL_PAGES mailbox commands (issued
 * by polling); successfully posted IOs are placed on the hw->io_free
 * list in FREE state.  The scratch sgls[] array and request DMA buffer
 * are released before returning.
 */
9617 ocs_hw_init_io(ocs_hw_t *hw)
9619 uint32_t i = 0, io_index = 0;
9620 uint32_t prereg = 0;
9621 ocs_hw_io_t *io = NULL;
9622 uint8_t cmd[SLI4_BMBX_SIZE];
9623 ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
9624 uint32_t nremaining;
/* Batch size for each FCOE_POST_SGL_PAGES request. */
9626 uint32_t sgls_per_request = 256;
9627 ocs_dma_t **sgls = NULL;
9628 ocs_dma_t reqbuf = { 0 };
9630 prereg = sli_get_sgl_preregister(&hw->sli);
9633 sgls = ocs_malloc(hw->os, sizeof(*sgls) * sgls_per_request, OCS_M_NOWAIT);
9635 ocs_log_err(hw->os, "ocs_malloc sgls failed\n");
9636 return OCS_HW_RTN_NO_MEMORY;
9639 rc = ocs_dma_alloc(hw->os, &reqbuf, 32 + sgls_per_request*16, OCS_MIN_DMA_ALIGNMENT);
9641 ocs_log_err(hw->os, "ocs_dma_alloc reqbuf failed\n");
9642 ocs_free(hw->os, sgls, sizeof(*sgls) * sgls_per_request);
9643 return OCS_HW_RTN_NO_MEMORY;
9647 io = hw->io[io_index];
9648 for (nremaining = hw->config.n_io; nremaining; nremaining -= n) {
9650 /* Copy address of SGL's into local sgls[] array, break out if the xri
9651 * is not contiguous.
9653 for (n = 0; n < MIN(sgls_per_request, nremaining); n++) {
9654 /* Check that we have contiguous xri values */
9656 if (hw->io[io_index + n]->indicator != (hw->io[io_index + n-1]->indicator+1)) {
9660 sgls[n] = hw->io[io_index + n]->sgl;
9663 if (sli_cmd_fcoe_post_sgl_pages(&hw->sli, cmd, sizeof(cmd),
9664 io->indicator, n, sgls, NULL, &reqbuf)) {
9665 if (ocs_hw_command(hw, cmd, OCS_CMD_POLL, NULL, NULL)) {
9666 rc = OCS_HW_RTN_ERROR;
9667 ocs_log_err(hw->os, "SGL post failed\n");
9675 /* Add to tail if successful */
/* NOTE(review): io_index does not visibly advance inside this loop in
 * the code shown here (io is re-read from hw->io[io_index+1] each pass)
 * -- confirm against the full source that io_index is incremented. */
9676 for (i = 0; i < n; i ++) {
9677 io->is_port_owned = 0;
9678 io->state = OCS_HW_IO_STATE_FREE;
9679 ocs_list_add_tail(&hw->io_free, io);
9680 io = hw->io[io_index+1];
9686 ocs_dma_free(hw->os, &reqbuf);
9687 ocs_free(hw->os, sgls, sizeof(*sgls) * sgls_per_request);
/*
 * @brief Drain outstanding completions by servicing every event queue once.
 *
 * @param hw Hardware context.
 */
9694 ocs_hw_flush(ocs_hw_t *hw)
9698 /* Process any remaining completions */
9699 for (i = 0; i < hw->eq_count; i++) {
/* ~0 requests processing with no entry-count cap on this EQ. */
9700 ocs_hw_process(hw, i, ~0);
/*
 * @brief Fail all outstanding mailbox commands.
 *
 * Under cmd_lock, repeatedly completes the head of cmd_head with a
 * forced bad status until the list is empty.  The lock is dropped
 * around ocs_hw_command_process() because that call runs completion
 * callbacks (and also drains the cmd_pending list).
 */
9707 ocs_hw_command_cancel(ocs_hw_t *hw)
9710 ocs_lock(&hw->cmd_lock);
9713 * Manually clean up remaining commands. Note: since this calls
9714 * ocs_hw_command_process(), we'll also process the cmd_pending
9715 * list, so no need to manually clean that out.
9717 while (!ocs_list_empty(&hw->cmd_head)) {
9718 uint8_t mqe[SLI4_BMBX_SIZE] = { 0 };
9719 ocs_command_ctx_t *ctx = ocs_list_get_head(&hw->cmd_head);
/* Log the first word of the hung command, guarding NULL ctx/buf. */
9721 ocs_log_test(hw->os, "hung command %08x\n",
9722 NULL == ctx ? UINT32_MAX :
9723 (NULL == ctx->buf ? UINT32_MAX : *((uint32_t *)ctx->buf)));
9724 ocs_unlock(&hw->cmd_lock);
9725 ocs_hw_command_process(hw, -1/*Bad status*/, mqe, SLI4_BMBX_SIZE);
9726 ocs_lock(&hw->cmd_lock);
9729 ocs_unlock(&hw->cmd_lock);
9735 * @brief Find IO given indicator (xri).
9737 * @param hw HW context.
9738 * @param xri Indicator (XRI) to look for.
9740 * @return Returns io if found, NULL otherwise.
/*
 * @brief Map an XRI to its HW IO object.
 *
 * The IO array is indexed by XRI relative to the base of the first
 * FCOE_XRI extent.  No bounds check is performed here; callers are
 * expected to pass an XRI owned by this HW instance.
 */
9743 ocs_hw_io_lookup(ocs_hw_t *hw, uint32_t xri)
9746 ioindex = xri - hw->sli.config.extent[SLI_RSRC_FCOE_XRI].base[0];
9747 return hw->io[ioindex];
9751 * @brief Issue any pending callbacks for an IO and remove off the timer and pending lists.
9753 * @param hw HW context.
9754 * @param io Pointer to the IO to cleanup.
/*
 * @brief Issue pending done/abort callbacks for an IO and unlink it from
 * the timed-WQE and WQ pending lists.
 *
 * Called with hw->io_lock held (see ocs_hw_io_cancel); the lock is
 * released while each callback runs and reacquired afterwards.  Both
 * callbacks are invoked with SLI4_FC_WCQE_STATUS_SHUTDOWN.
 */
9757 ocs_hw_io_cancel_cleanup(ocs_hw_t *hw, ocs_hw_io_t *io)
/* Snapshot the callbacks before they are cleared/raced on unlock. */
9759 ocs_hw_done_t done = io->done;
9760 ocs_hw_done_t abort_done = io->abort_done;
9762 /* first check active_wqe list and remove if there */
9763 if (ocs_list_on_list(&io->wqe_link)) {
9764 ocs_list_remove(&hw->io_timed_wqe, io);
9767 /* Remove from WQ pending list */
9768 if ((io->wq != NULL) && ocs_list_on_list(&io->wq->pending_list)) {
9769 ocs_list_remove(&io->wq->pending_list, io);
9773 void *arg = io->arg;
/* Drop io_lock across the callback to avoid recursive locking. */
9776 ocs_unlock(&hw->io_lock);
9777 done(io, io->rnode, 0, SLI4_FC_WCQE_STATUS_SHUTDOWN, 0, arg);
9778 ocs_lock(&hw->io_lock);
9781 if (io->abort_done != NULL) {
9782 void *abort_arg = io->abort_arg;
9784 io->abort_done = NULL;
9785 ocs_unlock(&hw->io_lock);
9786 abort_done(io, io->rnode, 0, SLI4_FC_WCQE_STATUS_SHUTDOWN, 0, abort_arg);
9787 ocs_lock(&hw->io_lock);
/*
 * @brief Cancel all outstanding IO during reset/shutdown.
 *
 * Walks the in-use list once, issuing shutdown callbacks (the backend
 * frees the IOs from those callbacks); IOs with no callbacks are freed
 * directly here.  Port-owned XRIs are handled separately since they
 * never appear on the in-use list.  Finally waits up to ~1 second for
 * the in-use list to drain and logs if it does not.
 */
9792 ocs_hw_io_cancel(ocs_hw_t *hw)
9794 ocs_hw_io_t *io = NULL;
9795 ocs_hw_io_t *tmp_io = NULL;
9796 uint32_t iters = 100; /* One second limit */
9799 * Manually clean up outstanding IO.
9800 * Only walk through list once: the backend will cleanup any IOs when done/abort_done is called.
9802 ocs_lock(&hw->io_lock);
9803 ocs_list_foreach_safe(&hw->io_inuse, io, tmp_io) {
9804 ocs_hw_done_t done = io->done;
9805 ocs_hw_done_t abort_done = io->abort_done;
9807 ocs_hw_io_cancel_cleanup(hw, io);
9810 * Since this is called in a reset/shutdown
9811 * case, If there is no callback, then just
9814 * Note: A port owned XRI cannot be on
9815 * the in use list. We cannot call
9816 * ocs_hw_io_free() because we already
9820 abort_done == NULL) {
9822 * Since this is called in a reset/shutdown
9823 * case, If there is no callback, then just
/* No callbacks registered: free the IO inline instead. */
9826 ocs_hw_io_free_common(hw, io);
9827 ocs_list_remove(&hw->io_inuse, io);
9828 ocs_hw_io_free_move_correct_list(hw, io);
9833 * For port owned XRIs, they are not on the in use list, so
9834 * walk though XRIs and issue any callbacks.
9836 ocs_list_foreach_safe(&hw->io_port_owned, io, tmp_io) {
9837 /* check list and remove if there */
9838 if (ocs_list_on_list(&io->dnrx_link)) {
9839 ocs_list_remove(&hw->io_port_dnrx, io);
9840 ocs_ref_put(&io->ref); /* ocs_ref_get(): same function */
9842 ocs_hw_io_cancel_cleanup(hw, io);
9843 ocs_list_remove(&hw->io_port_owned, io);
9844 ocs_hw_io_free_common(hw, io);
9846 ocs_unlock(&hw->io_lock);
9848 /* Give time for the callbacks to complete */
/* Poll (bounded by iters) until the backend has drained io_inuse. */
9852 } while (!ocs_list_empty(&hw->io_inuse) && iters);
9854 /* Leave a breadcrumb that cleanup is not yet complete. */
9855 if (!ocs_list_empty(&hw->io_inuse)) {
9856 ocs_log_test(hw->os, "io_inuse list is not empty\n");
/*
 * @brief Fill the IO's default SGL with initiator command and response
 * entries.
 *
 * The first SGE addresses the command buffer (using the caller-supplied
 * cmnd_size rather than cmnd->size), the next the response buffer.
 */
9863 ocs_hw_io_ini_sge(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_dma_t *cmnd, uint32_t cmnd_size,
9866 sli4_sge_t *data = NULL;
/* hw->os is unavailable when hw is NULL, so log without a context. */
9869 ocs_log_err(NULL, "bad parm hw=%p io=%p\n", hw, io);
9870 return OCS_HW_RTN_ERROR;
9873 data = io->def_sgl.virt;
9875 /* setup command pointer */
9876 data->buffer_address_high = ocs_addr32_hi(cmnd->phys);
9877 data->buffer_address_low = ocs_addr32_lo(cmnd->phys);
9878 data->buffer_length = cmnd_size;
9881 /* setup response pointer */
9882 data->buffer_address_high = ocs_addr32_hi(rsp->phys);
9883 data->buffer_address_low = ocs_addr32_lo(rsp->phys);
9884 data->buffer_length = rsp->size;
/*
 * @brief READ_TOPOLOGY mailbox completion callback.
 *
 * Decodes the attention type, topology, and link speed into hw->link,
 * then frees the mailbox buffer.  For FC-AL with the link up, the loop
 * map buffer is published and the acquired AL_PA becomes the FC ID.
 * On success the FCF table walk is (re)started.
 */
9890 __ocs_read_topology_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9892 sli4_cmd_read_topology_t *read_topo = (sli4_cmd_read_topology_t *)mqe;
9894 if (status || read_topo->hdr.status) {
9895 ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n",
9896 status, read_topo->hdr.status);
9897 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9901 switch (read_topo->attention_type) {
9902 case SLI4_READ_TOPOLOGY_LINK_UP:
9903 hw->link.status = SLI_LINK_STATUS_UP;
9905 case SLI4_READ_TOPOLOGY_LINK_DOWN:
9906 hw->link.status = SLI_LINK_STATUS_DOWN;
9908 case SLI4_READ_TOPOLOGY_LINK_NO_ALPA:
9909 hw->link.status = SLI_LINK_STATUS_NO_ALPA;
9912 hw->link.status = SLI_LINK_STATUS_MAX;
9916 switch (read_topo->topology) {
9917 case SLI4_READ_TOPOLOGY_NPORT:
9918 hw->link.topology = SLI_LINK_TOPO_NPORT;
9920 case SLI4_READ_TOPOLOGY_FC_AL:
9921 hw->link.topology = SLI_LINK_TOPO_LOOP;
9922 if (SLI_LINK_STATUS_UP == hw->link.status) {
9923 hw->link.loop_map = hw->loop_map.virt;
/* On loop, the acquired AL_PA is this port's FC address. */
9925 hw->link.fc_id = read_topo->acquired_al_pa;
9928 hw->link.topology = SLI_LINK_TOPO_MAX;
9932 hw->link.medium = SLI_LINK_MEDIUM_FC;
9934 switch (read_topo->link_current.link_speed) {
9935 case SLI4_READ_TOPOLOGY_SPEED_1G:
9936 hw->link.speed = 1 * 1000;
9938 case SLI4_READ_TOPOLOGY_SPEED_2G:
9939 hw->link.speed = 2 * 1000;
9941 case SLI4_READ_TOPOLOGY_SPEED_4G:
9942 hw->link.speed = 4 * 1000;
9944 case SLI4_READ_TOPOLOGY_SPEED_8G:
9945 hw->link.speed = 8 * 1000;
9947 case SLI4_READ_TOPOLOGY_SPEED_16G:
9948 hw->link.speed = 16 * 1000;
/* Loop map is cleared at 16G/32G -- presumably loop topology is not
 * supported at these speeds; confirm against the SLI-4 spec. */
9949 hw->link.loop_map = NULL;
9951 case SLI4_READ_TOPOLOGY_SPEED_32G:
9952 hw->link.speed = 32 * 1000;
9953 hw->link.loop_map = NULL;
9957 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9959 ocs_hw_read_fcf(hw, SLI4_FCOE_FCF_TABLE_FIRST);
/*
 * @brief Common catch-all handler for the port (sport) state machine.
 *
 * Frees the mailbox buffer on deferred free/attach requests and logs
 * any event a specific state did not handle, tagged with the calling
 * state's function name.
 */
9965 __ocs_hw_port_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
9967 ocs_sli_port_t *sport = ctx->app;
9968 ocs_hw_t *hw = sport->hw;
9977 case OCS_EVT_HW_PORT_REQ_FREE:
9978 case OCS_EVT_HW_PORT_REQ_ATTACH:
9980 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
9984 ocs_log_test(hw->os, "%s %-20s not handled\n", funcname, ocs_sm_event_name(evt));
/*
 * @brief Port SM state: report that freeing the port failed.
 *
 * On entry, releases the mailbox buffer (if any) and notifies the
 * registered port callback with OCS_HW_PORT_FREE_FAIL.
 */
9992 __ocs_hw_port_free_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
9994 ocs_sli_port_t *sport = ctx->app;
9995 ocs_hw_t *hw = sport->hw;
10000 case OCS_EVT_ENTER:
10001 if (data != NULL) {
10002 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10004 if (hw->callback.port != NULL) {
10005 hw->callback.port(hw->args.port,
10006 OCS_HW_PORT_FREE_FAIL, sport);
/*
 * @brief Port SM state: port successfully freed.
 *
 * On entry, returns the VPI to the SLI resource pool, releases the
 * mailbox buffer, and notifies the port callback with
 * OCS_HW_PORT_FREE_OK.
 */
10017 __ocs_hw_port_freed(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10019 ocs_sli_port_t *sport = ctx->app;
10020 ocs_hw_t *hw = sport->hw;
10025 case OCS_EVT_ENTER:
10026 /* free SLI resource */
10027 if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator)) {
10028 ocs_log_err(hw->os, "FCOE_VPI free failure addr=%#x\n", sport->fc_id);
10031 /* free mailbox buffer */
10032 if (data != NULL) {
10033 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10035 if (hw->callback.port != NULL) {
10036 hw->callback.port(hw->args.port,
10037 OCS_HW_PORT_FREE_OK, sport);
/*
 * @brief Port SM state: report that the port attach (REG_VPI) failed.
 *
 * On entry, releases the VPI and the mailbox buffer, notifies the port
 * callback with OCS_HW_PORT_ATTACH_FAIL, and honors a pending free
 * request by moving to the unreg_vpi state.
 */
10048 __ocs_hw_port_attach_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10050 ocs_sli_port_t *sport = ctx->app;
10051 ocs_hw_t *hw = sport->hw;
10056 case OCS_EVT_ENTER:
10057 /* free SLI resource */
10058 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
10060 /* free mailbox buffer */
10061 if (data != NULL) {
10062 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10065 if (hw->callback.port != NULL) {
10066 hw->callback.port(hw->args.port,
10067 OCS_HW_PORT_ATTACH_FAIL, sport);
10069 if (sport->sm_free_req_pending) {
10070 ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10074 __ocs_hw_port_common(__func__, ctx, evt, data);
/*
 * @brief Port SM state: issue UNREG_VPI to free the port.
 *
 * On entry, allocates a mailbox buffer and submits UNREG_VPI; any
 * allocation/format/submit failure posts OCS_EVT_ERROR.  A RESPONSE
 * moves to the freed state, an ERROR to the free-failure report state.
 */
10082 __ocs_hw_port_free_unreg_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10084 ocs_sli_port_t *sport = ctx->app;
10085 ocs_hw_t *hw = sport->hw;
10086 uint8_t *cmd = NULL;
10091 case OCS_EVT_ENTER:
10092 /* allocate memory and send unreg_vpi */
10093 cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
10095 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10099 if (0 == sli_cmd_unreg_vpi(&hw->sli, cmd, SLI4_BMBX_SIZE, sport->indicator,
10100 SLI4_UNREG_TYPE_PORT)) {
10101 ocs_log_err(hw->os, "UNREG_VPI format failure\n");
10102 ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
10103 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10107 if (ocs_hw_command(hw, cmd, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
10108 ocs_log_err(hw->os, "UNREG_VPI command failure\n");
10109 ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
10110 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10114 case OCS_EVT_RESPONSE:
10115 ocs_sm_transition(ctx, __ocs_hw_port_freed, data);
10117 case OCS_EVT_ERROR:
10118 ocs_sm_transition(ctx, __ocs_hw_port_free_report_fail, data);
10121 __ocs_hw_port_common(__func__, ctx, evt, data);
/*
 * @brief Port SM state: free the port without issuing UNREG_VPI.
 *
 * Used for adapters where unreg_vpi is skipped; on entry it defers the
 * actual free to mailbox-completion context via an async call so the
 * free does not occur inline.
 */
10129 __ocs_hw_port_free_nop(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10131 ocs_sli_port_t *sport = ctx->app;
10132 ocs_hw_t *hw = sport->hw;
10137 case OCS_EVT_ENTER:
10138 /* Forward to execute in mailbox completion processing context */
10139 if (ocs_hw_async_call(hw, __ocs_hw_port_realloc_cb, sport)) {
10140 ocs_log_err(hw->os, "ocs_hw_async_call failed\n");
10143 case OCS_EVT_RESPONSE:
10144 ocs_sm_transition(ctx, __ocs_hw_port_freed, data);
10146 case OCS_EVT_ERROR:
10147 ocs_sm_transition(ctx, __ocs_hw_port_free_report_fail, data);
/*
 * @brief Port SM state: port attached.
 *
 * On entry, releases the mailbox buffer, notifies the port callback
 * with OCS_HW_PORT_ATTACH_OK, and honors a pending free request.  A
 * later free request transitions to the unreg_vpi state.
 */
10157 __ocs_hw_port_attached(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10159 ocs_sli_port_t *sport = ctx->app;
10160 ocs_hw_t *hw = sport->hw;
10165 case OCS_EVT_ENTER:
10166 if (data != NULL) {
10167 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10169 if (hw->callback.port != NULL) {
10170 hw->callback.port(hw->args.port,
10171 OCS_HW_PORT_ATTACH_OK, sport);
10173 if (sport->sm_free_req_pending) {
10174 ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10177 case OCS_EVT_HW_PORT_REQ_FREE:
10178 /* virtual/physical port request free */
10179 ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10182 __ocs_hw_port_common(__func__, ctx, evt, data);
/*
 * @brief Port SM state: issue REG_VPI to attach the port.
 *
 * On entry, formats and submits REG_VPI using the caller-provided
 * mailbox buffer (data); failures post OCS_EVT_ERROR.  RESPONSE moves
 * to attached, ERROR to the attach-failure report state; a free
 * request during the wait is latched in sm_free_req_pending.
 */
10190 __ocs_hw_port_attach_reg_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10192 ocs_sli_port_t *sport = ctx->app;
10193 ocs_hw_t *hw = sport->hw;
10198 case OCS_EVT_ENTER:
10199 if (0 == sli_cmd_reg_vpi(&hw->sli, data, SLI4_BMBX_SIZE, sport, FALSE)) {
10200 ocs_log_err(hw->os, "REG_VPI format failure\n");
10201 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10205 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
10206 ocs_log_err(hw->os, "REG_VPI command failure\n");
10207 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10211 case OCS_EVT_RESPONSE:
10212 ocs_sm_transition(ctx, __ocs_hw_port_attached, data);
10214 case OCS_EVT_ERROR:
10215 ocs_sm_transition(ctx, __ocs_hw_port_attach_report_fail, data);
10217 case OCS_EVT_HW_PORT_REQ_FREE:
10218 /* Wait for attach response and then free */
10219 sport->sm_free_req_pending = 1;
10222 __ocs_hw_port_common(__func__, ctx, evt, data);
/*
 * @brief Port SM terminal state: release resources without callbacks.
 *
 * On entry, returns the VPI to the SLI pool and frees the mailbox
 * buffer.  Unlike the report states, no port callback is invoked.
 */
10230 __ocs_hw_port_done(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10232 ocs_sli_port_t *sport = ctx->app;
10233 ocs_hw_t *hw = sport->hw;
10238 case OCS_EVT_ENTER:
10239 /* free SLI resource */
10240 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
10242 /* free mailbox buffer */
10243 if (data != NULL) {
10244 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10248 __ocs_hw_port_common(__func__, ctx, evt, data);
/*
 * @brief Port SM state: port allocated (VPI initialized).
 *
 * On entry, releases the mailbox buffer, reports OCS_HW_PORT_ALLOC_OK,
 * and honors any pending free request.  Subsequent events drive the
 * attach (REG_VPI) path, the attached state, or the free path -- on
 * Lancer via unreg_vpi, on BE3/Skyhawk via the nop state (those
 * adapters reject unreg_vpi before reg_vpi has been issued).
 */
10256 __ocs_hw_port_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10258 ocs_sli_port_t *sport = ctx->app;
10259 ocs_hw_t *hw = sport->hw;
10264 case OCS_EVT_ENTER:
10265 if (data != NULL) {
10266 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10268 if (hw->callback.port != NULL) {
10269 hw->callback.port(hw->args.port,
10270 OCS_HW_PORT_ALLOC_OK, sport);
10272 /* If there is a pending free request, then handle it now */
10273 if (sport->sm_free_req_pending) {
10274 ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10277 case OCS_EVT_HW_PORT_REQ_ATTACH:
10278 /* virtual port requests attach */
10279 ocs_sm_transition(ctx, __ocs_hw_port_attach_reg_vpi, data);
10281 case OCS_EVT_HW_PORT_ATTACH_OK:
10282 /* physical port attached (as part of attaching domain) */
10283 ocs_sm_transition(ctx, __ocs_hw_port_attached, data);
10285 case OCS_EVT_HW_PORT_REQ_FREE:
10286 /* virtual port request free */
10287 if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
10288 ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10291 * Note: BE3/Skyhawk will respond with a status of 0x20
10292 * unless the reg_vpi has been issued, so we can
10293 * skip the unreg_vpi for these adapters.
10295 * Send a nop to make sure that free doesn't occur in
10298 ocs_sm_transition(ctx, __ocs_hw_port_free_nop, NULL);
10302 __ocs_hw_port_common(__func__, ctx, evt, data);
/*
 * @brief Port SM state: report that port allocation failed.
 *
 * On entry, releases the VPI and mailbox buffer, reports
 * OCS_HW_PORT_ALLOC_FAIL, and honors any pending free request.
 */
10310 __ocs_hw_port_alloc_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10312 ocs_sli_port_t *sport = ctx->app;
10313 ocs_hw_t *hw = sport->hw;
10318 case OCS_EVT_ENTER:
10319 /* free SLI resource */
10320 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
10322 /* free mailbox buffer */
10323 if (data != NULL) {
10324 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10327 if (hw->callback.port != NULL) {
10328 hw->callback.port(hw->args.port,
10329 OCS_HW_PORT_ALLOC_FAIL, sport);
10332 /* If there is a pending free request, then handle it now */
10333 if (sport->sm_free_req_pending) {
10334 ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10338 __ocs_hw_port_common(__func__, ctx, evt, data);
/*
 * @brief Port SM state: read service parameters (READ_SPARM64).
 *
 * On entry, allocates a 112-byte DMA buffer and submits READ_SPARM64;
 * on RESPONSE it extracts WWPN/WWNN from the payload, frees the DMA
 * buffer, and proceeds to INIT_VPI.  Failures go to the done or
 * alloc-failure-report states; a free request is latched for later.
 */
10346 __ocs_hw_port_alloc_read_sparm64(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10348 ocs_sli_port_t *sport = ctx->app;
10349 ocs_hw_t *hw = sport->hw;
10350 uint8_t *payload = NULL;
10355 case OCS_EVT_ENTER:
10356 /* allocate memory for the service parameters */
10357 if (ocs_dma_alloc(hw->os, &sport->dma, 112, 4)) {
10358 ocs_log_err(hw->os, "Failed to allocate DMA memory\n");
10359 ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10363 if (0 == sli_cmd_read_sparm64(&hw->sli, data, SLI4_BMBX_SIZE,
10364 &sport->dma, sport->indicator)) {
10365 ocs_log_err(hw->os, "READ_SPARM64 allocation failure\n");
10366 ocs_dma_free(hw->os, &sport->dma);
10367 ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10371 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
10372 ocs_log_err(hw->os, "READ_SPARM64 command failure\n");
10373 ocs_dma_free(hw->os, &sport->dma);
10374 ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10378 case OCS_EVT_RESPONSE:
10379 payload = sport->dma.virt;
10381 ocs_display_sparams(sport->display_name, "sport sparm64", 0, NULL, payload);
/* Pull the world-wide port/node names out of the returned parameters. */
10383 ocs_memcpy(&sport->sli_wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET,
10384 sizeof(sport->sli_wwpn));
10385 ocs_memcpy(&sport->sli_wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET,
10386 sizeof(sport->sli_wwnn));
10388 ocs_dma_free(hw->os, &sport->dma);
10389 ocs_sm_transition(ctx, __ocs_hw_port_alloc_init_vpi, data);
10391 case OCS_EVT_ERROR:
10392 ocs_dma_free(hw->os, &sport->dma);
10393 ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, data);
10395 case OCS_EVT_HW_PORT_REQ_FREE:
10396 /* Wait for attach response and then free */
10397 sport->sm_free_req_pending = 1;
10402 __ocs_hw_port_common(__func__, ctx, evt, data);
/*
 * @brief Port SM initial allocation state.
 *
 * Waits for the allocation outcome: OK moves to allocated, FAIL to the
 * failure-report state; an early free request is latched.
 */
10410 __ocs_hw_port_alloc_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10412 ocs_sli_port_t *sport = ctx->app;
10417 case OCS_EVT_ENTER:
10420 case OCS_EVT_HW_PORT_ALLOC_OK:
10421 ocs_sm_transition(ctx, __ocs_hw_port_allocated, NULL);
10423 case OCS_EVT_HW_PORT_ALLOC_FAIL:
10424 ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, NULL);
10426 case OCS_EVT_HW_PORT_REQ_FREE:
10427 /* Wait for attach response and then free */
10428 sport->sm_free_req_pending = 1;
10431 __ocs_hw_port_common(__func__, ctx, evt, data);
/*
 * @brief Port SM state: issue INIT_VPI.
 *
 * On entry, short-circuits to freed if a free request is already
 * pending; otherwise formats and submits INIT_VPI with the sport's VPI
 * and its domain's VFI.  RESPONSE moves to allocated, ERROR to the
 * allocation-failure report state.
 */
10439 __ocs_hw_port_alloc_init_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10441 ocs_sli_port_t *sport = ctx->app;
10442 ocs_hw_t *hw = sport->hw;
10447 case OCS_EVT_ENTER:
10448 /* If there is a pending free request, then handle it now */
10449 if (sport->sm_free_req_pending) {
10450 ocs_sm_transition(ctx, __ocs_hw_port_freed, NULL);
10454 /* TODO XXX transitioning to done only works if this is called
10455 * directly from ocs_hw_port_alloc BUT not if called from
10456 * read_sparm64. In the later case, we actually want to go
10457 * through report_ok/fail
10459 if (0 == sli_cmd_init_vpi(&hw->sli, data, SLI4_BMBX_SIZE,
10460 sport->indicator, sport->domain->indicator)) {
10461 ocs_log_err(hw->os, "INIT_VPI allocation failure\n");
10462 ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10466 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
10467 ocs_log_err(hw->os, "INIT_VPI command failure\n");
10468 ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10472 case OCS_EVT_RESPONSE:
10473 ocs_sm_transition(ctx, __ocs_hw_port_allocated, data);
10475 case OCS_EVT_ERROR:
10476 ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, data);
10478 case OCS_EVT_HW_PORT_REQ_FREE:
10479 /* Wait for attach response and then free */
10480 sport->sm_free_req_pending = 1;
10485 __ocs_hw_port_common(__func__, ctx, evt, data);
/*
 * @brief Generic port mailbox completion callback.
 *
 * Translates the CQE/MQE status into OCS_EVT_RESPONSE or OCS_EVT_ERROR
 * and posts it to the sport's state machine, passing the mailbox
 * buffer along as event data (the receiving state frees it).
 */
10493 __ocs_hw_port_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
10495 ocs_sli_port_t *sport = arg;
10496 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
10497 ocs_sm_event_t evt;
10499 if (status || hdr->status) {
10500 ocs_log_debug(hw->os, "bad status vpi=%#x st=%x hdr=%x\n",
10501 sport->indicator, status, hdr->status);
10502 evt = OCS_EVT_ERROR;
10504 evt = OCS_EVT_RESPONSE;
10507 ocs_sm_post_event(&sport->ctx, evt, mqe);
/*
 * @brief Port mailbox completion callback used by the async/nop path.
 *
 * Same status translation as __ocs_hw_port_cb, but the state machine
 * is handed a freshly allocated copy of the mailbox buffer because the
 * original is reused by the mailbox layer; the SM states free the copy.
 */
10513 __ocs_hw_port_realloc_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
10515 ocs_sli_port_t *sport = arg;
10516 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
10517 ocs_sm_event_t evt;
10520 if (status || hdr->status) {
10521 ocs_log_debug(hw->os, "bad status vpi=%#x st=%x hdr=%x\n",
10522 sport->indicator, status, hdr->status);
10523 evt = OCS_EVT_ERROR;
10525 evt = OCS_EVT_RESPONSE;
10529 * In this case we have to malloc a mailbox command buffer, as it is reused
10530 * in the state machine post event call, and eventually freed
10532 mqecpy = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
10533 if (mqecpy == NULL) {
10534 ocs_log_err(hw->os, "malloc mqecpy failed\n");
10537 ocs_memcpy(mqecpy, mqe, SLI4_BMBX_SIZE);
10539 ocs_sm_post_event(&sport->ctx, evt, mqecpy);
10544 /***************************************************************************
10545 * Domain state machine
/*
 * @brief Common catch-all handler for the domain state machine.
 *
 * Logs any event not handled by the specific state, tagged with the
 * calling state's function name.
 */
10549 __ocs_hw_domain_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10551 ocs_domain_t *domain = ctx->app;
10552 ocs_hw_t *hw = domain->hw;
10562 ocs_log_test(hw->os, "%s %-20s not handled\n", funcname, ocs_sm_event_name(evt));
/*
 * @brief Domain SM state: report that domain allocation failed.
 *
 * On entry, frees the mailbox buffer and the VFI, then notifies the
 * domain callback with OCS_HW_DOMAIN_ALLOC_FAIL.
 */
10570 __ocs_hw_domain_alloc_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10572 ocs_domain_t *domain = ctx->app;
10573 ocs_hw_t *hw = domain->hw;
10578 case OCS_EVT_ENTER:
10579 /* free command buffer */
10580 if (data != NULL) {
10581 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10583 /* free SLI resources */
10584 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator);
10585 /* TODO how to free FCFI (or do we at all)? */
10587 if (hw->callback.domain != NULL) {
10588 hw->callback.domain(hw->args.domain,
10589 OCS_HW_DOMAIN_ALLOC_FAIL,
10594 __ocs_hw_domain_common(__func__, ctx, evt, data);
/*
 * @brief Domain SM state: domain attached (REG_VFI complete).
 *
 * On entry, frees the mailbox buffer, tells the physical sport's state
 * machine the port attach succeeded, and reports
 * OCS_HW_DOMAIN_ATTACH_OK.  A later free request starts unreg_vfi.
 */
10602 __ocs_hw_domain_attached(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10604 ocs_domain_t *domain = ctx->app;
10605 ocs_hw_t *hw = domain->hw;
10610 case OCS_EVT_ENTER:
10611 /* free mailbox buffer and send alloc ok to physical sport */
10612 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10613 ocs_sm_post_event(&domain->sport->ctx, OCS_EVT_HW_PORT_ATTACH_OK, NULL);
10615 /* now inform registered callbacks */
10616 if (hw->callback.domain != NULL) {
10617 hw->callback.domain(hw->args.domain,
10618 OCS_HW_DOMAIN_ATTACH_OK,
10622 case OCS_EVT_HW_DOMAIN_REQ_FREE:
10623 ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_vfi, NULL);
10626 __ocs_hw_domain_common(__func__, ctx, evt, data);
/*
 * @brief Domain SM state: report that the domain attach (REG_VFI) failed.
 *
 * On entry, frees the mailbox buffer and the VFI, then notifies the
 * domain callback with OCS_HW_DOMAIN_ATTACH_FAIL.
 */
10634 __ocs_hw_domain_attach_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10636 ocs_domain_t *domain = ctx->app;
10637 ocs_hw_t *hw = domain->hw;
10642 case OCS_EVT_ENTER:
10643 if (data != NULL) {
10644 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10646 /* free SLI resources */
10647 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator);
10648 /* TODO how to free FCFI (or do we at all)? */
10650 if (hw->callback.domain != NULL) {
10651 hw->callback.domain(hw->args.domain,
10652 OCS_HW_DOMAIN_ATTACH_FAIL,
10659 __ocs_hw_domain_common(__func__, ctx, evt, data);
/*
 * @brief Domain SM state: issue REG_VFI to attach the domain.
 *
 * On entry, formats and submits REG_VFI using the caller-provided
 * mailbox buffer; failures post OCS_EVT_ERROR.  RESPONSE moves to
 * attached, ERROR to the attach-failure report state.
 */
10667 __ocs_hw_domain_attach_reg_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10669 ocs_domain_t *domain = ctx->app;
10670 ocs_hw_t *hw = domain->hw;
10675 case OCS_EVT_ENTER:
10677 ocs_display_sparams("", "reg vpi", 0, NULL, domain->dma.virt);
10679 if (0 == sli_cmd_reg_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain)) {
10680 ocs_log_err(hw->os, "REG_VFI format failure\n");
10681 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10685 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
10686 ocs_log_err(hw->os, "REG_VFI command failure\n");
10687 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10691 case OCS_EVT_RESPONSE:
10692 ocs_sm_transition(ctx, __ocs_hw_domain_attached, data);
10694 case OCS_EVT_ERROR:
10695 ocs_sm_transition(ctx, __ocs_hw_domain_attach_report_fail, data);
10698 __ocs_hw_domain_common(__func__, ctx, evt, data);
/*
 * Domain state machine: "allocated" resting state after a successful alloc.
 * OCS_EVT_ENTER: frees the mailbox buffer, posts OCS_EVT_HW_PORT_ALLOC_OK to
 * the physical sport, records the domain in the HW's domain table
 * (ocs_hw_domain_add) and invokes the callback with OCS_HW_DOMAIN_ALLOC_OK.
 * OCS_EVT_HW_DOMAIN_REQ_ATTACH: proceed to REG_VFI.
 * OCS_EVT_HW_DOMAIN_REQ_FREE: teardown; BE3/Skyhawk (IF_TYPE 0) parts
 * unregister the FCFI directly, others go through UNREG_VFI first.
 */
10706 __ocs_hw_domain_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10708 ocs_domain_t *domain = ctx->app;
10709 ocs_hw_t *hw = domain->hw;
10714 case OCS_EVT_ENTER:
10715 /* free mailbox buffer and send alloc ok to physical sport */
10716 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10717 ocs_sm_post_event(&domain->sport->ctx, OCS_EVT_HW_PORT_ALLOC_OK, NULL);
10719 ocs_hw_domain_add(hw, domain);
10721 /* now inform registered callbacks */
10722 if (hw->callback.domain != NULL) {
10723 hw->callback.domain(hw->args.domain,
10724 OCS_HW_DOMAIN_ALLOC_OK,
10728 case OCS_EVT_HW_DOMAIN_REQ_ATTACH:
10729 ocs_sm_transition(ctx, __ocs_hw_domain_attach_reg_vfi, data);
10731 case OCS_EVT_HW_DOMAIN_REQ_FREE:
10732 /* unreg_fcfi/vfi */
10733 if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) {
10734 ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, NULL);
10736 ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_vfi, NULL);
10740 __ocs_hw_domain_common(__func__, ctx, evt, data);
/*
 * Domain state machine: read the domain's service parameters.
 * OCS_EVT_ENTER: formats/submits READ_SPARM64 (default VPI) with the
 * response DMA'd into domain->dma; failures become OCS_EVT_ERROR.
 * OCS_EVT_RESPONSE: logs the sparams and moves to __ocs_hw_domain_allocated.
 * OCS_EVT_ERROR: moves to __ocs_hw_domain_alloc_report_fail.
 */
10748 __ocs_hw_domain_alloc_read_sparm64(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10750 ocs_domain_t *domain = ctx->app;
10751 ocs_hw_t *hw = domain->hw;
10756 case OCS_EVT_ENTER:
10757 if (0 == sli_cmd_read_sparm64(&hw->sli, data, SLI4_BMBX_SIZE,
10758 &domain->dma, SLI4_READ_SPARM64_VPI_DEFAULT)) {
10759 ocs_log_err(hw->os, "READ_SPARM64 format failure\n");
10760 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10764 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
10765 ocs_log_err(hw->os, "READ_SPARM64 command failure\n");
10766 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10772 case OCS_EVT_RESPONSE:
10773 ocs_display_sparams(domain->display_name, "domain sparm64", 0, NULL, domain->dma.virt);
10775 ocs_sm_transition(ctx, __ocs_hw_domain_allocated, data);
10777 case OCS_EVT_ERROR:
10778 ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data);
10781 __ocs_hw_domain_common(__func__, ctx, evt, data);
/*
 * Domain state machine: initialize the VFI for this domain.
 * OCS_EVT_ENTER: formats/submits INIT_VFI using the domain's VFI indicator,
 * its FCF indicator, and the physical sport's VPI; failures become
 * OCS_EVT_ERROR.
 * OCS_EVT_RESPONSE -> __ocs_hw_domain_alloc_read_sparm64
 * OCS_EVT_ERROR    -> __ocs_hw_domain_alloc_report_fail
 */
10789 __ocs_hw_domain_alloc_init_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10791 ocs_domain_t *domain = ctx->app;
10792 ocs_sli_port_t *sport = domain->sport;
10793 ocs_hw_t *hw = domain->hw;
10798 case OCS_EVT_ENTER:
10799 if (0 == sli_cmd_init_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->indicator,
10800 domain->fcf_indicator, sport->indicator)) {
10801 ocs_log_err(hw->os, "INIT_VFI format failure\n");
10802 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10805 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
10806 ocs_log_err(hw->os, "INIT_VFI command failure\n");
10807 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10813 case OCS_EVT_RESPONSE:
10814 ocs_sm_transition(ctx, __ocs_hw_domain_alloc_read_sparm64, data);
10816 case OCS_EVT_ERROR:
10817 ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data);
10820 __ocs_hw_domain_common(__func__, ctx, evt, data);
/*
 * Domain state machine: register the FCFI (FCoE path).
 * OCS_EVT_ENTER: builds the REG_FCFI RQ filter table from the HW's
 * filter_def words (each 32-bit word packs r_ctl mask/match and type
 * mask/match as successive bytes), assigns RQ ids in RQ-definition order,
 * then submits either REG_FCFI_MRQ (when multiple RQs are configured,
 * hw->hw_mrq_count != 0) or plain REG_FCFI.
 * OCS_EVT_RESPONSE: captures the FW-assigned FCFI into
 * domain->fcf_indicator; IF_TYPE 0 (BE3/Skyhawk) parts skip explicit VFI
 * init and go straight to READ_SPARM64, others continue with INIT_VFI.
 * OCS_EVT_ERROR -> __ocs_hw_domain_alloc_report_fail
 */
10828 __ocs_hw_domain_alloc_reg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10830 ocs_domain_t *domain = ctx->app;
10831 ocs_hw_t *hw = domain->hw;
10836 case OCS_EVT_ENTER: {
10837 sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
10840 /* Set the filter match/mask values from hw's filter_def values */
10841 for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
10842 rq_cfg[i].rq_id = 0xffff;
10843 rq_cfg[i].r_ctl_mask = (uint8_t) hw->config.filter_def[i];
10844 rq_cfg[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8);
10845 rq_cfg[i].type_mask = (uint8_t) (hw->config.filter_def[i] >> 16);
10846 rq_cfg[i].type_match = (uint8_t) (hw->config.filter_def[i] >> 24);
10849 /* Set the rq_id for each, in order of RQ definition */
10850 for (i = 0; i < hw->hw_rq_count; i++) {
10851 if (i >= ARRAY_SIZE(rq_cfg)) {
10852 ocs_log_warn(hw->os, "more RQs than REG_FCFI filter entries\n");
10855 rq_cfg[i].rq_id = hw->hw_rq[i]->hdr->id;
10859 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10863 if (hw->hw_mrq_count) {
10864 if (OCS_HW_RTN_SUCCESS != ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE,
10865 domain->vlan_id, domain->fcf)) {
10866 ocs_log_err(hw->os, "REG_FCFI_MRQ format failure\n");
10867 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10872 if (0 == sli_cmd_reg_fcfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf,
10873 rq_cfg, domain->vlan_id)) {
10874 ocs_log_err(hw->os, "REG_FCFI format failure\n");
10875 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10880 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
10881 ocs_log_err(hw->os, "REG_FCFI command failure\n");
10882 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10889 case OCS_EVT_RESPONSE:
10891 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10895 domain->fcf_indicator = ((sli4_cmd_reg_fcfi_t *)data)->fcfi;
10898 * IF_TYPE 0 devices do not support explicit VFI and VPI initialization
10899 * and instead rely on implicit initialization during VFI registration.
10900 * Short circuit normal processing here for those devices.
10902 if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) {
10903 ocs_sm_transition(ctx, __ocs_hw_domain_alloc_read_sparm64, data);
10905 ocs_sm_transition(ctx, __ocs_hw_domain_alloc_init_vfi, data);
10908 case OCS_EVT_ERROR:
10909 ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data);
10912 __ocs_hw_domain_common(__func__, ctx, evt, data);
/*
 * Domain state machine: entry state for domain allocation.
 * OCS_EVT_ENTER: native FC links already have an FCFI registered by the HW,
 * so the saved hw->fcf_indicator is copied into the domain and the SM jumps
 * directly to INIT_VFI; FCoE links must first register an FCFI.
 */
10920 __ocs_hw_domain_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10922 ocs_domain_t *domain = ctx->app;
10923 ocs_hw_t *hw = domain->hw;
10928 case OCS_EVT_ENTER:
10929 if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) {
10931 * For FC, the HW alread registered a FCFI
10932 * Copy FCF information into the domain and jump to INIT_VFI
10934 domain->fcf_indicator = hw->fcf_indicator;
10935 ocs_sm_transition(&domain->sm, __ocs_hw_domain_alloc_init_vfi, data);
10937 ocs_sm_transition(&domain->sm, __ocs_hw_domain_alloc_reg_fcfi, data);
10941 __ocs_hw_domain_common(__func__, ctx, evt, data);
/*
 * Domain state machine: report a failed domain free.
 * OCS_EVT_ENTER: if the domain is still valid, removes it from the HW's
 * domain table and notifies the callback with OCS_HW_DOMAIN_FREE_FAIL, then
 * releases the mailbox buffer.
 * NOTE(review): when domain == NULL the ocs_free() below is handed a NULL
 * os handle — confirm ocs_free tolerates that on this platform.
 */
10949 __ocs_hw_domain_free_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10951 ocs_domain_t *domain = ctx->app;
10956 case OCS_EVT_ENTER:
10957 if (domain != NULL) {
10958 ocs_hw_t *hw = domain->hw;
10960 ocs_hw_domain_del(hw, domain);
10962 if (hw->callback.domain != NULL) {
10963 hw->callback.domain(hw->args.domain,
10964 OCS_HW_DOMAIN_FREE_FAIL,
10969 /* free command buffer */
10970 if (data != NULL) {
10971 ocs_free(domain != NULL ? domain->hw->os : NULL, data, SLI4_BMBX_SIZE);
10977 __ocs_hw_domain_common(__func__, ctx, evt, data);
/*
 * Domain state machine: terminal state for a successful domain free.
 * OCS_EVT_ENTER: returns the VFI to the SLI resource pool, removes the
 * domain from the HW's domain table, notifies the callback with
 * OCS_HW_DOMAIN_FREE_OK, and frees the mailbox buffer.
 * NOTE(review): ocs_free() here is called with a NULL os handle — confirm
 * that is acceptable for this allocator.
 */
10985 __ocs_hw_domain_freed(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10987 ocs_domain_t *domain = ctx->app;
10992 case OCS_EVT_ENTER:
10993 /* Free DMA and mailbox buffer */
10994 if (domain != NULL) {
10995 ocs_hw_t *hw = domain->hw;
10997 /* free VFI resource */
10998 sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI,
10999 domain->indicator);
11001 ocs_hw_domain_del(hw, domain);
11003 /* inform registered callbacks */
11004 if (hw->callback.domain != NULL) {
11005 hw->callback.domain(hw->args.domain,
11006 OCS_HW_DOMAIN_FREE_OK,
11010 if (data != NULL) {
11011 ocs_free(NULL, data, SLI4_BMBX_SIZE);
11017 __ocs_hw_domain_common(__func__, ctx, evt, data);
/*
 * Domain state machine: request FCF rediscovery during domain free.
 * OCS_EVT_ENTER: if a HW teardown is underway, skip straight to "freed";
 * otherwise format/submit FCOE_REDISCOVER_FCF (failures -> OCS_EVT_ERROR).
 * OCS_EVT_RESPONSE / OCS_EVT_ERROR: both advance to __ocs_hw_domain_freed —
 * the command legitimately fails when no FCFs exist, so errors are not fatal.
 */
11026 __ocs_hw_domain_free_redisc_fcf(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
11028 ocs_domain_t *domain = ctx->app;
11029 ocs_hw_t *hw = domain->hw;
11034 case OCS_EVT_ENTER:
11035 /* if we're in the middle of a teardown, skip sending rediscover */
11036 if (hw->state == OCS_HW_STATE_TEARDOWN_IN_PROGRESS) {
11037 ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
11040 if (0 == sli_cmd_fcoe_rediscover_fcf(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf)) {
11041 ocs_log_err(hw->os, "REDISCOVER_FCF format failure\n");
11042 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11046 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
11047 ocs_log_err(hw->os, "REDISCOVER_FCF command failure\n");
11048 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11051 case OCS_EVT_RESPONSE:
11052 case OCS_EVT_ERROR:
11053 /* REDISCOVER_FCF can fail if none exist */
11054 ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
11059 __ocs_hw_domain_common(__func__, ctx, evt, data);
/*
 * Domain state machine: unregister the FCFI during domain free.
 * OCS_EVT_ENTER: allocates a mailbox buffer if the caller didn't supply one,
 * then formats/submits UNREG_FCFI; on format or submit failure the buffer is
 * freed here (this state owns it) and OCS_EVT_ERROR is posted.
 * OCS_EVT_RESPONSE: optionally detours through FCF rediscovery when
 * domain->req_rediscover_fcf was set, otherwise goes to "freed".
 * OCS_EVT_ERROR -> __ocs_hw_domain_free_report_fail
 */
11067 __ocs_hw_domain_free_unreg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
11069 ocs_domain_t *domain = ctx->app;
11070 ocs_hw_t *hw = domain->hw;
11075 case OCS_EVT_ENTER:
11076 if (data == NULL) {
11077 data = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
11079 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11084 if (0 == sli_cmd_unreg_fcfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf_indicator)) {
11085 ocs_log_err(hw->os, "UNREG_FCFI format failure\n");
11086 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
11087 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11091 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
11092 ocs_log_err(hw->os, "UNREG_FCFI command failure\n");
11093 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
11094 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11098 case OCS_EVT_RESPONSE:
11099 if (domain->req_rediscover_fcf) {
11100 domain->req_rediscover_fcf = FALSE;
11101 ocs_sm_transition(ctx, __ocs_hw_domain_free_redisc_fcf, data);
11103 ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
11106 case OCS_EVT_ERROR:
11107 ocs_sm_transition(ctx, __ocs_hw_domain_free_report_fail, data);
11112 __ocs_hw_domain_common(__func__, ctx, evt, data);
/*
 * Domain state machine: unregister the VFI during domain free.
 * OCS_EVT_ENTER: allocates a mailbox buffer if needed, then formats/submits
 * UNREG_VFI (SLI4_UNREG_TYPE_DOMAIN); the buffer is freed locally on
 * format/submit failure before posting OCS_EVT_ERROR.
 * On OCS_EVT_RESPONSE/OCS_EVT_ERROR the next state depends on link medium:
 * native FC links are done (or report failure) directly, while FCoE links
 * must still unregister the FCFI.
 */
11120 __ocs_hw_domain_free_unreg_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
11122 ocs_domain_t *domain = ctx->app;
11123 ocs_hw_t *hw = domain->hw;
11124 uint8_t is_fc = FALSE;
11128 is_fc = (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC);
11131 case OCS_EVT_ENTER:
11132 if (data == NULL) {
11133 data = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
11135 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11140 if (0 == sli_cmd_unreg_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain,
11141 SLI4_UNREG_TYPE_DOMAIN)) {
11142 ocs_log_err(hw->os, "UNREG_VFI format failure\n");
11143 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
11144 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11148 if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
11149 ocs_log_err(hw->os, "UNREG_VFI command failure\n");
11150 ocs_free(hw->os, data, SLI4_BMBX_SIZE);
11151 ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11155 case OCS_EVT_ERROR:
11157 ocs_sm_transition(ctx, __ocs_hw_domain_free_report_fail, data);
11159 ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, data);
11162 case OCS_EVT_RESPONSE:
11164 ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
11166 ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, data);
11170 __ocs_hw_domain_common(__func__, ctx, evt, data);
/*
 * Mailbox completion callback shared by all domain alloc/attach/free
 * commands. Translates the completion status (driver status or SLI header
 * status) into OCS_EVT_ERROR / OCS_EVT_RESPONSE and posts it to the
 * domain's state machine, passing the MQE so the state can reuse/free the
 * mailbox buffer.
 */
11177 /* callback for domain alloc/attach/free */
11179 __ocs_hw_domain_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
11181 ocs_domain_t *domain = arg;
11182 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
11183 ocs_sm_event_t evt;
11185 if (status || hdr->status) {
11186 ocs_log_debug(hw->os, "bad status vfi=%#x st=%x hdr=%x\n",
11187 domain->indicator, status, hdr->status);
11188 evt = OCS_EVT_ERROR;
11190 evt = OCS_EVT_RESPONSE;
11193 ocs_sm_post_event(&domain->sm, evt, mqe);
/*
 * NOP mailbox completion handler that performs the target WQE timeout scan.
 * Runs in mailbox-completion context (see target_wqe_timer_cb). Walks the
 * timed-WQE list under io_lock; any IO whose elapsed seconds exceed its
 * tgt_wqe_timeout is removed from the list (so it is not aborted twice),
 * marked with SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT for when the abort
 * completes, and aborted. A bad mailbox status is logged but does not stop
 * the scan. Finally the timer is re-armed unless shutdown was requested,
 * and in_active_wqe_timer is cleared so shutdown_target_wqe_timer() can
 * observe completion.
 */
11199 target_wqe_timer_nop_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
11201 ocs_hw_io_t *io = NULL;
11202 ocs_hw_io_t *io_next = NULL;
11203 uint64_t ticks_current = ocs_get_os_ticks();
11204 uint32_t sec_elapsed;
11206 sli4_mbox_command_header_t *hdr = (sli4_mbox_command_header_t *)mqe;
11208 if (status || hdr->status) {
11209 ocs_log_debug(hw->os, "bad status st=%x hdr=%x\n",
11210 status, hdr->status);
11211 /* go ahead and proceed with wqe timer checks... */
11214 /* loop through active WQE list and check for timeouts */
11215 ocs_lock(&hw->io_lock);
11216 ocs_list_foreach_safe(&hw->io_timed_wqe, io, io_next) {
11217 sec_elapsed = ((ticks_current - io->submit_ticks) / ocs_get_os_tick_freq());
11220 * If elapsed time > timeout, abort it. No need to check type since
11221 * it wouldn't be on this list unless it was a target WQE
11223 if (sec_elapsed > io->tgt_wqe_timeout) {
11224 ocs_log_test(hw->os, "IO timeout xri=0x%x tag=0x%x type=%d\n",
11225 io->indicator, io->reqtag, io->type);
11227 /* remove from active_wqe list so won't try to abort again */
11228 ocs_list_remove(&hw->io_timed_wqe, io);
11230 /* save status of "timed out" for when abort completes */
11231 io->status_saved = 1;
11232 io->saved_status = SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT;
11236 /* now abort outstanding IO */
11237 ocs_hw_io_abort(hw, io, FALSE, NULL, NULL);
11240 * need to go through entire list since each IO could have a
11241 * different timeout value
11244 ocs_unlock(&hw->io_lock);
11246 /* if we're not in the middle of shutting down, schedule next timer */
11247 if (!hw->active_wqe_timer_shutdown) {
11248 ocs_setup_timer(hw->os, &hw->wqe_timer, target_wqe_timer_cb, hw, OCS_HW_WQ_TIMER_PERIOD_MS);
11250 hw->in_active_wqe_timer = FALSE;
/*
 * Periodic WQE timer entry point. Sets in_active_wqe_timer (so shutdown can
 * wait for us), cancels the pending timer, and defers the actual timeout
 * scan to target_wqe_timer_nop_cb by posting a NOP mailbox command, so the
 * scan runs in mailbox completion context rather than timer context.
 */
11255 target_wqe_timer_cb(void *arg)
11257 ocs_hw_t *hw = (ocs_hw_t *)arg;
11259 /* delete existing timer; will kick off new timer after checking wqe timeouts */
11260 hw->in_active_wqe_timer = TRUE;
11261 ocs_del_timer(&hw->wqe_timer);
11263 /* Forward timer callback to execute in the mailbox completion processing context */
11264 if (ocs_hw_async_call(hw, target_wqe_timer_nop_cb, hw)) {
11265 ocs_log_test(hw->os, "ocs_hw_async_call failed\n");
/*
 * Stop the target WQE timeout machinery (only active when
 * emulate_tgt_wqe_timeout is configured). Requests shutdown via
 * active_wqe_timer_shutdown, cancels the timer, then polls (bounded by
 * 'iters') for any in-flight timer handler to finish; logs if the handler
 * never drains.
 */
11270 shutdown_target_wqe_timer(ocs_hw_t *hw)
11272 uint32_t iters = 100;
11274 if (hw->config.emulate_tgt_wqe_timeout) {
11275 /* request active wqe timer shutdown, then wait for it to complete */
11276 hw->active_wqe_timer_shutdown = TRUE;
11278 /* delete WQE timer and wait for timer handler to complete (if necessary) */
11279 ocs_del_timer(&hw->wqe_timer);
11281 /* now wait for timer handler to complete (if necessary) */
11282 while (hw->in_active_wqe_timer && iters) {
11284 * if we happen to have just sent NOP mailbox command, make sure
11285 * completions are being processed
11292 ocs_log_test(hw->os, "Failed to shutdown active wqe timer\n");
/*
 * Simple accessor: reports whether this HW IO's XRI has been posted to
 * (is owned by) the port, by returning the io->is_port_owned flag.
 */
11298 * @brief Determine if HW IO is owned by the port.
11301 * Determines if the given HW IO has been posted to the chip.
11303 * @param hw Hardware context allocated by the caller.
11306 * @return Returns TRUE if given HW IO is port-owned.
11309 ocs_hw_is_io_port_owned(ocs_hw_t *hw, ocs_hw_io_t *io)
11311 /* Check to see if this is a port owned XRI */
11312 return io->is_port_owned;
/*
 * Look up the IO tracking the given XRI and report its port-ownership flag;
 * an unknown XRI is treated as not port-owned (FALSE).
 */
11316 * @brief Return TRUE if exchange is port-owned.
11319 * Test to see if the xri is a port-owned xri.
11321 * @param hw Hardware context.
11322 * @param xri Exchange indicator.
11324 * @return Returns TRUE if XRI is a port owned XRI.
11328 ocs_hw_is_xri_port_owned(ocs_hw_t *hw, uint32_t xri)
11330 ocs_hw_io_t *io = ocs_hw_io_lookup(hw, xri);
11331 return (io == NULL ? FALSE : io->is_port_owned);
/*
 * Return a contiguous range of port-owned XRIs to the host-owned free pool.
 * For each XRI: if auto xfer rdy is enabled, detach any buffer first
 * (ocs_hw_rqpair_auto_xfer_rdy_move_to_host), then under io_lock move the IO
 * from io_port_owned back to io_free and clear its is_port_owned flag.
 * Called on POST_XRI failure and on RELEASE_XRI completion.
 */
11335 * @brief Returns an XRI from the port owned list to the host.
11338 * Used when the POST_XRI command fails as well as when the RELEASE_XRI completes.
11340 * @param hw Hardware context.
11341 * @param xri_base The starting XRI number.
11342 * @param xri_count The number of XRIs to free from the base.
11345 ocs_hw_reclaim_xri(ocs_hw_t *hw, uint16_t xri_base, uint16_t xri_count)
11350 for (i = 0; i < xri_count; i++) {
11351 io = ocs_hw_io_lookup(hw, xri_base + i);
11354 * if this is an auto xfer rdy XRI, then we need to release any
11355 * buffer attached to the XRI before moving the XRI back to the free pool.
11357 if (hw->auto_xfer_rdy_enabled) {
11358 ocs_hw_rqpair_auto_xfer_rdy_move_to_host(hw, io);
11361 ocs_lock(&hw->io_lock);
11362 ocs_list_remove(&hw->io_port_owned, io);
11363 io->is_port_owned = 0;
11364 ocs_list_add_tail(&hw->io_free, io);
11365 ocs_unlock(&hw->io_lock);
/*
 * POST_XRI mailbox completion handler: on failure, logs and reclaims the
 * posted XRI range back to the host-owned pool; always frees the mailbox
 * command buffer allocated by ocs_hw_post_xri().
 */
11370 * @brief Called when the POST_XRI command completes.
11373 * Free the mailbox command buffer and reclaim the XRIs on failure.
11375 * @param hw Hardware context.
11376 * @param status Status field from the mbox completion.
11377 * @param mqe Mailbox response structure.
11378 * @param arg Pointer to a callback function that signals the caller that the command is done.
11380 * @return Returns 0.
11383 ocs_hw_cb_post_xri(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
11385 sli4_cmd_post_xri_t *post_xri = (sli4_cmd_post_xri_t*)mqe;
11387 /* Reclaim the XRIs as host owned if the command fails */
11389 ocs_log_debug(hw->os, "Status 0x%x for XRI base 0x%x, cnt =x%x\n",
11390 status, post_xri->xri_base, post_xri->xri_count);
11391 ocs_hw_reclaim_xri(hw, post_xri->xri_base, post_xri->xri_count);
11394 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
/*
 * Submit a POST_XRI mailbox command moving [xri_start, xri_start+num_to_post)
 * from the host-owned pool to the port. The command buffer is heap-allocated
 * because mailbox commands may be queued; ownership passes to
 * ocs_hw_cb_post_xri on successful submission and is freed here otherwise.
 */
11399 * @brief Issues a mailbox command to move XRIs from the host-controlled pool to the port.
11401 * @param hw Hardware context.
11402 * @param xri_start The starting XRI to post.
11403 * @param num_to_post The number of XRIs to post.
11405 * @return Returns OCS_HW_RTN_NO_MEMORY, OCS_HW_RTN_ERROR, or OCS_HW_RTN_SUCCESS.
11408 static ocs_hw_rtn_e
11409 ocs_hw_post_xri(ocs_hw_t *hw, uint32_t xri_start, uint32_t num_to_post)
11412 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
11414 /* Since we need to allocate for mailbox queue, just always allocate */
11415 post_xri = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
11416 if (post_xri == NULL) {
11417 ocs_log_err(hw->os, "no buffer for command\n");
11418 return OCS_HW_RTN_NO_MEMORY;
11421 /* Register the XRIs */
11422 if (sli_cmd_post_xri(&hw->sli, post_xri, SLI4_BMBX_SIZE,
11423 xri_start, num_to_post)) {
11424 rc = ocs_hw_command(hw, post_xri, OCS_CMD_NOWAIT, ocs_hw_cb_post_xri, NULL);
11425 if (rc != OCS_HW_RTN_SUCCESS) {
11426 ocs_free(hw->os, post_xri, SLI4_BMBX_SIZE);
11427 ocs_log_err(hw->os, "post_xri failed\n");
/*
 * Move up to num_xri IOs from the host free list to port ownership.
 * Cannot use ocs_hw_io_alloc() since that would place IOs on io_inuse;
 * instead IOs are pulled off io_free under io_lock. With auto xfer rdy
 * enabled, a buffer must be attached first (the lock is dropped around that
 * call, which takes the IO lock itself); if no buffer is available the IO is
 * returned to the free list. Each accepted IO is flagged port-owned, queued
 * on io_port_owned, and its XRI posted individually; a POST_XRI failure
 * reclaims what was moved so far. Returns the number of XRIs moved.
 */
11434 * @brief Move XRIs from the host-controlled pool to the port.
11437 * Removes IOs from the free list and moves them to the port.
11439 * @param hw Hardware context.
11440 * @param num_xri The number of XRIs being requested to move to the chip.
11442 * @return Returns the number of XRIs that were moved.
11446 ocs_hw_xri_move_to_port_owned(ocs_hw_t *hw, uint32_t num_xri)
11450 uint32_t num_posted = 0;
11453 * Note: We cannot use ocs_hw_io_alloc() because that would place the
11454 * IO on the io_inuse list. We need to move from the io_free to
11455 * the io_port_owned list.
11457 ocs_lock(&hw->io_lock);
11459 for (i = 0; i < num_xri; i++) {
11461 if (NULL != (io = ocs_list_remove_head(&hw->io_free))) {
11465 * if this is an auto xfer rdy XRI, then we need to attach a
11466 * buffer to the XRI before submitting it to the chip. If a
11467 * buffer is unavailable, then we cannot post it, so return it
11468 * to the free pool.
11470 if (hw->auto_xfer_rdy_enabled) {
11471 /* Note: uses the IO lock to get the auto xfer rdy buffer */
11472 ocs_unlock(&hw->io_lock);
11473 rc = ocs_hw_rqpair_auto_xfer_rdy_move_to_port(hw, io);
11474 ocs_lock(&hw->io_lock);
11475 if (rc != OCS_HW_RTN_SUCCESS) {
11476 ocs_list_add_head(&hw->io_free, io);
11480 ocs_lock_init(hw->os, &io->axr_lock, "HW_axr_lock[%d]", io->indicator);
11481 io->is_port_owned = 1;
11482 ocs_list_add_tail(&hw->io_port_owned, io);
11485 if (ocs_hw_post_xri(hw, io->indicator, 1) != OCS_HW_RTN_SUCCESS ) {
11486 ocs_hw_reclaim_xri(hw, io->indicator, i);
11491 /* no more free XRIs */
11495 ocs_unlock(&hw->io_lock);
/*
 * RELEASE_XRI mailbox completion handler. On error just logs; for each XRI
 * the FW reports released (two 16-bit tags are packed per xri_tbl entry, so
 * even indices read xri_tag0 and odd indices xri_tag1), the XRI is reclaimed
 * to the host-owned pool. The mailbox buffer from
 * ocs_hw_xri_move_to_host_owned() is freed here.
 */
11501 * @brief Called when the RELEASE_XRI command completes.
11504 * Move the IOs back to the free pool on success.
11506 * @param hw Hardware context.
11507 * @param status Status field from the mbox completion.
11508 * @param mqe Mailbox response structure.
11509 * @param arg Pointer to a callback function that signals the caller that the command is done.
11511 * @return Returns 0.
11514 ocs_hw_cb_release_xri(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
11516 sli4_cmd_release_xri_t *release_xri = (sli4_cmd_release_xri_t*)mqe;
11519 /* Reclaim the XRIs as host owned if the command fails */
11521 ocs_log_err(hw->os, "Status 0x%x\n", status);
11523 for (i = 0; i < release_xri->released_xri_count; i++) {
11524 uint16_t xri = ((i & 1) == 0 ? release_xri->xri_tbl[i/2].xri_tag0 :
11525 release_xri->xri_tbl[i/2].xri_tag1);
11526 ocs_hw_reclaim_xri(hw, xri, 1);
11530 ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
/*
 * Ask the firmware (via RELEASE_XRI) to return num_xri XRIs to the
 * host-owned pool. The command buffer must be heap-allocated (non-local) for
 * the mailbox queue; on submit failure (or polling) it is freed here,
 * otherwise ocs_hw_cb_release_xri frees it on completion.
 */
11535 * @brief Move XRIs from the port-controlled pool to the host.
11537 * Requests XRIs from the FW to return to the host-owned pool.
11539 * @param hw Hardware context.
11540 * @param num_xri The number of XRIs being requested to moved from the chip.
11542 * @return Returns 0 for success, or a negative error code value for failure.
11546 ocs_hw_xri_move_to_host_owned(ocs_hw_t *hw, uint8_t num_xri)
11548 uint8_t *release_xri;
11549 ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
11551 /* non-local buffer required for mailbox queue */
11552 release_xri = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
11553 if (release_xri == NULL) {
11554 ocs_log_err(hw->os, "no buffer for command\n");
11555 return OCS_HW_RTN_NO_MEMORY;
11558 /* release the XRIs */
11559 if (sli_cmd_release_xri(&hw->sli, release_xri, SLI4_BMBX_SIZE, num_xri)) {
11560 rc = ocs_hw_command(hw, release_xri, OCS_CMD_NOWAIT, ocs_hw_cb_release_xri, NULL);
11561 if (rc != OCS_HW_RTN_SUCCESS) {
11562 ocs_log_err(hw->os, "release_xri failed\n");
11565 /* If we are polling or an error occurred, then free the mailbox buffer */
11566 if (release_xri != NULL && rc != OCS_HW_RTN_SUCCESS) {
11567 ocs_free(hw->os, release_xri, SLI4_BMBX_SIZE);
/*
 * Allocate an array of 'count' RQ buffer trackers, each with a DMA buffer of
 * 'size' bytes tagged with the owning rqindex. Returns the array, or NULL on
 * failure.
 * NOTE(review): on a mid-loop DMA allocation failure only the tracker array
 * is freed — DMA buffers allocated in earlier iterations appear to leak;
 * confirm against the elided tail of this function.
 */
11574 * @brief Allocate an ocs_hw_rx_buffer_t array.
11577 * An ocs_hw_rx_buffer_t array is allocated, along with the required DMA memory.
11579 * @param hw Pointer to HW object.
11580 * @param rqindex RQ index for this buffer.
11581 * @param count Count of buffers in array.
11582 * @param size Size of buffer.
11584 * @return Returns the pointer to the allocated ocs_hw_rq_buffer_t array.
11586 static ocs_hw_rq_buffer_t *
11587 ocs_hw_rx_buffer_alloc(ocs_hw_t *hw, uint32_t rqindex, uint32_t count, uint32_t size)
11589 ocs_t *ocs = hw->os;
11590 ocs_hw_rq_buffer_t *rq_buf = NULL;
11591 ocs_hw_rq_buffer_t *prq;
11595 rq_buf = ocs_malloc(hw->os, sizeof(*rq_buf) * count, OCS_M_NOWAIT | OCS_M_ZERO);
11596 if (rq_buf == NULL) {
11597 ocs_log_err(hw->os, "Failure to allocate unsolicited DMA trackers\n");
11601 for (i = 0, prq = rq_buf; i < count; i ++, prq++) {
11602 prq->rqindex = rqindex;
11603 if (ocs_dma_alloc(ocs, &prq->dma, size, OCS_MIN_DMA_ALIGNMENT)) {
11604 ocs_log_err(hw->os, "DMA allocation failed\n");
11605 ocs_free(hw->os, rq_buf, sizeof(*rq_buf) * count);
/*
 * Free an RQ buffer tracker array previously built by
 * ocs_hw_rx_buffer_alloc(): releases each element's DMA buffer, then the
 * array itself. NULL rq_buf is a no-op.
 */
11615 * @brief Free an ocs_hw_rx_buffer_t array.
11618 * The ocs_hw_rx_buffer_t array is freed, along with allocated DMA memory.
11620 * @param hw Pointer to HW object.
11621 * @param rq_buf Pointer to ocs_hw_rx_buffer_t array.
11622 * @param count Count of buffers in array.
11627 ocs_hw_rx_buffer_free(ocs_hw_t *hw, ocs_hw_rq_buffer_t *rq_buf, uint32_t count)
11629 ocs_t *ocs = hw->os;
11631 ocs_hw_rq_buffer_t *prq;
11633 if (rq_buf != NULL) {
11634 for (i = 0, prq = rq_buf; i < count; i++, prq++) {
11635 ocs_dma_free(ocs, &prq->dma);
11637 ocs_free(hw->os, rq_buf, sizeof(*rq_buf) * count);
/*
 * Allocate header and payload receive buffers for every hardware RQ.
 * Header buffers are OCS_HW_RQ_SIZE_HDR bytes; payload buffers use the
 * configured rq_default_buffer_size. Any allocation failure flips rc to
 * OCS_HW_RTN_ERROR, which is what the function ultimately reports.
 */
11642 * @brief Allocate the RQ data buffers.
11644 * @param hw Pointer to HW object.
11646 * @return Returns 0 on success, or a non-zero value on failure.
11649 ocs_hw_rx_allocate(ocs_hw_t *hw)
11651 ocs_t *ocs = hw->os;
11653 int32_t rc = OCS_HW_RTN_SUCCESS;
11654 uint32_t rqindex = 0;
11656 uint32_t hdr_size = OCS_HW_RQ_SIZE_HDR;
11657 uint32_t payload_size = hw->config.rq_default_buffer_size;
11661 for (i = 0; i < hw->hw_rq_count; i++) {
11664 /* Allocate header buffers */
11665 rq->hdr_buf = ocs_hw_rx_buffer_alloc(hw, rqindex, rq->entry_count, hdr_size);
11666 if (rq->hdr_buf == NULL) {
11667 ocs_log_err(ocs, "ocs_hw_rx_buffer_alloc hdr_buf failed\n");
11668 rc = OCS_HW_RTN_ERROR;
11672 ocs_log_debug(hw->os, "rq[%2d] rq_id %02d header %4d by %4d bytes\n", i, rq->hdr->id,
11673 rq->entry_count, hdr_size);
11677 /* Allocate payload buffers */
11678 rq->payload_buf = ocs_hw_rx_buffer_alloc(hw, rqindex, rq->entry_count, payload_size);
11679 if (rq->payload_buf == NULL) {
11680 ocs_log_err(ocs, "ocs_hw_rx_buffer_alloc fb_buf failed\n");
11681 rc = OCS_HW_RTN_ERROR;
11684 ocs_log_debug(hw->os, "rq[%2d] rq_id %02d default %4d by %4d bytes\n", i, rq->data->id,
11685 rq->entry_count, payload_size);
11689 return rc ? OCS_HW_RTN_ERROR : OCS_HW_RTN_SUCCESS;
/*
 * Prime every hardware RQ with its allocated buffers. For each RQ entry
 * (entry_count-1 per queue — RQ pair mode requires header and payload posted
 * together), a sequence object from seq_pool is pointed at the matching
 * header/payload buffers and handed back via ocs_hw_sequence_free(), which
 * posts it to the chip.
 */
11693 * @brief Post the RQ data buffers to the chip.
11695 * @param hw Pointer to HW object.
11697 * @return Returns 0 on success, or a non-zero value on failure.
11700 ocs_hw_rx_post(ocs_hw_t *hw)
11708 * In RQ pair mode, we MUST post the header and payload buffer at the
11711 for (rq_idx = 0, idx = 0; rq_idx < hw->hw_rq_count; rq_idx++) {
11712 hw_rq_t *rq = hw->hw_rq[rq_idx];
11714 for (i = 0; i < rq->entry_count-1; i++) {
11715 ocs_hw_sequence_t *seq = ocs_array_get(hw->seq_pool, idx++);
11716 ocs_hw_assert(seq != NULL);
11718 seq->header = &rq->hdr_buf[i];
11720 seq->payload = &rq->payload_buf[i];
11722 rc = ocs_hw_sequence_free(hw, seq);
/*
 * Release all RQ receive buffers allocated by ocs_hw_rx_allocate(), clearing
 * the hdr_buf/payload_buf pointers so a double-free is harmless.
 */
11736 * @brief Free the RQ data buffers.
11738 * @param hw Pointer to HW object.
11742 ocs_hw_rx_free(ocs_hw_t *hw)
11747 /* Free hw_rq buffers */
11748 for (i = 0; i < hw->hw_rq_count; i++) {
11751 ocs_hw_rx_buffer_free(hw, rq->hdr_buf, rq->entry_count);
11752 rq->hdr_buf = NULL;
11753 ocs_hw_rx_buffer_free(hw, rq->payload_buf, rq->entry_count);
11754 rq->payload_buf = NULL;
/*
 * Context for ocs_hw_async_call(): bundles the user callback (invoked from
 * ocs_hw_async_cb on NOP completion) with the embedded NOP mailbox command
 * buffer, so one allocation persists for the queued command's lifetime.
 */
11760 * @brief HW async call context structure.
11763 ocs_hw_async_cb_t callback;
11765 uint8_t cmd[SLI4_BMBX_SIZE];
11766 } ocs_hw_async_call_ctx_t;
/*
 * Completion handler for the NOP mailbox command posted by
 * ocs_hw_async_call(): forwards status/MQE to the user's callback (if set)
 * and frees the async-call context.
 */
11769 * @brief HW async callback handler
11772 * This function is called when the NOP mailbox command completes. The callback stored
11773 * in the requesting context is invoked.
11775 * @param hw Pointer to HW object.
11776 * @param status Completion status.
11777 * @param mqe Pointer to mailbox completion queue entry.
11778 * @param arg Caller-provided argument.
11783 ocs_hw_async_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
11785 ocs_hw_async_call_ctx_t *ctx = arg;
11788 if (ctx->callback != NULL) {
11789 (*ctx->callback)(hw, status, mqe, ctx->arg);
11791 ocs_free(hw->os, ctx, sizeof(*ctx));
/*
 * Arrange for 'callback(arg)' to run in mailbox/event-processing context by
 * submitting a COMMON_NOP mailbox command. The context (callback + command
 * buffer) is heap-allocated because submission may be queued; it is freed
 * here on format/submit failure, otherwise by ocs_hw_async_cb on completion.
 */
11796 * @brief Make an async callback using NOP mailbox command
11799 * Post a NOP mailbox command; the callback with argument is invoked upon completion
11800 * while in the event processing context.
11802 * @param hw Pointer to HW object.
11803 * @param callback Pointer to callback function.
11804 * @param arg Caller-provided callback.
11806 * @return Returns 0 on success, or a negative error code value on failure.
11809 ocs_hw_async_call(ocs_hw_t *hw, ocs_hw_async_cb_t callback, void *arg)
11812 ocs_hw_async_call_ctx_t *ctx;
11815 * Allocate a callback context (which includes the mailbox command buffer), we need
11816 * this to be persistent as the mailbox command submission may be queued and executed later
11819 ctx = ocs_malloc(hw->os, sizeof(*ctx), OCS_M_ZERO | OCS_M_NOWAIT);
11821 ocs_log_err(hw->os, "failed to malloc async call context\n");
11822 return OCS_HW_RTN_NO_MEMORY;
11824 ctx->callback = callback;
11827 /* Build and send a NOP mailbox command */
11828 if (sli_cmd_common_nop(&hw->sli, ctx->cmd, sizeof(ctx->cmd), 0) == 0) {
11829 ocs_log_err(hw->os, "COMMON_NOP format failure\n");
11830 ocs_free(hw->os, ctx, sizeof(*ctx));
11834 if (ocs_hw_command(hw, ctx->cmd, OCS_CMD_NOWAIT, ocs_hw_async_cb, ctx)) {
11835 ocs_log_err(hw->os, "COMMON_NOP command failure\n");
11836 ocs_free(hw->os, ctx, sizeof(*ctx));
/*
 * Lazily create the WQ request-tag pool (65536 hw_wq_callback_t entries —
 * the full 16-bit tag space) and reset it so every tag starts on the free
 * list with a cleared callback. Idempotent: an existing pool is reused.
 */
11843 * @brief Initialize the reqtag pool.
11846 * The WQ request tag pool is initialized.
11848 * @param hw Pointer to HW object.
11850 * @return Returns 0 on success, or a negative error code value on failure.
11853 ocs_hw_reqtag_init(ocs_hw_t *hw)
11855 if (hw->wq_reqtag_pool == NULL) {
11856 hw->wq_reqtag_pool = ocs_pool_alloc(hw->os, sizeof(hw_wq_callback_t), 65536, TRUE);
11857 if (hw->wq_reqtag_pool == NULL) {
11858 ocs_log_err(hw->os, "ocs_pool_alloc hw_wq_callback_t failed\n");
11859 return OCS_HW_RTN_NO_MEMORY;
11862 ocs_hw_reqtag_reset(hw);
11863 return OCS_HW_RTN_SUCCESS;
/*
 * Take a request tag from the pool and bind the caller's completion callback
 * and argument to it. A non-NULL callback is mandatory; a freshly allocated
 * tag is asserted to have no stale callback. Returns NULL when the pool is
 * exhausted.
 */
11867 * @brief Allocate a WQ request tag.
11869 * Allocate and populate a WQ request tag from the WQ request tag pool.
11871 * @param hw Pointer to HW object.
11872 * @param callback Callback function.
11873 * @param arg Pointer to callback argument.
11875 * @return Returns pointer to allocated WQ request tag, or NULL if object cannot be allocated.
11878 ocs_hw_reqtag_alloc(ocs_hw_t *hw, void (*callback)(void *arg, uint8_t *cqe, int32_t status), void *arg)
11880 hw_wq_callback_t *wqcb;
11882 ocs_hw_assert(callback != NULL);
11884 wqcb = ocs_pool_get(hw->wq_reqtag_pool);
11885 if (wqcb != NULL) {
11886 ocs_hw_assert(wqcb->callback == NULL);
11887 wqcb->callback = callback;
/*
 * Return a request tag to the pool. The callback is cleared first (and
 * asserted non-NULL, catching double-frees) so the tag is inert while on the
 * free list.
 */
11894 * @brief Free a WQ request tag.
11896 * Free the passed in WQ request tag.
11898 * @param hw Pointer to HW object.
11899 * @param wqcb Pointer to WQ request tag object to free.
11904 ocs_hw_reqtag_free(ocs_hw_t *hw, hw_wq_callback_t *wqcb)
11906 ocs_hw_assert(wqcb->callback != NULL);
11907 wqcb->callback = NULL;
11909 ocs_pool_put(hw->wq_reqtag_pool, wqcb);
/*
 * Index-based lookup into the request-tag pool (used to map a completed
 * WQE's tag back to its callback record); logs and returns NULL for an
 * unknown index.
 */
11913 * @brief Return WQ request tag by index.
11916 * Return pointer to WQ request tag object given an index.
11918 * @param hw Pointer to HW object.
11919 * @param instance_index Index of WQ request tag to return.
11921 * @return Pointer to WQ request tag, or NULL.
11924 ocs_hw_reqtag_get_instance(ocs_hw_t *hw, uint32_t instance_index)
11926 hw_wq_callback_t *wqcb;
11928 wqcb = ocs_pool_get_instance(hw->wq_reqtag_pool, instance_index);
11929 if (wqcb == NULL) {
11930 ocs_log_err(hw->os, "wqcb for instance %d is null\n", instance_index);
/*
 * Rebuild the request-tag free list from scratch: drain everything off the
 * pool's free list, then walk every instance, stamp its index, clear its
 * callback, and put it back — leaving all 64K tags free and inert.
 */
11936 * @brief Reset the WQ request tag pool.
11939 * Reset the WQ request tag pool, returning all to the free list.
11941 * @param hw pointer to HW object.
11946 ocs_hw_reqtag_reset(ocs_hw_t *hw)
11948 hw_wq_callback_t *wqcb;
11951 /* Remove all from freelist */
11952 while(ocs_pool_get(hw->wq_reqtag_pool) != NULL) {
11956 /* Put them all back */
11957 for (i = 0; ((wqcb = ocs_pool_get_instance(hw->wq_reqtag_pool, i)) != NULL); i++) {
11958 wqcb->instance_index = i;
11959 wqcb->callback = NULL;
11961 ocs_pool_put(hw->wq_reqtag_pool, wqcb);
/*
 * Backing function for the ocs_hw_assert() macro: prints the failed
 * condition with file/line context (fatal path — abort behavior is in the
 * elided tail).
 */
11966 * @brief Handle HW assertion
11968 * HW assert, display diagnostic message, and abort.
11970 * @param cond string describing failing assertion condition
11971 * @param filename file name
11972 * @param linenum line number
11977 _ocs_hw_assert(const char *cond, const char *filename, int linenum)
11979 ocs_printf("%s(%d): HW assertion (%s) failed\n", filename, linenum, cond);
/*
 * Backing function for the ocs_hw_verify() macro: like _ocs_hw_assert but
 * non-fatal — logs the failed condition and returns to the caller.
 */
11985 * @brief Handle HW verify
11987 * HW verify, display diagnostic message, dump stack and return.
11989 * @param cond string describing failing verify condition
11990 * @param filename file name
11991 * @param linenum line number
11996 _ocs_hw_verify(const char *cond, const char *filename, int linenum)
11998 ocs_printf("%s(%d): HW verify (%s) failed\n", filename, linenum, cond);
/*
 * Requeue a port-owned XRI after an auto-xfer-rdy buffer was dropped (DNRX):
 * first try to post a fresh buffer; if none is available the IO is parked on
 * io_port_dnrx for later. Otherwise a REQUEUE_XRI WQE is built — a format
 * failure returns the axr buffer to its pool and parks the IO — and
 * submitted on the IO's WQ (assigned round-robin via ocs_hw_queue_next_wq if
 * not already set), with stats counters updated before the write.
 */
12008 * @param hw Pointer to HW object.
12009 * @param io Pointer to HW IO
12011 * @return Return 0 if successful else returns -1
12014 ocs_hw_reque_xri( ocs_hw_t *hw, ocs_hw_io_t *io )
12018 rc = ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 1);
12020 ocs_list_add_tail(&hw->io_port_dnrx, io);
12022 goto exit_ocs_hw_reque_xri;
12025 io->auto_xfer_rdy_dnrx = 0;
12026 io->type = OCS_HW_IO_DNRX_REQUEUE;
12027 if (sli_requeue_xri_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->indicator, OCS_HW_REQUE_XRI_REGTAG, SLI4_CQ_DEFAULT)) {
12028 /* Clear buffer from XRI */
12029 ocs_pool_put(hw->auto_xfer_rdy_buf_pool, io->axr_buf);
12030 io->axr_buf = NULL;
12032 ocs_log_err(hw->os, "requeue_xri WQE error\n");
12033 ocs_list_add_tail(&hw->io_port_dnrx, io);
12036 goto exit_ocs_hw_reque_xri;
12039 if (io->wq == NULL) {
12040 io->wq = ocs_hw_queue_next_wq(hw, io);
12041 ocs_hw_assert(io->wq != NULL);
12045 * Add IO to active io wqe list before submitting, in case the
12046 * wcqe processing preempts this thread.
12048 OCS_STAT(hw->tcmd_wq_submit[io->wq->instance]++);
12049 OCS_STAT(io->wq->use_count++);
12051 rc = hw_wq_write(io->wq, &io->wqe);
12053 ocs_log_err(hw->os, "sli_queue_write reque xri failed: %d\n", rc);
12057 exit_ocs_hw_reque_xri:
12062 ocs_hw_get_def_wwn(ocs_t *ocs, uint32_t chan, uint64_t *wwpn, uint64_t *wwnn)
12064 sli4_t *sli4 = &ocs->hw.sli;
12066 uint8_t *payload = NULL;
12068 int indicator = sli4->config.extent[SLI_RSRC_FCOE_VPI].base[0] + chan;
12070 /* allocate memory for the service parameters */
12071 if (ocs_dma_alloc(ocs, &dma, 112, 4)) {
12072 ocs_log_err(ocs, "Failed to allocate DMA memory\n");
12076 if (0 == sli_cmd_read_sparm64(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
12077 &dma, indicator)) {
12078 ocs_log_err(ocs, "READ_SPARM64 allocation failure\n");
12079 ocs_dma_free(ocs, &dma);
12083 if (sli_bmbx_command(sli4)) {
12084 ocs_log_err(ocs, "READ_SPARM64 command failure\n");
12085 ocs_dma_free(ocs, &dma);
12089 payload = dma.virt;
12090 ocs_memcpy(wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET, sizeof(*wwpn));
12091 ocs_memcpy(wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET, sizeof(*wwnn));
12092 ocs_dma_free(ocs, &dma);
12097 * @page fc_hw_api_overview HW APIs
12098 * - @ref devInitShutdown
12105 * <div class="overview">
12106 * The Hardware Abstraction Layer (HW) insulates the higher-level code from the SLI-4
12107 * message details, but the higher level code must still manage domains, ports,
12108 * IT nexuses, and IOs. The HW API is designed to help the higher level manage
12109 * these objects.<br><br>
12111 * The HW uses function callbacks to notify the higher-level code of events
12112 * that are received from the chip. There are currently three types of
12113 * functions that may be registered:
12115 * <ul><li>domain – This function is called whenever a domain event is generated
12116 * within the HW. Examples include a new FCF is discovered, a connection
12117 * to a domain is disrupted, and allocation callbacks.</li>
12118 * <li>unsolicited – This function is called whenever new data is received in
12119 * the SLI-4 receive queue.</li>
12120 * <li>rnode – This function is called for remote node events, such as attach status
12121 * and allocation callbacks.</li></ul>
12123 * Upper layer functions may be registered by using the ocs_hw_callback() function.
12125 * <img src="elx_fc_hw.jpg" alt="FC/FCoE HW" title="FC/FCoE HW" align="right"/>
12126 * <h2>FC/FCoE HW API</h2>
12127 * The FC/FCoE HW component builds upon the SLI-4 component to establish a flexible
12128 * interface for creating the necessary common objects and sending I/Os. It may be used
12129 * “as is” in customer implementations or it can serve as an example of typical interactions
12130 * between a driver and the SLI-4 hardware. The broad categories of functionality include:
12132 * <ul><li>Setting-up and tearing-down of the HW.</li>
12133 * <li>Allocating and using the common objects (SLI Port, domain, remote node).</li>
12134 * <li>Sending and receiving I/Os.</li></ul>
12136 * <h3>HW Setup</h3>
12137 * To set up the HW:
12140 * <li>Set up the HW object using ocs_hw_setup().<br>
12141 * This step performs a basic configuration of the SLI-4 component and the HW to
12142 * enable querying the hardware for its capabilities. At this stage, the HW is not
12143 * capable of general operations (such as, receiving events or sending I/Os).</li><br><br>
12144 * <li>Configure the HW according to the driver requirements.<br>
12145 * The HW provides functions to discover hardware capabilities (ocs_hw_get()), as
12146 * well as configures the amount of resources required (ocs_hw_set()). The driver
12147 * must also register callback functions (ocs_hw_callback()) to receive notification of
12148 * various asynchronous events.<br><br>
12149 * @b Note: Once configured, the driver must initialize the HW (ocs_hw_init()). This
12150 * step creates the underlying queues, commits resources to the hardware, and
12151 * prepares the hardware for operation. While the hardware is operational, the
12152 * port is not online, and cannot send or receive data.</li><br><br>
12154 * <li>Finally, the driver can bring the port online (ocs_hw_port_control()).<br>
12155 * When the link comes up, the HW determines if a domain is present and notifies the
12156 * driver using the domain callback function. This is the starting point of the driver's
12157 * interaction with the common objects.<br><br>
12158 * @b Note: For FCoE, there may be more than one domain available and, therefore,
12159 * more than one callback.</li>
12162 * <h3>Allocating and Using Common Objects</h3>
12163 * Common objects provide a mechanism through which the various OneCore Storage
12164 * driver components share and track information. These data structures are primarily
12165 * used to track SLI component information but can be extended by other components, if
12166 * needed. The main objects are:
12168 * <ul><li>DMA – the ocs_dma_t object describes a memory region suitable for direct
12169 * memory access (DMA) transactions.</li>
12170 * <li>SCSI domain – the ocs_domain_t object represents the SCSI domain, including
12171 * any infrastructure devices such as FC switches and FC forwarders. The domain
12172 * object contains both an FCFI and a VFI.</li>
12173 * <li>SLI Port (sport) – the ocs_sli_port_t object represents the connection between
12174 * the driver and the SCSI domain. The SLI Port object contains a VPI.</li>
12175 * <li>Remote node – the ocs_remote_node_t represents a connection between the SLI
12176 * Port and another device in the SCSI domain. The node object contains an RPI.</li></ul>
12178 * Before the driver can send I/Os, it must allocate the SCSI domain, SLI Port, and remote
12179 * node common objects and establish the connections between them. The goal is to
12180 * connect the driver to the SCSI domain to exchange I/Os with other devices. These
12181 * common object connections are shown in the following figure, FC Driver Common Objects:
12182 * <img src="elx_fc_common_objects.jpg"
12183 * alt="FC Driver Common Objects" title="FC Driver Common Objects" align="center"/>
12185 * The first step is to create a connection to the domain by allocating an SLI Port object.
12186 * The SLI Port object represents a particular FC ID and must be initialized with one. With
12187 * the SLI Port object, the driver can discover the available SCSI domain(s). On identifying
12188 * a domain, the driver allocates a domain object and attaches to it using the previous SLI
12189 * port object.<br><br>
12191 * @b Note: In some cases, the driver may need to negotiate service parameters (that is,
12192 * FLOGI) with the domain before attaching.<br><br>
12194 * Once attached to the domain, the driver can discover and attach to other devices
12195 * (remote nodes). The exact discovery method depends on the driver, but it typically
12196 * includes using a position map, querying the fabric name server, or an out-of-band
12197 * method. In most cases, it is necessary to log in with devices before performing I/Os.
12198 * Prior to sending login-related ELS commands (ocs_hw_srrs_send()), the driver must
12199 * allocate a remote node object (ocs_hw_node_alloc()). If the login negotiation is
12200 * successful, the driver must attach the nodes (ocs_hw_node_attach()) to the SLI Port
12201 * before exchanging FCP I/O.<br><br>
12203 * @b Note: The HW manages both the well known fabric address and the name server as
12204 * nodes in the domain. Therefore, the driver must allocate node objects prior to
12205 * communicating with either of these entities.
12207 * <h3>Sending and Receiving I/Os</h3>
12208 * The HW provides separate interfaces for sending BLS/ ELS/ FC-CT and FCP, but the
12209 * commands are conceptually similar. Since the commands complete asynchronously,
12210 * the caller must provide a HW I/O object that maintains the I/O state, as well as
12211 * provide a callback function. The driver may use the same callback function for all I/O
12212 * operations, but each operation must use a unique HW I/O object. In the SLI-4
12213 * architecture, there is a direct association between the HW I/O object and the SGL used
12214 * to describe the data. Therefore, a driver typically performs the following operations:
12216 * <ul><li>Allocates a HW I/O object (ocs_hw_io_alloc()).</li>
12217 * <li>Formats the SGL, specifying both the HW I/O object and the SGL.
12218 * (ocs_hw_io_init_sges() and ocs_hw_io_add_sge()).</li>
12219 * <li>Sends the HW I/O (ocs_hw_io_send()).</li></ul>
12221 * <h3>HW Tear Down</h3>
12222 * To tear-down the HW:
12224 * <ol><li>Take the port offline (ocs_hw_port_control()) to prevent receiving further
12225 * data and events.</li>
12226 * <li>Destroy the HW object (ocs_hw_teardown()).</li>
12227 * <li>Free any memory used by the HW, such as buffers for unsolicited data.</li></ol>
12229 * </div><!-- overview -->
12237 * This contains all hw runtime workaround code. Based on the asic type,
12238 * asic revision, and range of fw revisions, a particular workaround may be enabled.
12240 * A workaround may consist of overriding a particular HW/SLI4 value that was initialized
12241 * during ocs_hw_setup() (for example the MAX_QUEUE overrides for mis-reported queue
12242 * sizes). Or if required, elements of the ocs_hw_workaround_t structure may be set to
12243 * control specific runtime behavior.
12245 * It is intended that the controls in ocs_hw_workaround_t be defined functionally. So we
12246 * would have the driver look like: "if (hw->workaround.enable_xxx) then ...", rather than
12247 * what we might previously see as "if this is a BE3, then do xxx"
/* Sentinel bounds/wildcards used by the workaround-matching table below. */
12252 #define HW_FWREV_ZERO (0ull)  /* lowest firmware revision: matches everything as a lower bound */
12253 #define HW_FWREV_MAX (~0ull)  /* highest firmware revision: matches everything as an upper bound */
12255 #define SLI4_ASIC_TYPE_ANY 0  /* wildcard: entry applies to any ASIC type */
12256 #define SLI4_ASIC_REV_ANY 0   /* wildcard: entry applies to any ASIC revision */
/**
 * @brief Internal definition of workarounds
 */
typedef enum {
	HW_WORKAROUND_TEST = 1,
	HW_WORKAROUND_MAX_QUEUE,		/**< Limits all queues */
	HW_WORKAROUND_MAX_RQ,			/**< Limits only the RQ */
	HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH,
	HW_WORKAROUND_WQE_COUNT_METHOD,
	HW_WORKAROUND_RQE_COUNT_METHOD,
	HW_WORKAROUND_USE_UNREGISTERD_RPI,
	HW_WORKAROUND_DISABLE_AR_TGT_DIF,	/**< Disable of auto-response target DIF */
	HW_WORKAROUND_DISABLE_SET_DUMP_LOC,
	HW_WORKAROUND_USE_DIF_QUARANTINE,
	HW_WORKAROUND_USE_DIF_SEC_XRI,		/**< Use secondary xri for multiple data phases */
	HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB,	/**< FCFI reported in SRB not correct, use "first" registered domain */
	HW_WORKAROUND_FW_VERSION_TOO_LOW,	/**< The FW version is not the min version supported by this driver */
	HW_WORKAROUND_SGLC_MISREPORTED,		/**< Chip supports SGL Chaining but SGLC is not set in SLI4_PARAMS */
	HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE,	/**< Don't use SEND_FRAME capable if FW version is too old */
} hw_workaround_e;
12281 * @brief Internal workaround structure instance
12285 sli4_asic_type_e asic_type;
12286 sli4_asic_rev_e asic_rev;
12287 uint64_t fwrev_low;
12288 uint64_t fwrev_high;
12290 hw_workaround_e workaround;
12294 static hw_workaround_t hw_workarounds[] = {
12295 {SLI4_ASIC_TYPE_ANY, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12296 HW_WORKAROUND_TEST, 999},
12298 /* Bug: 127585: if_type == 2 returns 0 for total length placed on
12299 * FCP_TSEND64_WQE completions. Note, original driver code enables this
12300 * workaround for all asic types
12302 {SLI4_ASIC_TYPE_ANY, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12303 HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH, 0},
12305 /* Bug: unknown, Lancer A0 has mis-reported max queue depth */
12306 {SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_A0, HW_FWREV_ZERO, HW_FWREV_MAX,
12307 HW_WORKAROUND_MAX_QUEUE, 2048},
12309 /* Bug: 143399, BE3 has mis-reported max RQ queue depth */
12310 {SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,6,293,0),
12311 HW_WORKAROUND_MAX_RQ, 2048},
12313 /* Bug: 143399, skyhawk has mis-reported max RQ queue depth */
12314 {SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(10,0,594,0),
12315 HW_WORKAROUND_MAX_RQ, 2048},
12317 /* Bug: 103487, BE3 before f/w 4.2.314.0 has mis-reported WQE count method */
12318 {SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,2,314,0),
12319 HW_WORKAROUND_WQE_COUNT_METHOD, 1},
12321 /* Bug: 103487, BE3 before f/w 4.2.314.0 has mis-reported RQE count method */
12322 {SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,2,314,0),
12323 HW_WORKAROUND_RQE_COUNT_METHOD, 1},
12325 /* Bug: 142968, BE3 UE with RPI == 0xffff */
12326 {SLI4_ASIC_TYPE_BE3, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12327 HW_WORKAROUND_USE_UNREGISTERD_RPI, 0},
12329 /* Bug: unknown, Skyhawk won't support auto-response on target T10-PI */
12330 {SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12331 HW_WORKAROUND_DISABLE_AR_TGT_DIF, 0},
12333 {SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(1,1,65,0),
12334 HW_WORKAROUND_DISABLE_SET_DUMP_LOC, 0},
12336 /* Bug: 160124, Skyhawk quarantine DIF XRIs */
12337 {SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12338 HW_WORKAROUND_USE_DIF_QUARANTINE, 0},
12340 /* Bug: 161832, Skyhawk use secondary XRI for multiple data phase TRECV */
12341 {SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12342 HW_WORKAROUND_USE_DIF_SEC_XRI, 0},
12344 /* Bug: xxxxxx, FCFI reported in SRB not corrrect */
12345 {SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12346 HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB, 0},
12348 /* Bug: 165642, FW version check for driver */
12349 {SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_1(OCS_MIN_FW_VER_LANCER),
12350 HW_WORKAROUND_FW_VERSION_TOO_LOW, 0},
12352 {SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_1(OCS_MIN_FW_VER_SKYHAWK),
12353 HW_WORKAROUND_FW_VERSION_TOO_LOW, 0},
12355 /* Bug 177061, Lancer FW does not set the SGLC bit */
12356 {SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12357 HW_WORKAROUND_SGLC_MISREPORTED, 0},
12359 /* BZ 181208/183914, enable this workaround for ALL revisions */
12360 {SLI4_ASIC_TYPE_ANY, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12361 HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE, 0},
12365 * @brief Function prototypes
12368 static int32_t ocs_hw_workaround_match(ocs_hw_t *hw, hw_workaround_t *w);
12371 * @brief Parse the firmware version (name)
12373 * Parse a string of the form a.b.c.d, returning a uint64_t packed as defined
12374 * by the HW_FWREV() macro
12376 * @param fwrev_string pointer to the firmware string
12378 * @return packed firmware revision value
12382 parse_fw_version(const char *fwrev_string)
12388 for (p = fwrev_string, i = 0; *p && (i < 4); i ++) {
12389 v[i] = ocs_strtoul(p, 0, 0);
12390 while(*p && *p != '.') {
12398 /* Special case for bootleg releases with f/w rev 0.0.9999.0, set to max value */
12399 if (v[2] == 9999) {
12400 return HW_FWREV_MAX;
12402 return HW_FWREV(v[0], v[1], v[2], v[3]);
12407 * @brief Test for a workaround match
12409 * Looks at the asic type, asic revision, and fw revision, and returns TRUE if match.
12411 * @param hw Pointer to the HW structure
12412 * @param w Pointer to a workaround structure entry
12414 * @return Return TRUE for a match
12418 ocs_hw_workaround_match(ocs_hw_t *hw, hw_workaround_t *w)
12420 return (((w->asic_type == SLI4_ASIC_TYPE_ANY) || (w->asic_type == hw->sli.asic_type)) &&
12421 ((w->asic_rev == SLI4_ASIC_REV_ANY) || (w->asic_rev == hw->sli.asic_rev)) &&
12422 (w->fwrev_low <= hw->workaround.fwrev) &&
12423 ((w->fwrev_high == HW_FWREV_MAX) || (hw->workaround.fwrev < w->fwrev_high)));
12427 * @brief Setup HW runtime workarounds
12429 * The function is called at the end of ocs_hw_setup() to setup any runtime workarounds
12430 * based on the HW/SLI setup.
12432 * @param hw Pointer to HW structure
12438 ocs_hw_workaround_setup(struct ocs_hw_s *hw)
12440 hw_workaround_t *w;
12441 sli4_t *sli4 = &hw->sli;
12444 /* Initialize the workaround settings */
12445 ocs_memset(&hw->workaround, 0, sizeof(hw->workaround));
12447 /* If hw_war_version is non-null, then its a value that was set by a module parameter
12448 * (sorry for the break in abstraction, but workarounds are ... well, workarounds)
12451 if (hw->hw_war_version) {
12452 hw->workaround.fwrev = parse_fw_version(hw->hw_war_version);
12454 hw->workaround.fwrev = parse_fw_version((char*) sli4->config.fw_name[0]);
12457 /* Walk the workaround list, if a match is found, then handle it */
12458 for (i = 0, w = hw_workarounds; i < ARRAY_SIZE(hw_workarounds); i++, w++) {
12459 if (ocs_hw_workaround_match(hw, w)) {
12460 switch(w->workaround) {
12462 case HW_WORKAROUND_TEST: {
12463 ocs_log_debug(hw->os, "Override: test: %d\n", w->value);
12467 case HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH: {
12468 ocs_log_debug(hw->os, "HW Workaround: retain TSEND IO length\n");
12469 hw->workaround.retain_tsend_io_length = 1;
12472 case HW_WORKAROUND_MAX_QUEUE: {
12475 ocs_log_debug(hw->os, "HW Workaround: override max_qentries: %d\n", w->value);
12476 for (q = SLI_QTYPE_EQ; q < SLI_QTYPE_MAX; q++) {
12477 if (hw->num_qentries[q] > w->value) {
12478 hw->num_qentries[q] = w->value;
12483 case HW_WORKAROUND_MAX_RQ: {
12484 ocs_log_debug(hw->os, "HW Workaround: override RQ max_qentries: %d\n", w->value);
12485 if (hw->num_qentries[SLI_QTYPE_RQ] > w->value) {
12486 hw->num_qentries[SLI_QTYPE_RQ] = w->value;
12490 case HW_WORKAROUND_WQE_COUNT_METHOD: {
12491 ocs_log_debug(hw->os, "HW Workaround: set WQE count method=%d\n", w->value);
12492 sli4->config.count_method[SLI_QTYPE_WQ] = w->value;
12493 sli_calc_max_qentries(sli4);
12496 case HW_WORKAROUND_RQE_COUNT_METHOD: {
12497 ocs_log_debug(hw->os, "HW Workaround: set RQE count method=%d\n", w->value);
12498 sli4->config.count_method[SLI_QTYPE_RQ] = w->value;
12499 sli_calc_max_qentries(sli4);
12502 case HW_WORKAROUND_USE_UNREGISTERD_RPI:
12503 ocs_log_debug(hw->os, "HW Workaround: use unreg'd RPI if rnode->indicator == 0xFFFF\n");
12504 hw->workaround.use_unregistered_rpi = TRUE;
12506 * Allocate an RPI that is never registered, to be used in the case where
12507 * a node has been unregistered, and its indicator (RPI) value is set to 0xFFFF
12509 if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &hw->workaround.unregistered_rid,
12510 &hw->workaround.unregistered_index)) {
12511 ocs_log_err(hw->os, "sli_resource_alloc unregistered RPI failed\n");
12512 hw->workaround.use_unregistered_rpi = FALSE;
12515 case HW_WORKAROUND_DISABLE_AR_TGT_DIF:
12516 ocs_log_debug(hw->os, "HW Workaround: disable AR on T10-PI TSEND\n");
12517 hw->workaround.disable_ar_tgt_dif = TRUE;
12519 case HW_WORKAROUND_DISABLE_SET_DUMP_LOC:
12520 ocs_log_debug(hw->os, "HW Workaround: disable set_dump_loc\n");
12521 hw->workaround.disable_dump_loc = TRUE;
12523 case HW_WORKAROUND_USE_DIF_QUARANTINE:
12524 ocs_log_debug(hw->os, "HW Workaround: use DIF quarantine\n");
12525 hw->workaround.use_dif_quarantine = TRUE;
12527 case HW_WORKAROUND_USE_DIF_SEC_XRI:
12528 ocs_log_debug(hw->os, "HW Workaround: use DIF secondary xri\n");
12529 hw->workaround.use_dif_sec_xri = TRUE;
12531 case HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB:
12532 ocs_log_debug(hw->os, "HW Workaround: override FCFI in SRB\n");
12533 hw->workaround.override_fcfi = TRUE;
12536 case HW_WORKAROUND_FW_VERSION_TOO_LOW:
12537 ocs_log_debug(hw->os, "HW Workaround: fw version is below the minimum for this driver\n");
12538 hw->workaround.fw_version_too_low = TRUE;
12540 case HW_WORKAROUND_SGLC_MISREPORTED:
12541 ocs_log_debug(hw->os, "HW Workaround: SGLC misreported - chaining is enabled\n");
12542 hw->workaround.sglc_misreported = TRUE;
12544 case HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE:
12545 ocs_log_debug(hw->os, "HW Workaround: not SEND_FRAME capable - disabled\n");
12546 hw->workaround.ignore_send_frame = TRUE;
12548 } /* switch(w->workaround) */