/* SPDX-License-Identifier: BSD-3-Clause */
/* Copyright (c) 2020, Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "ice_common.h"
#include "ice_sched.h"
#include "ice_adminq_cmd.h"

#include "ice_flow.h"
#include "ice_switch.h"

#define ICE_PF_RESET_WAIT_COUNT	300

/**
 * ice_set_mac_type - Sets MAC type
 * @hw: pointer to the HW structure
 *
 * This function sets the MAC type of the adapter based on the
 * vendor ID and device ID stored in the HW structure.
 */
enum ice_status ice_set_mac_type(struct ice_hw *hw)
{
	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	if (hw->vendor_id != ICE_INTEL_VENDOR_ID)
		return ICE_ERR_DEVICE_NOT_SUPPORTED;

	switch (hw->device_id) {
	case ICE_DEV_ID_E810C_BACKPLANE:
	case ICE_DEV_ID_E810C_QSFP:
	case ICE_DEV_ID_E810C_SFP:
	case ICE_DEV_ID_E810_XXV_BACKPLANE:
	case ICE_DEV_ID_E810_XXV_QSFP:
	case ICE_DEV_ID_E810_XXV_SFP:
		hw->mac_type = ICE_MAC_E810;
		break;
	case ICE_DEV_ID_E822C_10G_BASE_T:
	case ICE_DEV_ID_E822C_BACKPLANE:
	case ICE_DEV_ID_E822C_QSFP:
	case ICE_DEV_ID_E822C_SFP:
	case ICE_DEV_ID_E822C_SGMII:
	case ICE_DEV_ID_E822L_10G_BASE_T:
	case ICE_DEV_ID_E822L_BACKPLANE:
	case ICE_DEV_ID_E822L_SFP:
	case ICE_DEV_ID_E822L_SGMII:
	case ICE_DEV_ID_E823L_10G_BASE_T:
	case ICE_DEV_ID_E823L_1GBE:
	case ICE_DEV_ID_E823L_BACKPLANE:
	case ICE_DEV_ID_E823L_QSFP:
	case ICE_DEV_ID_E823L_SFP:
		hw->mac_type = ICE_MAC_GENERIC;
		break;
	default:
		hw->mac_type = ICE_MAC_UNKNOWN;
		break;
	}

	ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
	return ICE_SUCCESS;
}

/**
 * ice_clear_pf_cfg - Clear PF configuration
 * @hw: pointer to the hardware structure
 *
 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
 * configuration, flow director filters, etc.).
 */
enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
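
/* ice_clear_pf_cfg() above is the minimal template shared by the "direct"
 * (bufferless) admin queue wrappers in this file: fill a default descriptor
 * with the opcode, then post it with a NULL buffer and zero length.
 * Illustrative sketch only, for any direct opcode ('opcode' and 'status'
 * stand in for the caller's names):
 *
 *	struct ice_aq_desc desc;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, opcode);
 *	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
 *
 * Indirect commands pass a data buffer and its size instead, as
 * ice_aq_manage_mac_read() below does.
 */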

/**
 * ice_aq_manage_mac_read - manage MAC address read command
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the manage MAC read response
 * @buf_size: Size of the virtual buffer
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to return per PF station MAC address (0x0107).
 * NOTE: Upon successful completion of this command, MAC address information
 * is returned in user specified buffer. Please interpret user specified
 * buffer as "manage_mac_read" response.
 * Response such as various MAC addresses are stored in HW struct (port.mac)
 * ice_aq_discover_caps is expected to be called before this function is called.
 */
enum ice_status
ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_read_resp *resp;
	struct ice_aqc_manage_mac_read *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags;
	u8 i;

	cmd = &desc.params.mac_read;

	if (buf_size < sizeof(*resp))
		return ICE_ERR_BUF_TOO_SHORT;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (status)
		return status;

	resp = (struct ice_aqc_manage_mac_read_resp *)buf;
	flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;

	if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
		ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
		return ICE_ERR_CFG;
	}

	/* A single port can report up to two (LAN and WoL) addresses */
	for (i = 0; i < cmd->num_addr; i++)
		if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
			ice_memcpy(hw->port_info->mac.lan_addr,
				   resp[i].mac_addr, ETH_ALEN,
				   ICE_DMA_TO_NONDMA);
			ice_memcpy(hw->port_info->mac.perm_addr,
				   resp[i].mac_addr,
				   ETH_ALEN, ICE_DMA_TO_NONDMA);
			break;
		}
	return ICE_SUCCESS;
}

/**
 * ice_aq_get_phy_caps - returns PHY capabilities
 * @pi: port information structure
 * @qual_mods: report qualified modules
 * @report_mode: report mode capabilities
 * @pcaps: structure for PHY capabilities to be filled
 * @cd: pointer to command details structure or NULL
 *
 * Returns the various PHY capabilities supported on the Port (0x0600)
 */
enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
		    struct ice_aqc_get_phy_caps_data *pcaps,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_phy_caps *cmd;
	u16 pcaps_size = sizeof(*pcaps);
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_phy;

	if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);

	if (qual_mods)
		cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);

	cmd->param0 |= CPU_TO_LE16(report_mode);
	status = ice_aq_send_cmd(pi->hw, &desc, pcaps, pcaps_size, cd);

	if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
		pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
		pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
	}

	return status;
}
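
/* Typical call sequence, mirroring the use in ice_init_hw() later in this
 * file (illustrative sketch only): the caller allocates the response
 * structure and picks a report mode; ICE_AQC_REPORT_TOPO_CAP additionally
 * caches the PHY types in pi->phy, as seen above.
 *
 *	struct ice_aqc_get_phy_caps_data *pcaps;
 *	enum ice_status status;
 *
 *	pcaps = (struct ice_aqc_get_phy_caps_data *)
 *		ice_malloc(hw, sizeof(*pcaps));
 *	if (!pcaps)
 *		return ICE_ERR_NO_MEMORY;
 *	status = ice_aq_get_phy_caps(hw->port_info, false,
 *				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
 *	ice_free(hw, pcaps);
 */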

/**
 * ice_aq_get_link_topo_handle - get link topology node return status
 * @pi: port information structure
 * @node_type: requested node type
 * @cd: pointer to command details structure or NULL
 *
 * Get link topology node return status for specified node type (0x06E0)
 *
 * Node type cage can be used to determine if cage is present. If AQC
 * returns error (ENOENT), then no cage present. If no cage present, then
 * connection type is backplane or BASE-T.
 */
static enum ice_status
ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
			    struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_topo *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.get_link_topo;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);

	cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
				   ICE_AQC_LINK_TOPO_NODE_CTX_S);

	/* set node type */
	cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_is_media_cage_present
 * @pi: port information structure
 *
 * Returns true if media cage is present, else false. If no cage, then
 * media type is backplane or BASE-T.
 */
static bool ice_is_media_cage_present(struct ice_port_info *pi)
{
	/* Node type cage can be used to determine if cage is present. If AQC
	 * returns error (ENOENT), then no cage present. If no cage present then
	 * connection type is backplane or BASE-T.
	 */
	return !ice_aq_get_link_topo_handle(pi,
					    ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
					    NULL);
}

/**
 * ice_get_media_type - Gets media type
 * @pi: port information structure
 */
static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
{
	struct ice_link_status *hw_link_info;

	if (!pi)
		return ICE_MEDIA_UNKNOWN;

	hw_link_info = &pi->phy.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ICE_MEDIA_UNKNOWN;

	if (hw_link_info->phy_type_low) {
		switch (hw_link_info->phy_type_low) {
		case ICE_PHY_TYPE_LOW_1000BASE_SX:
		case ICE_PHY_TYPE_LOW_1000BASE_LX:
		case ICE_PHY_TYPE_LOW_10GBASE_SR:
		case ICE_PHY_TYPE_LOW_10GBASE_LR:
		case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		case ICE_PHY_TYPE_LOW_25GBASE_SR:
		case ICE_PHY_TYPE_LOW_25GBASE_LR:
		case ICE_PHY_TYPE_LOW_40GBASE_SR4:
		case ICE_PHY_TYPE_LOW_40GBASE_LR4:
		case ICE_PHY_TYPE_LOW_50GBASE_SR2:
		case ICE_PHY_TYPE_LOW_50GBASE_LR2:
		case ICE_PHY_TYPE_LOW_50GBASE_SR:
		case ICE_PHY_TYPE_LOW_50GBASE_FR:
		case ICE_PHY_TYPE_LOW_50GBASE_LR:
		case ICE_PHY_TYPE_LOW_100GBASE_SR4:
		case ICE_PHY_TYPE_LOW_100GBASE_LR4:
		case ICE_PHY_TYPE_LOW_100GBASE_SR2:
		case ICE_PHY_TYPE_LOW_100GBASE_DR:
			return ICE_MEDIA_FIBER;
		case ICE_PHY_TYPE_LOW_100BASE_TX:
		case ICE_PHY_TYPE_LOW_1000BASE_T:
		case ICE_PHY_TYPE_LOW_2500BASE_T:
		case ICE_PHY_TYPE_LOW_5GBASE_T:
		case ICE_PHY_TYPE_LOW_10GBASE_T:
		case ICE_PHY_TYPE_LOW_25GBASE_T:
			return ICE_MEDIA_BASET;
		case ICE_PHY_TYPE_LOW_10G_SFI_DA:
		case ICE_PHY_TYPE_LOW_25GBASE_CR:
		case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
		case ICE_PHY_TYPE_LOW_25GBASE_CR1:
		case ICE_PHY_TYPE_LOW_40GBASE_CR4:
		case ICE_PHY_TYPE_LOW_50GBASE_CR2:
		case ICE_PHY_TYPE_LOW_50GBASE_CP:
		case ICE_PHY_TYPE_LOW_100GBASE_CR4:
		case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
		case ICE_PHY_TYPE_LOW_100GBASE_CP2:
			return ICE_MEDIA_DA;
		case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		case ICE_PHY_TYPE_LOW_40G_XLAUI:
		case ICE_PHY_TYPE_LOW_50G_LAUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI2:
		case ICE_PHY_TYPE_LOW_50G_AUI1:
		case ICE_PHY_TYPE_LOW_100G_AUI4:
		case ICE_PHY_TYPE_LOW_100G_CAUI4:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			/* fall-through */
		case ICE_PHY_TYPE_LOW_1000BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_KX:
		case ICE_PHY_TYPE_LOW_2500BASE_X:
		case ICE_PHY_TYPE_LOW_5GBASE_KR:
		case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR:
		case ICE_PHY_TYPE_LOW_25GBASE_KR1:
		case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
		case ICE_PHY_TYPE_LOW_40GBASE_KR4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
		case ICE_PHY_TYPE_LOW_50GBASE_KR2:
		case ICE_PHY_TYPE_LOW_100GBASE_KR4:
		case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	} else {
		switch (hw_link_info->phy_type_high) {
		case ICE_PHY_TYPE_HIGH_100G_AUI2:
			if (ice_is_media_cage_present(pi))
				return ICE_MEDIA_DA;
			/* fall-through */
		case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
			return ICE_MEDIA_BACKPLANE;
		}
	}
	return ICE_MEDIA_UNKNOWN;
}

/**
 * ice_aq_get_link_info
 * @pi: port information structure
 * @ena_lse: enable/disable LinkStatusEvent reporting
 * @link: pointer to link status structure - optional
 * @cd: pointer to command details structure or NULL
 *
 * Get Link Status (0x607). Returns the link status of the adapter.
 */
enum ice_status
ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
		     struct ice_link_status *link, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_link_status_data link_data = { 0 };
	struct ice_aqc_get_link_status *resp;
	struct ice_link_status *li_old, *li;
	enum ice_media_type *hw_media_type;
	struct ice_fc_info *hw_fc_info;
	bool tx_pause, rx_pause;
	struct ice_aq_desc desc;
	enum ice_status status;
	struct ice_hw *hw;
	u16 cmd_flags;

	if (!pi)
		return ICE_ERR_PARAM;
	hw = pi->hw;

	li_old = &pi->phy.link_info_old;
	hw_media_type = &pi->phy.media_type;
	li = &pi->phy.link_info;
	hw_fc_info = &pi->fc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
	cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = CPU_TO_LE16(cmd_flags);
	resp->lport_num = pi->lport;

	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);

	if (status != ICE_SUCCESS)
		return status;

	/* save off old link status information */
	*li_old = *li;

	/* update current link status information */
	li->link_speed = LE16_TO_CPU(link_data.link_speed);
	li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
	li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
	*hw_media_type = ice_get_media_type(pi);
	li->link_info = link_data.link_info;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
				      ICE_AQ_CFG_PACING_TYPE_M);

	/* update fc info */
	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ICE_FC_FULL;
	else if (tx_pause)
		hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
	else if (rx_pause)
		hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
	else
		hw_fc_info->current_mode = ICE_FC_NONE;

	li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));

	ice_debug(hw, ICE_DBG_LINK, "link_speed = 0x%x\n", li->link_speed);
	ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
		  (unsigned long long)li->phy_type_low);
	ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
		  (unsigned long long)li->phy_type_high);
	ice_debug(hw, ICE_DBG_LINK, "media_type = 0x%x\n", *hw_media_type);
	ice_debug(hw, ICE_DBG_LINK, "link_info = 0x%x\n", li->link_info);
	ice_debug(hw, ICE_DBG_LINK, "an_info = 0x%x\n", li->an_info);
	ice_debug(hw, ICE_DBG_LINK, "ext_info = 0x%x\n", li->ext_info);
	ice_debug(hw, ICE_DBG_LINK, "lse_ena = 0x%x\n", li->lse_ena);
	ice_debug(hw, ICE_DBG_LINK, "max_frame = 0x%x\n", li->max_frame_size);
	ice_debug(hw, ICE_DBG_LINK, "pacing = 0x%x\n", li->pacing);

	/* save link status information */
	if (link)
		*link = *li;

	/* flag cleared so calling functions don't call AQ again */
	pi->phy.get_link_info = false;

	return ICE_SUCCESS;
}
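
/* Callers typically refresh and then consume the cached copy rather than
 * passing a 'link' buffer. Illustrative sketch only (ICE_AQ_LINK_UP is the
 * link-up bit defined in ice_adminq_cmd.h; what is done with the speed is
 * up to the caller):
 *
 *	struct ice_port_info *pi = hw->port_info;
 *
 *	if (!ice_aq_get_link_info(pi, false, NULL, NULL) &&
 *	    (pi->phy.link_info.link_info & ICE_AQ_LINK_UP))
 *		(... link is up; pi->phy.link_info.link_speed is valid ...)
 */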

/**
 * ice_aq_set_mac_cfg
 * @hw: pointer to the HW struct
 * @max_frame_size: Maximum Frame Size to be supported
 * @cd: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603)
 */
enum ice_status
ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
{
	u16 fc_threshold_val, tx_timer_val;
	struct ice_aqc_set_mac_cfg *cmd;
	struct ice_aq_desc desc;
	u32 reg_val;

	cmd = &desc.params.set_mac_cfg;

	if (max_frame_size == 0)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);

	cmd->max_frame_size = CPU_TO_LE16(max_frame_size);

	/* We read back the transmit timer and fc threshold value of
	 * LFC. Thus, we will use index =
	 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
	 *
	 * Also, because we are operating on transmit timer and fc
	 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
	 */
#define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX

	/* Retrieve the transmit timer */
	reg_val = rd32(hw,
		       PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
	tx_timer_val = reg_val &
		PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
	cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);

	/* Retrieve the fc threshold */
	reg_val = rd32(hw,
		       PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
	fc_threshold_val = reg_val & MAKEMASK(0xFFFF, 0);
	cmd->fc_refresh_threshold = CPU_TO_LE16(fc_threshold_val);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
 * @hw: pointer to the HW struct
 */
static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw;

	hw->switch_info = (struct ice_switch_info *)
		ice_malloc(hw, sizeof(*hw->switch_info));

	sw = hw->switch_info;

	if (!sw)
		return ICE_ERR_NO_MEMORY;

	INIT_LIST_HEAD(&sw->vsi_list_map_head);

	return ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
}

/**
 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
 * @hw: pointer to the HW struct
 */
static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	struct ice_vsi_list_map_info *v_pos_map;
	struct ice_vsi_list_map_info *v_tmp_map;
	struct ice_sw_recipe *recps;
	u8 i;

	LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
				 ice_vsi_list_map_info, list_entry) {
		LIST_DEL(&v_pos_map->list_entry);
		ice_free(hw, v_pos_map);
	}
	recps = hw->switch_info->recp_list;
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
		struct ice_recp_grp_entry *rg_entry, *tmprg_entry;

		recps[i].root_rid = i;
		LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry,
					 &recps[i].rg_list, ice_recp_grp_entry,
					 l_entry) {
			LIST_DEL(&rg_entry->l_entry);
			ice_free(hw, rg_entry);
		}

		if (recps[i].adv_rule) {
			struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
			struct ice_adv_fltr_mgmt_list_entry *lst_itr;

			ice_destroy_lock(&recps[i].filt_rule_lock);
			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 ice_adv_fltr_mgmt_list_entry,
						 list_entry) {
				LIST_DEL(&lst_itr->list_entry);
				ice_free(hw, lst_itr->lkups);
				ice_free(hw, lst_itr);
			}
		} else {
			struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;

			ice_destroy_lock(&recps[i].filt_rule_lock);
			LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
						 &recps[i].filt_rules,
						 ice_fltr_mgmt_list_entry,
						 list_entry) {
				LIST_DEL(&lst_itr->list_entry);
				ice_free(hw, lst_itr);
			}
		}
		if (recps[i].root_buf)
			ice_free(hw, recps[i].root_buf);
	}
	ice_rm_all_sw_replay_rule_info(hw);
	ice_free(hw, sw->recp_list);
	ice_free(hw, sw);
}

/**
 * ice_get_itr_intrl_gran
 * @hw: pointer to the HW struct
 *
 * Determines the ITR/INTRL granularities based on the maximum aggregate
 * bandwidth according to the device's configuration during power-on.
 */
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
{
	u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
			 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
			GL_PWR_MODE_CTL_CAR_MAX_BW_S;

	switch (max_agg_bw) {
	case ICE_MAX_AGG_BW_200G:
	case ICE_MAX_AGG_BW_100G:
	case ICE_MAX_AGG_BW_50G:
		hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
		hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
		break;
	case ICE_MAX_AGG_BW_25G:
		hw->itr_gran = ICE_ITR_GRAN_MAX_25;
		hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
		break;
	}
}

/**
 * ice_print_rollback_msg - print FW rollback message
 * @hw: pointer to the hardware structure
 */
void ice_print_rollback_msg(struct ice_hw *hw)
{
	char nvm_str[ICE_NVM_VER_LEN] = { 0 };
	struct ice_nvm_info *nvm = &hw->nvm;
	struct ice_orom_info *orom;

	orom = &nvm->orom;

	SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
		 nvm->major_ver, nvm->minor_ver, nvm->eetrack, orom->major,
		 orom->build, orom->patch);
	ice_warn(hw,
		 "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n",
		 nvm_str, hw->fw_maj_ver, hw->fw_min_ver);
}

/**
 * ice_init_hw - main hardware initialization routine
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_init_hw(struct ice_hw *hw)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u16 mac_buf_len;
	void *mac_buf;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	/* Set MAC type based on DeviceID */
	status = ice_set_mac_type(hw);
	if (status)
		return status;

	hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
			 PF_FUNC_RID_FUNCTION_NUMBER_M) >>
		PF_FUNC_RID_FUNCTION_NUMBER_S;

	status = ice_reset(hw, ICE_RESET_PFR);
	if (status)
		return status;

	ice_get_itr_intrl_gran(hw);

	status = ice_create_all_ctrlq(hw);
	if (status)
		goto err_unroll_cqinit;

	status = ice_init_nvm(hw);
	if (status)
		goto err_unroll_cqinit;

	if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
		ice_print_rollback_msg(hw);

	status = ice_clear_pf_cfg(hw);
	if (status)
		goto err_unroll_cqinit;

	ice_clear_pxe_mode(hw);

	status = ice_get_caps(hw);
	if (status)
		goto err_unroll_cqinit;

	hw->port_info = (struct ice_port_info *)
			ice_malloc(hw, sizeof(*hw->port_info));
	if (!hw->port_info) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_cqinit;
	}

	/* set the back pointer to HW */
	hw->port_info->hw = hw;

	/* Initialize port_info struct with switch configuration data */
	status = ice_get_initial_sw_cfg(hw);
	if (status)
		goto err_unroll_alloc;

	hw->evb_veb = true;
	/* Query the allocated resources for Tx scheduler */
	status = ice_sched_query_res_alloc(hw);
	if (status) {
		ice_debug(hw, ICE_DBG_SCHED,
			  "Failed to get scheduler allocated resources\n");
		goto err_unroll_alloc;
	}
	ice_sched_get_psm_clk_freq(hw);

	/* Initialize port_info struct with scheduler data */
	status = ice_sched_init_port(hw->port_info);
	if (status)
		goto err_unroll_sched;

	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_sched;
	}

	/* Initialize port_info struct with PHY capabilities */
	status = ice_aq_get_phy_caps(hw->port_info, false,
				     ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
	ice_free(hw, pcaps);
	if (status)
		goto err_unroll_sched;

	/* Initialize port_info struct with link information */
	status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
	if (status)
		goto err_unroll_sched;
	/* need a valid SW entry point to build a Tx tree */
	if (!hw->sw_entry_point_layer) {
		ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
		status = ICE_ERR_CFG;
		goto err_unroll_sched;
	}
	INIT_LIST_HEAD(&hw->agg_list);
	/* Initialize max burst size */
	if (!hw->max_burst_size)
		ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
	status = ice_init_fltr_mgmt_struct(hw);
	if (status)
		goto err_unroll_sched;

	/* Get MAC information */
	/* A single port can report up to two (LAN and WoL) addresses */
	mac_buf = ice_calloc(hw, 2,
			     sizeof(struct ice_aqc_manage_mac_read_resp));
	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);

	if (!mac_buf) {
		status = ICE_ERR_NO_MEMORY;
		goto err_unroll_fltr_mgmt_struct;
	}

	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	ice_free(hw, mac_buf);

	if (status)
		goto err_unroll_fltr_mgmt_struct;
	status = ice_init_hw_tbls(hw);
	if (status)
		goto err_unroll_fltr_mgmt_struct;
	ice_init_lock(&hw->tnl_lock);
	return ICE_SUCCESS;

err_unroll_fltr_mgmt_struct:
	ice_cleanup_fltr_mgmt_struct(hw);
err_unroll_sched:
	ice_sched_cleanup_all(hw);
err_unroll_alloc:
	ice_free(hw, hw->port_info);
	hw->port_info = NULL;
err_unroll_cqinit:
	ice_destroy_all_ctrlq(hw);
	return status;
}

/**
 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
 * @hw: pointer to the hardware structure
 *
 * This should be called only during nominal operation, not as a result of
 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
 * applicable initializations if it fails for any reason.
 */
void ice_deinit_hw(struct ice_hw *hw)
{
	ice_cleanup_fltr_mgmt_struct(hw);

	ice_sched_cleanup_all(hw);
	ice_sched_clear_agg(hw);

	ice_free_hw_tbls(hw);
	ice_destroy_lock(&hw->tnl_lock);

	if (hw->port_info) {
		ice_free(hw, hw->port_info);
		hw->port_info = NULL;
	}

	ice_destroy_all_ctrlq(hw);

	/* Clear VSI contexts if not already cleared */
	ice_clear_all_vsi_ctx(hw);
}

/**
 * ice_check_reset - Check to see if a global reset is complete
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_check_reset(struct ice_hw *hw)
{
	u32 cnt, reg = 0, grst_delay, uld_mask;

	/* Poll for Device Active state in case a recent CORER, GLOBR,
	 * or EMPR has occurred. The grst delay value is in 100ms units.
	 * Add 1sec for outstanding AQ commands that can take a long time.
	 */
	grst_delay = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
		      GLGEN_RSTCTL_GRSTDEL_S) + 10;

	for (cnt = 0; cnt < grst_delay; cnt++) {
		ice_msec_delay(100, true);
		reg = rd32(hw, GLGEN_RSTAT);
		if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
			break;
	}

	if (cnt == grst_delay) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Global reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
				 GLNVM_ULD_PCIER_DONE_1_M |\
				 GLNVM_ULD_CORER_DONE_M |\
				 GLNVM_ULD_GLOBR_DONE_M |\
				 GLNVM_ULD_POR_DONE_M |\
				 GLNVM_ULD_POR_DONE_1_M |\
				 GLNVM_ULD_PCIER_DONE_2_M)

	uld_mask = ICE_RESET_DONE_MASK;

	/* Device is Active; check Global Reset processes are done */
	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, GLNVM_ULD) & uld_mask;
		if (reg == uld_mask) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Global reset processes done. %d\n", cnt);
			break;
		}
		ice_msec_delay(10, true);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
			  reg);
		return ICE_ERR_RESET_FAILED;
	}

	return ICE_SUCCESS;
}

/**
 * ice_pf_reset - Reset the PF
 * @hw: pointer to the hardware structure
 *
 * If a global reset has been triggered, this function checks
 * for its completion and then issues the PF reset
 */
static enum ice_status ice_pf_reset(struct ice_hw *hw)
{
	u32 cnt, reg;

	/* If at function entry a global reset was already in progress, i.e.
	 * state is not 'device active' or any of the reset done bits are not
	 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
	 * global reset is done.
	 */
	if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
	    (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
		/* poll on global reset currently in progress until done */
		if (ice_check_reset(hw))
			return ICE_ERR_RESET_FAILED;

		return ICE_SUCCESS;
	}

	/* Reset the PF */
	reg = rd32(hw, PFGEN_CTRL);

	wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));

	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
		reg = rd32(hw, PFGEN_CTRL);
		if (!(reg & PFGEN_CTRL_PFSWR_M))
			break;

		ice_msec_delay(1, true);
	}

	if (cnt == ICE_PF_RESET_WAIT_COUNT) {
		ice_debug(hw, ICE_DBG_INIT,
			  "PF reset polling failed to complete.\n");
		return ICE_ERR_RESET_FAILED;
	}

	return ICE_SUCCESS;
}

/**
 * ice_reset - Perform different types of reset
 * @hw: pointer to the hardware structure
 * @req: reset request
 *
 * This function triggers a reset as specified by the req parameter.
 *
 * Note:
 * If anything other than a PF reset is triggered, PXE mode is restored.
 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
 * interface has been restored in the rebuild flow.
 */
enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
{
	u32 val = 0;

	switch (req) {
	case ICE_RESET_PFR:
		return ice_pf_reset(hw);
	case ICE_RESET_CORER:
		ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
		val = GLGEN_RTRIG_CORER_M;
		break;
	case ICE_RESET_GLOBR:
		ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
		val = GLGEN_RTRIG_GLOBR_M;
		break;
	default:
		return ICE_ERR_PARAM;
	}

	val |= rd32(hw, GLGEN_RTRIG);
	wr32(hw, GLGEN_RTRIG, val);
	ice_flush(hw);

	/* wait for the FW to be ready */
	return ice_check_reset(hw);
}

/**
 * ice_copy_rxq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_rxq_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Copies rxq context from dense structure to HW register space
 */
static enum ice_status
ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
{
	u8 i;

	if (!ice_rxq_ctx)
		return ICE_ERR_BAD_PTR;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QRX_CONTEXT(i, rxq_index),
		     *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
			  *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}

/* LAN Rx Queue Context */
static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
	/* Field		Width	LSB */
	ICE_CTX_STORE(ice_rlan_ctx, head,		13,	0),
	ICE_CTX_STORE(ice_rlan_ctx, cpuid,		8,	13),
	ICE_CTX_STORE(ice_rlan_ctx, base,		57,	32),
	ICE_CTX_STORE(ice_rlan_ctx, qlen,		13,	89),
	ICE_CTX_STORE(ice_rlan_ctx, dbuf,		7,	102),
	ICE_CTX_STORE(ice_rlan_ctx, hbuf,		5,	109),
	ICE_CTX_STORE(ice_rlan_ctx, dtype,		2,	114),
	ICE_CTX_STORE(ice_rlan_ctx, dsize,		1,	116),
	ICE_CTX_STORE(ice_rlan_ctx, crcstrip,		1,	117),
	ICE_CTX_STORE(ice_rlan_ctx, l2tsel,		1,	119),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_0,		4,	120),
	ICE_CTX_STORE(ice_rlan_ctx, hsplit_1,		2,	124),
	ICE_CTX_STORE(ice_rlan_ctx, showiv,		1,	127),
	ICE_CTX_STORE(ice_rlan_ctx, rxmax,		14,	174),
	ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena,	1,	193),
	ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena,	1,	194),
	ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena,	1,	195),
	ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena,	1,	196),
	ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh,		3,	198),
	ICE_CTX_STORE(ice_rlan_ctx, prefena,		1,	201),
	{ 0 }
};
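
/* Each ICE_CTX_STORE() entry gives a field's bit width and LSB position
 * within the dense hardware context image that ice_set_ctx() builds. As a
 * worked example, 'qlen' (width 13, LSB 89) occupies bits 89..101 of the
 * image: ice_set_ctx() places the 13-bit value starting at bit offset
 * 89 % 8 = 1 of byte 89 / 8 = 11, so the field spans bytes 11 and 12.
 * The widths and LSBs are fixed by the hardware layout and must not be
 * changed or reordered.
 */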

/**
 * ice_write_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rlan_ctx: pointer to the rxq context
 * @rxq_index: the index of the Rx queue
 *
 * Converts rxq context from sparse to dense structure and then writes
 * it to HW register space and enables the hardware to prefetch descriptors
 * instead of only fetching them on demand
 */
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
		  u32 rxq_index)
{
	u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };

	if (!rlan_ctx)
		return ICE_ERR_BAD_PTR;

	rlan_ctx->prefena = 1;

	ice_set_ctx((u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
	return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
}
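
/* Illustrative sketch of programming an Rx queue context; the values and
 * the ring_dma_addr/ring_num_descs/rx_buf_len/rxq_index names are
 * placeholders assumed to come from the caller's ring setup, not
 * recommendations:
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_dma_addr >> 7;	(base is in 128-byte units)
 *	rlan_ctx.qlen = ring_num_descs;
 *	rlan_ctx.dbuf = rx_buf_len >> 7;	(dbuf is in 128-byte units)
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */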

/**
 * ice_clear_rxq_ctx
 * @hw: pointer to the hardware structure
 * @rxq_index: the index of the Rx queue to clear
 *
 * Clears rxq context in HW register space
 */
enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
{
	u8 i;

	if (rxq_index > QRX_CTRL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
		wr32(hw, QRX_CONTEXT(i, rxq_index), 0);

	return ICE_SUCCESS;
}

/* LAN Tx Queue Context */
const struct ice_ctx_ele ice_tlan_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tlan_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tlan_ctx, port_num,			3,	57),
	ICE_CTX_STORE(ice_tlan_ctx, cgd_num,			5,	60),
	ICE_CTX_STORE(ice_tlan_ctx, pf_num,			3,	65),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_num,			10,	68),
	ICE_CTX_STORE(ice_tlan_ctx, vmvf_type,			2,	78),
	ICE_CTX_STORE(ice_tlan_ctx, src_vsi,			10,	80),
	ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena,			1,	90),
	ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag,	1,	91),
	ICE_CTX_STORE(ice_tlan_ctx, alt_vlan,			1,	92),
	ICE_CTX_STORE(ice_tlan_ctx, cpuid,			8,	93),
	ICE_CTX_STORE(ice_tlan_ctx, wb_mode,			1,	101),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc,			1,	102),
	ICE_CTX_STORE(ice_tlan_ctx, tphrd,			1,	103),
	ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc,			1,	104),
	ICE_CTX_STORE(ice_tlan_ctx, cmpq_id,			9,	105),
	ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func,		14,	114),
	ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode,	1,	128),
	ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id,		6,	129),
	ICE_CTX_STORE(ice_tlan_ctx, qlen,			13,	135),
	ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx,		4,	148),
	ICE_CTX_STORE(ice_tlan_ctx, tso_ena,			1,	152),
	ICE_CTX_STORE(ice_tlan_ctx, tso_qnum,			11,	153),
	ICE_CTX_STORE(ice_tlan_ctx, legacy_int,			1,	164),
	ICE_CTX_STORE(ice_tlan_ctx, drop_ena,			1,	165),
	ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx,		2,	166),
	ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx,	3,	168),
	ICE_CTX_STORE(ice_tlan_ctx, int_q_state,		122,	171),
	{ 0 }
};

/**
 * ice_copy_tx_cmpltnq_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
 * @tx_cmpltnq_index: the index of the completion queue
 *
 * Copies Tx completion queue context from dense structure to HW register space
 */
static enum ice_status
ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
			      u32 tx_cmpltnq_index)
{
	u8 i;

	if (!ice_tx_cmpltnq_ctx)
		return ICE_ERR_BAD_PTR;

	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
		     *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
			  *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}

/* LAN Tx Completion Queue Context */
static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base,			57,	0),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len,		18,	64),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation,		1,	96),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr,		22,	97),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num,		3,	128),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num,		10,	131),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type,		2,	141),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr,		1,	160),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid,		8,	161),
	ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache,		512,	192),
	{ 0 }
};

/**
 * ice_write_tx_cmpltnq_ctx
 * @hw: pointer to the hardware structure
 * @tx_cmpltnq_ctx: pointer to the completion queue context
 * @tx_cmpltnq_index: the index of the completion queue
 *
 * Converts completion queue context from sparse to dense structure and then
 * writes it to HW register space
 */
enum ice_status
ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
			 struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
			 u32 tx_cmpltnq_index)
{
	u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };

	ice_set_ctx((u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
	return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
}

/**
 * ice_clear_tx_cmpltnq_ctx
 * @hw: pointer to the hardware structure
 * @tx_cmpltnq_index: the index of the completion queue to clear
 *
 * Clears Tx completion queue context in HW register space
 */
enum ice_status
ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
{
	u8 i;

	if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
		wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);

	return ICE_SUCCESS;
}

/**
 * ice_copy_tx_drbell_q_ctx_to_hw
 * @hw: pointer to the hardware structure
 * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
 * @tx_drbell_q_index: the index of the doorbell queue
 *
 * Copies doorbell queue context from dense structure to HW register space
 */
static enum ice_status
ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
			       u32 tx_drbell_q_index)
{
	u8 i;

	if (!ice_tx_drbell_q_ctx)
		return ICE_ERR_BAD_PTR;

	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Copy each dword separately to HW */
	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
		     *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));

		ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
			  *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
	}

	return ICE_SUCCESS;
}

/* LAN Tx Doorbell Queue Context info */
static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
	/* Field			Width	LSB */
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, base,		57,	0),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len,		13,	64),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num,		3,	80),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num,		8,	84),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type,		2,	94),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid,		8,	96),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd,		1,	104),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr,		1,	108),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en,		1,	112),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head,		13,	128),
	ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail,		13,	144),
	{ 0 }
};

/**
 * ice_write_tx_drbell_q_ctx
 * @hw: pointer to the hardware structure
 * @tx_drbell_q_ctx: pointer to the doorbell queue context
 * @tx_drbell_q_index: the index of the doorbell queue
 *
 * Converts doorbell queue context from sparse to dense structure and then
 * writes it to HW register space
 */
enum ice_status
ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
			  struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
			  u32 tx_drbell_q_index)
{
	u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };

	ice_set_ctx((u8 *)tx_drbell_q_ctx, ctx_buf, ice_tx_drbell_q_ctx_info);
	return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
}

/**
 * ice_clear_tx_drbell_q_ctx
 * @hw: pointer to the hardware structure
 * @tx_drbell_q_index: the index of the doorbell queue to clear
 *
 * Clears doorbell queue context in HW register space
 */
enum ice_status
ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
{
	u8 i;

	if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
		return ICE_ERR_PARAM;

	/* Clear each dword register separately */
	for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
		wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);

	return ICE_SUCCESS;
}

/* FW Admin Queue command wrappers */

/**
 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
 * @hw: pointer to the HW struct
 * @desc: descriptor describing the command
 * @buf: buffer to use for indirect commands (NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
 */
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
		u16 buf_size, struct ice_sq_cd *cd)
{
	return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
}

/**
 * ice_aq_get_fw_ver
 * @hw: pointer to the HW struct
 * @cd: pointer to command details structure or NULL
 *
 * Get the firmware version (0x0001) from the admin queue commands
 */
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
{
	struct ice_aqc_get_ver *resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	resp = &desc.params.get_ver;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	if (!status) {
		hw->fw_branch = resp->fw_branch;
		hw->fw_maj_ver = resp->fw_major;
		hw->fw_min_ver = resp->fw_minor;
		hw->fw_patch = resp->fw_patch;
		hw->fw_build = LE32_TO_CPU(resp->fw_build);
		hw->api_branch = resp->api_branch;
		hw->api_maj_ver = resp->api_major;
		hw->api_min_ver = resp->api_minor;
		hw->api_patch = resp->api_patch;
	}

	return status;
}

/**
 * ice_aq_send_driver_ver
 * @hw: pointer to the HW struct
 * @dv: driver's major, minor version
 * @cd: pointer to command details structure or NULL
 *
 * Send the driver version (0x0002) to the firmware
 */
enum ice_status
ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_driver_ver *cmd;
	struct ice_aq_desc desc;
	u16 len;

	cmd = &desc.params.driver_ver;

	if (!dv)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);

	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	cmd->major_ver = dv->major_ver;
	cmd->minor_ver = dv->minor_ver;
	cmd->build_ver = dv->build_ver;
	cmd->subbuild_ver = dv->subbuild_ver;

	len = 0;
	while (len < sizeof(dv->driver_string) &&
	       IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
		len++;

	return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
}

/**
 * ice_aq_q_shutdown
 * @hw: pointer to the HW struct
 * @unloading: is the driver unloading itself
 *
 * Tell the Firmware that we're shutting down the AdminQ and whether
 * or not the driver is unloading as well (0x0003).
 */
enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
{
	struct ice_aqc_q_shutdown *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.q_shutdown;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);

	if (unloading)
		cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_aq_req_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @access: access type
 * @sdp_number: resource number
 * @timeout: the maximum time in ms that the driver may hold the resource
 * @cd: pointer to command details structure or NULL
 *
 * Requests common resource using the admin queue commands (0x0008).
 * When attempting to acquire the Global Config Lock, the driver can
 * learn of three states:
 * 1) ICE_SUCCESS -        acquired lock, and can perform download package
 * 2) ICE_ERR_AQ_ERROR -   did not get lock, driver should fail to load
 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
 *                         successfully downloaded the package; the driver does
 *                         not have to download the package and can continue
 *                         loading
 *
 * Note that if the caller is in an acquire lock, perform action, release lock
 * phase of operation, it is possible that the FW may detect a timeout and issue
 * a CORER. In this case, the driver will receive a CORER interrupt and will
 * have to determine its cause. The calling thread that is handling this flow
 * will likely get an error propagated back to it indicating the Download
 * Package, Update Package or the Release Resource AQ commands timed out.
 */
static enum ice_status
ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
	       enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
	       struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd_resp = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);

	cmd_resp->res_id = CPU_TO_LE16(res);
	cmd_resp->access_type = CPU_TO_LE16(access);
	cmd_resp->res_number = CPU_TO_LE32(sdp_number);
	cmd_resp->timeout = CPU_TO_LE32(*timeout);
	*timeout = 0;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);

	/* The completion specifies the maximum time in ms that the driver
	 * may hold the resource in the Timeout field.
	 */

	/* Global config lock response utilizes an additional status field.
	 *
	 * If the Global config lock resource is held by some other driver, the
	 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
	 * and the timeout field indicates the maximum time the current owner
	 * of the resource has to free it.
	 */
	if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
		if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
			*timeout = LE32_TO_CPU(cmd_resp->timeout);
			return ICE_SUCCESS;
		} else if (LE16_TO_CPU(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_IN_PROG) {
			*timeout = LE32_TO_CPU(cmd_resp->timeout);
			return ICE_ERR_AQ_ERROR;
		} else if (LE16_TO_CPU(cmd_resp->status) ==
			   ICE_AQ_RES_GLBL_DONE) {
			return ICE_ERR_AQ_NO_WORK;
		}

		/* invalid FW response, force a timeout immediately */
		*timeout = 0;
		return ICE_ERR_AQ_ERROR;
	}

	/* If the resource is held by some other driver, the command completes
	 * with a busy return value and the timeout field indicates the maximum
	 * time the current owner of the resource has to free it.
	 */
	if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
		*timeout = LE32_TO_CPU(cmd_resp->timeout);

	return status;
}
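
/* The three Global Config Lock outcomes documented above drive the package
 * download flow. Illustrative sketch only (the real logic lives in the
 * package download code; ice_download_pkg_contents() is a placeholder
 * name, while the acquire/release helpers exist in ice_flex_pipe.c):
 *
 *	status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
 *	if (status == ICE_ERR_AQ_NO_WORK)
 *		return ICE_SUCCESS;	(another PF downloaded the package)
 *	if (status)
 *		return status;		(lock not acquired; fail the load)
 *	status = ice_download_pkg_contents(hw);
 *	ice_release_global_cfg_lock(hw);
 */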

/**
 * ice_aq_release_res
 * @hw: pointer to the HW struct
 * @res: resource ID
 * @sdp_number: resource number
 * @cd: pointer to command details structure or NULL
 *
 * release common resource using the admin queue commands (0x0009)
 */
static enum ice_status
ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_req_res *cmd;
	struct ice_aq_desc desc;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd = &desc.params.res_owner;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);

	cmd->res_id = CPU_TO_LE16(res);
	cmd->res_number = CPU_TO_LE32(sdp_number);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_acquire_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * This function will attempt to acquire the ownership of a resource.
 */
enum ice_status
ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
		enum ice_aq_res_access_type access, u32 timeout)
{
#define ICE_RES_POLLING_DELAY_MS	10
	u32 delay = ICE_RES_POLLING_DELAY_MS;
	u32 time_left = timeout;
	enum ice_status status;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

	/* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (status == ICE_ERR_AQ_NO_WORK)
		goto ice_acquire_res_exit;

	if (status)
		ice_debug(hw, ICE_DBG_RES,
			  "resource %d acquire type %d failed.\n", res, access);

	/* If necessary, poll until the current lock owner timeouts */
	timeout = time_left;
	while (status && timeout && time_left) {
		ice_msec_delay(delay, true);
		timeout = (timeout > delay) ? timeout - delay : 0;
		status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);

		if (status == ICE_ERR_AQ_NO_WORK)
			/* lock free, but no work to do */
			break;

		if (!status)
			/* lock acquired */
			break;
	}
	if (status && status != ICE_ERR_AQ_NO_WORK)
		ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");

ice_acquire_res_exit:
	if (status == ICE_ERR_AQ_NO_WORK) {
		if (access == ICE_RES_WRITE)
			ice_debug(hw, ICE_DBG_RES,
				  "resource indicates no work to do.\n");
		else
			ice_debug(hw, ICE_DBG_RES,
				  "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
	}
	return status;
}

/**
 * ice_release_res
 * @hw: pointer to the HW structure
 * @res: resource ID
 *
 * This function will release a resource using the proper Admin Command.
 */
void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
{
	enum ice_status status;
	u32 total_delay = 0;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	status = ice_aq_release_res(hw, res, 0, NULL);

	/* there are some rare cases when trying to release the resource
	 * results in an admin queue timeout, so handle them correctly
	 */
	while ((status == ICE_ERR_AQ_TIMEOUT) &&
	       (total_delay < hw->adminq.sq_cmd_timeout)) {
		ice_msec_delay(1, true);
		status = ice_aq_release_res(hw, res, 0, NULL);
		total_delay++;
	}
}

/**
 * ice_aq_alloc_free_res - command to allocate/free resources
 * @hw: pointer to the HW struct
 * @num_entries: number of resource entries in buffer
 * @buf: Indirect buffer to hold data parameters and response
 * @buf_size: size of buffer for indirect commands
 * @opc: pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Helper function to allocate/free resources using the admin queue commands
 */
enum ice_status
ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
		      struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
		      enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_alloc_free_res_cmd *cmd;
	struct ice_aq_desc desc;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd = &desc.params.sw_res_ctrl;

	if (!buf)
		return ICE_ERR_PARAM;

	if (buf_size < (num_entries * sizeof(buf->elem[0])))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	cmd->num_entries = CPU_TO_LE16(num_entries);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}

/**
 * ice_alloc_hw_res - allocate resource
 * @hw: pointer to the HW struct
 * @type: type of resource
 * @num: number of resources to allocate
 * @btm: allocate from bottom
 * @res: pointer to array that will receive the resources
 */
enum ice_status
ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	u16 buf_len;

	buf_len = ice_struct_size(buf, elem, num - 1);
	buf = (struct ice_aqc_alloc_free_res_elem *)
		ice_malloc(hw, buf_len);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer to allocate resource. */
	buf->num_elems = CPU_TO_LE16(num);
	buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
				    ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
	if (btm)
		buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);

	status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
				       ice_aqc_opc_alloc_res, NULL);
	if (status)
		goto ice_alloc_res_exit;

	ice_memcpy(res, buf->elem, sizeof(buf->elem) * num,
		   ICE_NONDMA_TO_NONDMA);

ice_alloc_res_exit:
	ice_free(hw, buf);
	return status;
}
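
/* Illustrative sketch of a caller allocating one dedicated resource and
 * freeing it later ('type' stands for any ICE_AQC_RES_TYPE_* value from
 * ice_adminq_cmd.h; the flow shown is an assumption about typical use):
 *
 *	u16 res_id;
 *
 *	if (!ice_alloc_hw_res(hw, type, 1, false, &res_id)) {
 *		(... use res_id ...)
 *		ice_free_hw_res(hw, type, 1, &res_id);
 *	}
 */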

/**
 * ice_free_hw_res - free allocated HW resource
 * @hw: pointer to the HW struct
 * @type: type of resource to free
 * @num: number of resources
 * @res: pointer to array that contains the resources to free
 */
enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
{
	struct ice_aqc_alloc_free_res_elem *buf;
	enum ice_status status;
	u16 buf_len;

	buf_len = ice_struct_size(buf, elem, num - 1);
	buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
	if (!buf)
		return ICE_ERR_NO_MEMORY;

	/* Prepare buffer to free resource. */
	buf->num_elems = CPU_TO_LE16(num);
	buf->res_type = CPU_TO_LE16(type);
	ice_memcpy(buf->elem, res, sizeof(buf->elem) * num,
		   ICE_NONDMA_TO_NONDMA);

	status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
				       ice_aqc_opc_free_res, NULL);
	if (status)
		ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");

	ice_free(hw, buf);
	return status;
}

/**
 * ice_get_num_per_func - determine number of resources per PF
 * @hw: pointer to the HW structure
 * @max: value to be evenly split between each PF
 *
 * Determine the number of valid functions by going through the bitmap returned
 * from parsing capabilities and use this to calculate the number of resources
 * per PF based on the max value passed in.
 */
static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
{
	u8 funcs;

#define ICE_CAPS_VALID_FUNCS_M	0xFF
	funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions &
			     ICE_CAPS_VALID_FUNCS_M);

	if (!funcs)
		return 0;

	return max / funcs;
}
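
/* Worked example: with valid_functions = 0x0F (four active PFs) and
 * max = ICE_MAX_VSI, each PF is granted ICE_MAX_VSI / 4 guaranteed VSIs,
 * e.g. 768 / 4 = 192 assuming the usual ICE_MAX_VSI value of 768. This is
 * how ice_parse_caps() below computes guar_num_vsi per function.
 */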

/**
 * ice_print_led_caps - print LED capabilities
 * @hw: pointer to the ice_hw instance
 * @caps: pointer to common caps instance
 * @prefix: string to prefix when printing
 * @debug: set to indicate debug print
 */
static void
ice_print_led_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
		   char const *prefix, bool debug)
{
	u8 i;

	if (debug)
		ice_debug(hw, ICE_DBG_INIT, "%s: led_pin_num = %d\n", prefix,
			  caps->led_pin_num);
	else
		ice_info(hw, "%s: led_pin_num = %d\n", prefix,
			 caps->led_pin_num);

	for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_LED; i++) {
		if (!caps->led[i])
			continue;

		if (debug)
			ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = %d\n",
				  prefix, i, caps->led[i]);
		else
			ice_info(hw, "%s: led[%d] = %d\n", prefix, i,
				 caps->led[i]);
	}
}

/**
 * ice_print_sdp_caps - print SDP capabilities
 * @hw: pointer to the ice_hw instance
 * @caps: pointer to common caps instance
 * @prefix: string to prefix when printing
 * @debug: set to indicate debug print
 */
static void
ice_print_sdp_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
		   char const *prefix, bool debug)
{
	u8 i;

	if (debug)
		ice_debug(hw, ICE_DBG_INIT, "%s: sdp_pin_num = %d\n", prefix,
			  caps->sdp_pin_num);
	else
		ice_info(hw, "%s: sdp_pin_num = %d\n", prefix,
			 caps->sdp_pin_num);

	for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_SDP; i++) {
		if (!caps->sdp[i])
			continue;

		if (debug)
			ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = %d\n",
				  prefix, i, caps->sdp[i]);
		else
			ice_info(hw, "%s: sdp[%d] = %d\n", prefix,
				 i, caps->sdp[i]);
	}
}

/**
 * ice_parse_caps - parse function/device capabilities
 * @hw: pointer to the HW struct
 * @buf: pointer to a buffer containing function/device capability records
 * @cap_count: number of capability records in the list
 * @opc: type of capabilities list to parse
 *
 * Helper function to parse function(0x000a)/device(0x000b) capabilities list.
 */
static void
ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count,
	       enum ice_adminq_opc opc)
{
	struct ice_aqc_list_caps_elem *cap_resp;
	struct ice_hw_func_caps *func_p = NULL;
	struct ice_hw_dev_caps *dev_p = NULL;
	struct ice_hw_common_caps *caps;
	char const *prefix;
	u32 i;

	if (!buf)
		return;

	cap_resp = (struct ice_aqc_list_caps_elem *)buf;

	if (opc == ice_aqc_opc_list_dev_caps) {
		dev_p = &hw->dev_caps;
		caps = &dev_p->common_cap;
		prefix = "dev cap";
	} else if (opc == ice_aqc_opc_list_func_caps) {
		func_p = &hw->func_caps;
		caps = &func_p->common_cap;
		prefix = "func cap";
	} else {
		ice_debug(hw, ICE_DBG_INIT, "wrong opcode\n");
		return;
	}

	for (i = 0; caps && i < cap_count; i++, cap_resp++) {
		u32 logical_id = LE32_TO_CPU(cap_resp->logical_id);
		u32 phys_id = LE32_TO_CPU(cap_resp->phys_id);
		u32 number = LE32_TO_CPU(cap_resp->number);
		u16 cap = LE16_TO_CPU(cap_resp->cap);

		switch (cap) {
		case ICE_AQC_CAPS_SWITCHING_MODE:
			caps->switching_mode = number;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: switching_mode = %d\n", prefix,
				  caps->switching_mode);
			break;
		case ICE_AQC_CAPS_MANAGEABILITY_MODE:
			caps->mgmt_mode = number;
			caps->mgmt_protocols_mctp = logical_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: mgmt_mode = %d\n", prefix,
				  caps->mgmt_mode);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: mgmt_protocols_mctp = %d\n", prefix,
				  caps->mgmt_protocols_mctp);
			break;
		case ICE_AQC_CAPS_OS2BMC:
			caps->os2bmc = number;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: os2bmc = %d\n", prefix, caps->os2bmc);
			break;
		case ICE_AQC_CAPS_VALID_FUNCTIONS:
			caps->valid_functions = number;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: valid_functions (bitmap) = %d\n", prefix,
				  caps->valid_functions);

			/* store func count for resource management purposes */
			if (dev_p)
				dev_p->num_funcs = ice_hweight32(number);
			break;
		case ICE_AQC_CAPS_SRIOV:
			caps->sr_iov_1_1 = (number == 1);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: sr_iov_1_1 = %d\n", prefix,
				  caps->sr_iov_1_1);
			break;
		case ICE_AQC_CAPS_VF:
			if (dev_p) {
				dev_p->num_vfs_exposed = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: num_vfs_exposed = %d\n", prefix,
					  dev_p->num_vfs_exposed);
			} else if (func_p) {
				func_p->num_allocd_vfs = number;
				func_p->vf_base_id = logical_id;
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: num_allocd_vfs = %d\n", prefix,
					  func_p->num_allocd_vfs);
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: vf_base_id = %d\n", prefix,
					  func_p->vf_base_id);
			}
			break;
		case ICE_AQC_CAPS_802_1QBG:
			caps->evb_802_1_qbg = (number == 1);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: evb_802_1_qbg = %d\n", prefix, number);
			break;
		case ICE_AQC_CAPS_802_1BR:
			caps->evb_802_1_qbh = (number == 1);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: evb_802_1_qbh = %d\n", prefix, number);
			break;
		case ICE_AQC_CAPS_VSI:
			if (dev_p) {
				dev_p->num_vsi_allocd_to_host = number;
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: num_vsi_allocd_to_host = %d\n",
					  prefix,
					  dev_p->num_vsi_allocd_to_host);
			} else if (func_p) {
				func_p->guar_num_vsi =
					ice_get_num_per_func(hw, ICE_MAX_VSI);
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: guar_num_vsi (fw) = %d\n",
					  prefix, number);
				ice_debug(hw, ICE_DBG_INIT,
					  "%s: guar_num_vsi = %d\n",
					  prefix, func_p->guar_num_vsi);
			}
			break;
		case ICE_AQC_CAPS_DCB:
			caps->dcb = (number == 1);
			caps->active_tc_bitmap = logical_id;
			caps->maxtc = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: dcb = %d\n", prefix, caps->dcb);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: active_tc_bitmap = %d\n", prefix,
				  caps->active_tc_bitmap);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: maxtc = %d\n", prefix, caps->maxtc);
			break;
		case ICE_AQC_CAPS_ISCSI:
			caps->iscsi = (number == 1);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: iscsi = %d\n", prefix, caps->iscsi);
			break;
		case ICE_AQC_CAPS_RSS:
			caps->rss_table_size = number;
			caps->rss_table_entry_width = logical_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: rss_table_size = %d\n", prefix,
				  caps->rss_table_size);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: rss_table_entry_width = %d\n", prefix,
				  caps->rss_table_entry_width);
			break;
		case ICE_AQC_CAPS_RXQS:
			caps->num_rxq = number;
			caps->rxq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: num_rxq = %d\n", prefix,
				  caps->num_rxq);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: rxq_first_id = %d\n", prefix,
				  caps->rxq_first_id);
			break;
		case ICE_AQC_CAPS_TXQS:
			caps->num_txq = number;
			caps->txq_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: num_txq = %d\n", prefix,
				  caps->num_txq);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: txq_first_id = %d\n", prefix,
				  caps->txq_first_id);
			break;
		case ICE_AQC_CAPS_MSIX:
			caps->num_msix_vectors = number;
			caps->msix_vector_first_id = phys_id;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: num_msix_vectors = %d\n", prefix,
				  caps->num_msix_vectors);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: msix_vector_first_id = %d\n", prefix,
				  caps->msix_vector_first_id);
			break;
		case ICE_AQC_CAPS_NVM_VER:
			break;
		case ICE_AQC_CAPS_NVM_MGMT:
			caps->nvm_unified_update =
				(number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
				true : false;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: nvm_unified_update = %d\n", prefix,
				  caps->nvm_unified_update);
			break;
		case ICE_AQC_CAPS_CEM:
			caps->mgmt_cem = (number == 1);
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: mgmt_cem = %d\n", prefix,
				  caps->mgmt_cem);
			break;
		case ICE_AQC_CAPS_LED:
			if (phys_id < ICE_MAX_SUPPORTED_GPIO_LED) {
				caps->led[phys_id] = true;
				caps->led_pin_num++;
			}
			break;
		case ICE_AQC_CAPS_SDP:
			if (phys_id < ICE_MAX_SUPPORTED_GPIO_SDP) {
				caps->sdp[phys_id] = true;
				caps->sdp_pin_num++;
			}
			break;
		case ICE_AQC_CAPS_WR_CSR_PROT:
			caps->wr_csr_prot = number;
			caps->wr_csr_prot |= (u64)logical_id << 32;
			ice_debug(hw, ICE_DBG_INIT,
				  "%s: wr_csr_prot = 0x%llX\n", prefix,
				  (unsigned long long)caps->wr_csr_prot);
			break;
2005 case ICE_AQC_CAPS_WOL_PROXY:
2006 caps->num_wol_proxy_fltr = number;
2007 caps->wol_proxy_vsi_seid = logical_id;
2008 caps->apm_wol_support = !!(phys_id & ICE_WOL_SUPPORT_M);
2009 caps->acpi_prog_mthd = !!(phys_id &
2010 ICE_ACPI_PROG_MTHD_M);
2011 caps->proxy_support = !!(phys_id & ICE_PROXY_SUPPORT_M);
2012 ice_debug(hw, ICE_DBG_INIT,
2013 "%s: num_wol_proxy_fltr = %d\n", prefix,
2014 caps->num_wol_proxy_fltr);
2015 ice_debug(hw, ICE_DBG_INIT,
2016 "%s: wol_proxy_vsi_seid = %d\n", prefix,
2017 caps->wol_proxy_vsi_seid);
2019 case ICE_AQC_CAPS_MAX_MTU:
2020 caps->max_mtu = number;
2021 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
2022 prefix, caps->max_mtu);
2025 ice_debug(hw, ICE_DBG_INIT,
2026 "%s: unknown capability[%d]: 0x%x\n", prefix,
2032 ice_print_led_caps(hw, caps, prefix, true);
2033 ice_print_sdp_caps(hw, caps, prefix, true);
2035 /* Re-calculate capabilities that are dependent on the number of
2036 * physical ports; i.e. some features are not supported or function
2037 * differently on devices with more than 4 ports.
2039 if (hw->dev_caps.num_funcs > 4) {
2040 /* Max 4 TCs per port */
2042 ice_debug(hw, ICE_DBG_INIT,
2043 "%s: maxtc = %d (based on #ports)\n", prefix,
/**
 * ice_aq_discover_caps - query function/device capabilities
 * @hw: pointer to the HW struct
 * @buf: a virtual buffer to hold the capabilities
 * @buf_size: size of the virtual buffer
 * @cap_count: cap count needed if the AQ returns ENOMEM
 * @opc: capabilities type to discover - pass in the command opcode
 * @cd: pointer to command details structure or NULL
 *
 * Get the function(0x000a)/device(0x000b) capabilities description from
 * the firmware.
 */
static enum ice_status
ice_aq_discover_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
		     enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
	struct ice_aqc_list_caps *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.get_cap;

	if (opc != ice_aqc_opc_list_func_caps &&
	    opc != ice_aqc_opc_list_dev_caps)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, opc);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
	if (!status)
		ice_parse_caps(hw, buf, LE32_TO_CPU(cmd->count), opc);
	else if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM)
		*cap_count = LE32_TO_CPU(cmd->count);
	return status;
}

/**
 * ice_discover_caps - get info about the HW
 * @hw: pointer to the hardware structure
 * @opc: capabilities type to discover - pass in the command opcode
 */
static enum ice_status
ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
{
	enum ice_status status;
	u32 cap_count;
	u16 cbuf_len;
	u8 retries;

	/* The driver doesn't know how many capabilities the device will return
	 * so the buffer size required isn't known ahead of time. The driver
	 * starts with cbuf_len and if this turns out to be insufficient, the
	 * device returns ICE_AQ_RC_ENOMEM and also the cap_count it needs.
	 * The driver then allocates the buffer based on the count and retries
	 * the operation. So it follows that the retry count is 2.
	 */
#define ICE_GET_CAP_BUF_COUNT	40
#define ICE_GET_CAP_RETRY_COUNT	2

	cap_count = ICE_GET_CAP_BUF_COUNT;
	retries = ICE_GET_CAP_RETRY_COUNT;

	do {
		void *cbuf;

		cbuf_len = (u16)(cap_count *
				 sizeof(struct ice_aqc_list_caps_elem));
		cbuf = ice_malloc(hw, cbuf_len);
		if (!cbuf)
			return ICE_ERR_NO_MEMORY;

		status = ice_aq_discover_caps(hw, cbuf, cbuf_len, &cap_count,
					      opc, NULL);
		ice_free(hw, cbuf);

		if (!status || hw->adminq.sq_last_status != ICE_AQ_RC_ENOMEM)
			break;

		/* If ENOMEM is returned, try again with bigger buffer */
	} while (--retries);

	return status;
}

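/* Illustrative sketch (not part of the shared code, kept out of the build
 * with #if 0): how a caller could drive ice_aq_discover_caps() directly with
 * the same grow-on-ENOMEM retry that ice_discover_caps() implements above.
 * The wrapper name ice_example_query_dev_caps() is hypothetical; all other
 * names are the driver's own.
 */
#if 0
static enum ice_status ice_example_query_dev_caps(struct ice_hw *hw)
{
	u32 cap_count = ICE_GET_CAP_BUF_COUNT;	/* initial guess: 40 elems */
	u8 retries = ICE_GET_CAP_RETRY_COUNT;
	enum ice_status status;

	do {
		u16 len = (u16)(cap_count *
				sizeof(struct ice_aqc_list_caps_elem));
		void *buf = ice_malloc(hw, len);

		if (!buf)
			return ICE_ERR_NO_MEMORY;
		/* on ENOMEM, FW writes the element count it needs into
		 * cap_count, so the second pass allocates exactly enough
		 */
		status = ice_aq_discover_caps(hw, buf, len, &cap_count,
					      ice_aqc_opc_list_dev_caps, NULL);
		ice_free(hw, buf);
	} while (status && hw->adminq.sq_last_status == ICE_AQ_RC_ENOMEM &&
		 --retries);

	return status;
}
#endif
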
/**
 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
 * @hw: pointer to the hardware structure
 */
void ice_set_safe_mode_caps(struct ice_hw *hw)
{
	struct ice_hw_func_caps *func_caps = &hw->func_caps;
	struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
	u32 valid_func, rxq_first_id, txq_first_id;
	u32 msix_vector_first_id, max_mtu;
	u32 num_funcs;

	/* cache some func_caps values that should be restored after memset */
	valid_func = func_caps->common_cap.valid_functions;
	txq_first_id = func_caps->common_cap.txq_first_id;
	rxq_first_id = func_caps->common_cap.rxq_first_id;
	msix_vector_first_id = func_caps->common_cap.msix_vector_first_id;
	max_mtu = func_caps->common_cap.max_mtu;

	/* unset func capabilities */
	memset(func_caps, 0, sizeof(*func_caps));

	/* restore cached values */
	func_caps->common_cap.valid_functions = valid_func;
	func_caps->common_cap.txq_first_id = txq_first_id;
	func_caps->common_cap.rxq_first_id = rxq_first_id;
	func_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
	func_caps->common_cap.max_mtu = max_mtu;

	/* one Tx and one Rx queue in safe mode */
	func_caps->common_cap.num_rxq = 1;
	func_caps->common_cap.num_txq = 1;

	/* two MSIX vectors, one for traffic and one for misc causes */
	func_caps->common_cap.num_msix_vectors = 2;
	func_caps->guar_num_vsi = 1;

	/* cache some dev_caps values that should be restored after memset */
	valid_func = dev_caps->common_cap.valid_functions;
	txq_first_id = dev_caps->common_cap.txq_first_id;
	rxq_first_id = dev_caps->common_cap.rxq_first_id;
	msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id;
	max_mtu = dev_caps->common_cap.max_mtu;
	num_funcs = dev_caps->num_funcs;

	/* unset dev capabilities */
	memset(dev_caps, 0, sizeof(*dev_caps));

	/* restore cached values */
	dev_caps->common_cap.valid_functions = valid_func;
	dev_caps->common_cap.txq_first_id = txq_first_id;
	dev_caps->common_cap.rxq_first_id = rxq_first_id;
	dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
	dev_caps->common_cap.max_mtu = max_mtu;
	dev_caps->num_funcs = num_funcs;

	/* one Tx and one Rx queue per function in safe mode */
	dev_caps->common_cap.num_rxq = num_funcs;
	dev_caps->common_cap.num_txq = num_funcs;

	/* two MSIX vectors per function */
	dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
}

/**
 * ice_get_caps - get info about the HW
 * @hw: pointer to the hardware structure
 */
enum ice_status ice_get_caps(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_discover_caps(hw, ice_aqc_opc_list_dev_caps);
	if (!status)
		status = ice_discover_caps(hw, ice_aqc_opc_list_func_caps);

	return status;
}

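/* Illustrative sketch (hypothetical caller, kept out of the build): a
 * typical init-time sequence discovers capabilities and, when the driver
 * must fall back to safe mode, overwrites them with the minimal
 * single-queue values. ice_example_init_caps() is not a real driver entry
 * point; the safe-mode decision itself is made elsewhere.
 */
#if 0
static enum ice_status ice_example_init_caps(struct ice_hw *hw, bool safe_mode)
{
	enum ice_status status = ice_get_caps(hw);

	if (status)
		return status;
	if (safe_mode)
		ice_set_safe_mode_caps(hw);	/* 1 Tx/Rx queue, 2 MSIX */
	return ICE_SUCCESS;
}
#endif
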
/**
 * ice_aq_manage_mac_write - manage MAC address write command
 * @hw: pointer to the HW struct
 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
 * @flags: flags to control write behavior
 * @cd: pointer to command details structure or NULL
 *
 * This function is used to write MAC address to the NVM (0x0108).
 */
enum ice_status
ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
			struct ice_sq_cd *cd)
{
	struct ice_aqc_manage_mac_write *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.mac_write;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);

	cmd->flags = flags;
	ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_DMA);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_clear_pxe_mode
 * @hw: pointer to the HW struct
 *
 * Tell the firmware that the driver is taking over from PXE (0x0110).
 */
static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
{
	struct ice_aq_desc desc;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
	desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}

/**
 * ice_clear_pxe_mode - clear PXE operations mode
 * @hw: pointer to the HW struct
 *
 * Make sure all PXE mode settings are cleared, including things
 * like descriptor fetch/write-back mode.
 */
void ice_clear_pxe_mode(struct ice_hw *hw)
{
	if (ice_check_sq_alive(hw, &hw->adminq))
		ice_aq_clear_pxe_mode(hw);
}

/**
 * ice_aq_set_port_params - set physical port parameters
 * @pi: pointer to the port info struct
 * @bad_frame_vsi: defines the VSI to which bad frames are forwarded
 * @save_bad_pac: if set, packets with errors are forwarded to the bad frames VSI
 * @pad_short_pac: if set, transmit packets smaller than 60 bytes are padded
 * @double_vlan: if set, double VLAN is enabled
 * @cd: pointer to command details structure or NULL
 *
 * Set Physical port parameters (0x0203)
 */
enum ice_status
ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
		       bool save_bad_pac, bool pad_short_pac, bool double_vlan,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_set_port_params *cmd;
	struct ice_hw *hw = pi->hw;
	struct ice_aq_desc desc;
	u16 cmd_flags = 0;

	cmd = &desc.params.set_port_params;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
	cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
	if (save_bad_pac)
		cmd_flags |= ICE_AQC_SET_P_PARAMS_SAVE_BAD_PACKETS;
	if (pad_short_pac)
		cmd_flags |= ICE_AQC_SET_P_PARAMS_PAD_SHORT_PACKETS;
	if (double_vlan)
		cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
	cmd->cmd_flags = CPU_TO_LE16(cmd_flags);

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_get_link_speed_based_on_phy_type - returns link speed
 * @phy_type_low: lower part of phy_type
 * @phy_type_high: higher part of phy_type
 *
 * This helper function will convert an entry in the PHY type structure
 * [phy_type_low, phy_type_high] to its corresponding link speed.
 * Note: In the structure of [phy_type_low, phy_type_high], there should
 * be one bit set, as this function converts one PHY type to its speed.
 * If no bit is set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
 * If more than one bit is set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
 */
static u16
ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
{
	u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;

	switch (phy_type_low) {
	case ICE_PHY_TYPE_LOW_100BASE_TX:
	case ICE_PHY_TYPE_LOW_100M_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
		break;
	case ICE_PHY_TYPE_LOW_1000BASE_T:
	case ICE_PHY_TYPE_LOW_1000BASE_SX:
	case ICE_PHY_TYPE_LOW_1000BASE_LX:
	case ICE_PHY_TYPE_LOW_1000BASE_KX:
	case ICE_PHY_TYPE_LOW_1G_SGMII:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
		break;
	case ICE_PHY_TYPE_LOW_2500BASE_T:
	case ICE_PHY_TYPE_LOW_2500BASE_X:
	case ICE_PHY_TYPE_LOW_2500BASE_KX:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
		break;
	case ICE_PHY_TYPE_LOW_5GBASE_T:
	case ICE_PHY_TYPE_LOW_5GBASE_KR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
		break;
	case ICE_PHY_TYPE_LOW_10GBASE_T:
	case ICE_PHY_TYPE_LOW_10G_SFI_DA:
	case ICE_PHY_TYPE_LOW_10GBASE_SR:
	case ICE_PHY_TYPE_LOW_10GBASE_LR:
	case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
	case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
		break;
	case ICE_PHY_TYPE_LOW_25GBASE_T:
	case ICE_PHY_TYPE_LOW_25GBASE_CR:
	case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_CR1:
	case ICE_PHY_TYPE_LOW_25GBASE_SR:
	case ICE_PHY_TYPE_LOW_25GBASE_LR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR:
	case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
	case ICE_PHY_TYPE_LOW_25GBASE_KR1:
	case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
		break;
	case ICE_PHY_TYPE_LOW_40GBASE_CR4:
	case ICE_PHY_TYPE_LOW_40GBASE_SR4:
	case ICE_PHY_TYPE_LOW_40GBASE_LR4:
	case ICE_PHY_TYPE_LOW_40GBASE_KR4:
	case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
	case ICE_PHY_TYPE_LOW_40G_XLAUI:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
		break;
	case ICE_PHY_TYPE_LOW_50GBASE_CR2:
	case ICE_PHY_TYPE_LOW_50GBASE_SR2:
	case ICE_PHY_TYPE_LOW_50GBASE_LR2:
	case ICE_PHY_TYPE_LOW_50GBASE_KR2:
	case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_LAUI2:
	case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI2:
	case ICE_PHY_TYPE_LOW_50GBASE_CP:
	case ICE_PHY_TYPE_LOW_50GBASE_SR:
	case ICE_PHY_TYPE_LOW_50GBASE_FR:
	case ICE_PHY_TYPE_LOW_50GBASE_LR:
	case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
	case ICE_PHY_TYPE_LOW_50G_AUI1:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
		break;
	case ICE_PHY_TYPE_LOW_100GBASE_CR4:
	case ICE_PHY_TYPE_LOW_100GBASE_SR4:
	case ICE_PHY_TYPE_LOW_100GBASE_LR4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR4:
	case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_CAUI4:
	case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
	case ICE_PHY_TYPE_LOW_100G_AUI4:
	case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
	case ICE_PHY_TYPE_LOW_100GBASE_CP2:
	case ICE_PHY_TYPE_LOW_100GBASE_SR2:
	case ICE_PHY_TYPE_LOW_100GBASE_DR:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	switch (phy_type_high) {
	case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_CAUI2:
	case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
	case ICE_PHY_TYPE_HIGH_100G_AUI2:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
		break;
	default:
		speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
		break;
	}

	if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
	    speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
		return ICE_AQ_LINK_SPEED_UNKNOWN;
	else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
		 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
		return speed_phy_type_low;
	else
		return speed_phy_type_high;
}

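/* Worked example (illustrative only): the helper above expects exactly one
 * PHY type bit set across the two words, e.g.
 *
 *   ice_get_link_speed_based_on_phy_type(ICE_PHY_TYPE_LOW_25GBASE_CR, 0)
 *       == ICE_AQ_LINK_SPEED_25GB
 *   ice_get_link_speed_based_on_phy_type(0, ICE_PHY_TYPE_HIGH_100G_CAUI2)
 *       == ICE_AQ_LINK_SPEED_100GB
 *
 * while zero bits, or one bit in each word at the same time, both yield
 * ICE_AQ_LINK_SPEED_UNKNOWN.
 */
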
/**
 * ice_update_phy_type
 * @phy_type_low: pointer to the lower part of phy_type
 * @phy_type_high: pointer to the higher part of phy_type
 * @link_speeds_bitmap: targeted link speeds bitmap
 *
 * Note: For the link_speeds_bitmap structure, you can check it at
 * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
 * link_speeds_bitmap that includes multiple speeds.
 *
 * Each entry in this [phy_type_low, phy_type_high] structure will
 * present a certain link speed. This helper function will turn on bits
 * in [phy_type_low, phy_type_high] structure based on the value of
 * link_speeds_bitmap input parameter.
 */
void
ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
		    u16 link_speeds_bitmap)
{
	u64 pt_high;
	u64 pt_low;
	int index;
	u16 speed;

	/* We first check with low part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
		pt_low = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);

		if (link_speeds_bitmap & speed)
			*phy_type_low |= BIT_ULL(index);
	}

	/* We then check with high part of phy_type */
	for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
		pt_high = BIT_ULL(index);
		speed = ice_get_link_speed_based_on_phy_type(0, pt_high);

		if (link_speeds_bitmap & speed)
			*phy_type_high |= BIT_ULL(index);
	}
}

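/* Illustrative sketch (kept out of the build): turning a speed bitmap into
 * PHY type masks before a set-PHY-config call. The wrapper name is
 * hypothetical; ice_update_phy_type() and the speed bits are the driver's
 * own.
 */
#if 0
static void ice_example_mask_speeds(struct ice_aqc_set_phy_cfg_data *cfg)
{
	u64 phy_low = 0, phy_high = 0;

	ice_update_phy_type(&phy_low, &phy_high,
			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);

	/* restrict the advertised PHY types to the selected speeds */
	cfg->phy_type_low &= CPU_TO_LE64(phy_low);
	cfg->phy_type_high &= CPU_TO_LE64(phy_high);
}
#endif
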
/**
 * ice_aq_set_phy_cfg
 * @hw: pointer to the HW struct
 * @pi: port info structure of the interested logical port
 * @cfg: structure with PHY configuration data to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set the various PHY configuration parameters supported on the Port.
 * One or more of the Set PHY config parameters may be ignored in an MFP
 * mode as the PF may not have the privilege to set some of the PHY Config
 * parameters. This status will be indicated by the command response (0x0601).
 */
enum ice_status
ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
		   struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc;
	enum ice_status status;

	if (!cfg)
		return ICE_ERR_PARAM;

	/* Ensure that only valid bits of cfg->caps can be turned on. */
	if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
		ice_debug(hw, ICE_DBG_PHY,
			  "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
			  cfg->caps);

		cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
	}

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
	desc.params.set_phy.lport_num = pi->lport;
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
		  (unsigned long long)LE64_TO_CPU(cfg->phy_type_low));
	ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
		  (unsigned long long)LE64_TO_CPU(cfg->phy_type_high));
	ice_debug(hw, ICE_DBG_LINK, "caps = 0x%x\n", cfg->caps);
	ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl_an = 0x%x\n",
		  cfg->low_power_ctrl_an);
	ice_debug(hw, ICE_DBG_LINK, "eee_cap = 0x%x\n", cfg->eee_cap);
	ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value);
	ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt);

	status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);

	if (!status)
		pi->phy.curr_user_phy_cfg = *cfg;

	return status;
}

/**
 * ice_update_link_info - update status of the HW network link
 * @pi: port info structure of the interested logical port
 */
enum ice_status ice_update_link_info(struct ice_port_info *pi)
{
	struct ice_link_status *li;
	enum ice_status status;

	if (!pi)
		return ICE_ERR_PARAM;

	li = &pi->phy.link_info;

	status = ice_aq_get_link_info(pi, true, NULL, NULL);
	if (status)
		return status;

	if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
		struct ice_aqc_get_phy_caps_data *pcaps;
		struct ice_hw *hw;

		hw = pi->hw;
		pcaps = (struct ice_aqc_get_phy_caps_data *)
			ice_malloc(hw, sizeof(*pcaps));
		if (!pcaps)
			return ICE_ERR_NO_MEMORY;

		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
					     pcaps, NULL);
		if (status == ICE_SUCCESS)
			ice_memcpy(li->module_type, &pcaps->module_type,
				   sizeof(li->module_type),
				   ICE_NONDMA_TO_NONDMA);

		ice_free(hw, pcaps);
	}

	return status;
}

/**
 * ice_cache_phy_user_req
 * @pi: port information structure
 * @cache_data: PHY logging data
 * @cache_mode: PHY logging mode
 *
 * Log the user request on (FC, FEC, SPEED) for later use.
 */
static void
ice_cache_phy_user_req(struct ice_port_info *pi,
		       struct ice_phy_cache_mode_data cache_data,
		       enum ice_phy_cache_mode cache_mode)
{
	if (!pi)
		return;

	switch (cache_mode) {
	case ICE_FC_MODE:
		pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
		break;
	case ICE_SPEED_MODE:
		pi->phy.curr_user_speed_req =
			cache_data.data.curr_user_speed_req;
		break;
	case ICE_FEC_MODE:
		pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
		break;
	default:
		break;
	}
}

/**
 * ice_caps_to_fc_mode
 * @caps: PHY capabilities
 *
 * Convert PHY FC capabilities to ice FC mode
 */
enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
{
	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
	    caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
		return ICE_FC_FULL;

	if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
		return ICE_FC_TX_PAUSE;

	if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
		return ICE_FC_RX_PAUSE;

	return ICE_FC_NONE;
}

/**
 * ice_caps_to_fec_mode
 * @caps: PHY capabilities
 * @fec_options: Link FEC options
 *
 * Convert PHY FEC capabilities to ice FEC mode
 */
enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
{
	if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
		return ICE_FEC_AUTO;

	if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
			   ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
			   ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
			   ICE_AQC_PHY_FEC_25G_KR_REQ))
		return ICE_FEC_BASER;

	if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
			   ICE_AQC_PHY_FEC_25G_RS_544_REQ |
			   ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
		return ICE_FEC_RS;

	return ICE_FEC_NONE;
}

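/* Illustrative sketch (kept out of the build): decoding the FC and FEC
 * modes that a get-PHY-caps response reports, using the two converters
 * above. The wrapper name is hypothetical.
 */
#if 0
static void ice_example_decode_phy_caps(struct ice_port_info *pi)
{
	struct ice_aqc_get_phy_caps_data pcaps = { 0 };
	enum ice_fec_mode fec;
	enum ice_fc_mode fc;

	if (ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
				&pcaps, NULL))
		return;

	fc = ice_caps_to_fc_mode(pcaps.caps);
	fec = ice_caps_to_fec_mode(pcaps.caps, pcaps.link_fec_options);
	(void)fc;
	(void)fec;	/* e.g. report to the OS link-settings hooks */
}
#endif
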
/**
 * ice_set_fc
 * @pi: port information structure
 * @aq_failures: pointer to status code, specific to ice_set_fc routine
 * @ena_auto_link_update: enable automatic link update
 *
 * Set the requested flow control mode.
 */
enum ice_status
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
{
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	struct ice_phy_cache_mode_data cache_data;
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;
	u8 pause_mask = 0x0;
	struct ice_hw *hw;

	if (!pi || !aq_failures)
		return ICE_ERR_PARAM;

	hw = pi->hw;
	*aq_failures = ICE_SET_FC_AQ_FAIL_NONE;

	/* Cache user FC request */
	cache_data.data.curr_user_fc_req = pi->fc.req_mode;
	ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);

	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	switch (pi->fc.req_mode) {
	case ICE_FC_AUTO:
		/* Query the value of FC that both the NIC and attached media
		 * can do.
		 */
		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
					     pcaps, NULL);
		if (status) {
			*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
			goto out;
		}

		pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_FULL:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_RX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
		break;
	case ICE_FC_TX_PAUSE:
		pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
		break;
	default:
		break;
	}

	/* Get the current PHY config */
	ice_memset(pcaps, 0, sizeof(*pcaps), ICE_NONDMA_MEM);
	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
				     NULL);
	if (status) {
		*aq_failures = ICE_SET_FC_AQ_FAIL_GET;
		goto out;
	}

	ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);

	/* clear the old pause settings */
	cfg.caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
		      ICE_AQC_PHY_EN_RX_LINK_PAUSE);

	/* set the new capabilities */
	cfg.caps |= pause_mask;

	/* If the capabilities have changed, then set the new config */
	if (cfg.caps != pcaps->caps) {
		int retry_count, retry_max = 10;

		/* Auto restart link so settings take effect */
		if (ena_auto_link_update)
			cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

		status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
		if (status) {
			*aq_failures = ICE_SET_FC_AQ_FAIL_SET;
			goto out;
		}

		/* Update the link info
		 * It sometimes takes a really long time for link to
		 * come back from the atomic reset. Thus, we wait a
		 * little bit.
		 */
		for (retry_count = 0; retry_count < retry_max; retry_count++) {
			status = ice_update_link_info(pi);

			if (status == ICE_SUCCESS)
				break;

			ice_msec_delay(100, true);
		}

		if (status)
			*aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
	}

out:
	ice_free(hw, pcaps);
	return status;
}

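/* Illustrative usage sketch (kept out of the build): request full flow
 * control and let the routine above restart the link so the new pause
 * settings take effect. aq_fail tells the caller which AQ step failed,
 * if any. The wrapper name is hypothetical.
 */
#if 0
static enum ice_status ice_example_enable_full_fc(struct ice_port_info *pi)
{
	u8 aq_fail = ICE_SET_FC_AQ_FAIL_NONE;
	enum ice_status status;

	pi->fc.req_mode = ICE_FC_FULL;
	status = ice_set_fc(pi, &aq_fail, true /* auto link update */);
	if (status)
		ice_debug(pi->hw, ICE_DBG_LINK, "set FC failed, aq_fail=%d\n",
			  aq_fail);
	return status;
}
#endif
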
/**
 * ice_phy_caps_equals_cfg
 * @phy_caps: PHY capabilities
 * @phy_cfg: PHY configuration
 *
 * Helper function to determine if PHY capabilities match the PHY
 * configuration
 */
bool
ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
			struct ice_aqc_set_phy_cfg_data *phy_cfg)
{
	u8 caps_mask, cfg_mask;

	if (!phy_caps || !phy_cfg)
		return false;

	/* These bits are not common between capabilities and configuration.
	 * Do not use them to determine equality.
	 */
	caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
					      ICE_AQC_PHY_EN_MOD_QUAL);
	cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;

	if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
	    phy_caps->phy_type_high != phy_cfg->phy_type_high ||
	    ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
	    phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
	    phy_caps->eee_cap != phy_cfg->eee_cap ||
	    phy_caps->eeer_value != phy_cfg->eeer_value ||
	    phy_caps->link_fec_options != phy_cfg->link_fec_opt)
		return false;

	return true;
}

/**
 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
 * @pi: port information structure
 * @caps: PHY ability structure to copy data from
 * @cfg: PHY configuration structure to copy data to
 *
 * Helper function to copy AQC PHY get ability data to PHY set configuration
 * data structure
 */
void
ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
			 struct ice_aqc_get_phy_caps_data *caps,
			 struct ice_aqc_set_phy_cfg_data *cfg)
{
	if (!pi || !caps || !cfg)
		return;

	ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
	cfg->phy_type_low = caps->phy_type_low;
	cfg->phy_type_high = caps->phy_type_high;
	cfg->caps = caps->caps;
	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
	cfg->eee_cap = caps->eee_cap;
	cfg->eeer_value = caps->eeer_value;
	cfg->link_fec_opt = caps->link_fec_options;
	cfg->module_compliance_enforcement =
		caps->module_compliance_enforcement;

	if (ice_fw_supports_link_override(pi->hw)) {
		struct ice_link_default_override_tlv tlv;

		if (ice_get_link_default_override(&tlv, pi))
			return;

		if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE)
			cfg->module_compliance_enforcement |=
				ICE_LINK_OVERRIDE_STRICT_MODE;
	}
}

/**
 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
 * @pi: port information structure
 * @cfg: PHY configuration data to set FEC mode
 * @fec: FEC mode to configure
 */
enum ice_status
ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
		enum ice_fec_mode fec)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status = ICE_SUCCESS;
	struct ice_hw *hw;

	if (!pi || !cfg)
		return ICE_ERR_BAD_PTR;

	hw = pi->hw;

	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(hw, sizeof(*pcaps));
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
				     NULL);
	if (status)
		goto out;

	switch (fec) {
	case ICE_FEC_BASER:
		/* Clear RS bits, and AND BASE-R ability
		 * bits and OR request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
			ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
			ICE_AQC_PHY_FEC_25G_KR_REQ;
		break;
	case ICE_FEC_RS:
		/* Clear BASE-R bits, and AND RS ability
		 * bits and OR request bits.
		 */
		cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
		cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
			ICE_AQC_PHY_FEC_25G_RS_544_REQ;
		break;
	case ICE_FEC_NONE:
		/* Clear all FEC option bits. */
		cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
		break;
	case ICE_FEC_AUTO:
		/* AND auto FEC bit, and all caps bits. */
		cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
		cfg->link_fec_opt |= pcaps->link_fec_options;
		break;
	default:
		status = ICE_ERR_PARAM;
		break;
	}

	if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) {
		struct ice_link_default_override_tlv tlv;

		if (ice_get_link_default_override(&tlv, pi))
			goto out;

		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
		    (tlv.options & ICE_LINK_OVERRIDE_EN))
			cfg->link_fec_opt = tlv.fec_options;
	}

out:
	ice_free(hw, pcaps);

	return status;
}

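/* Illustrative sketch (kept out of the build): the usual FEC
 * reconfiguration flow is get the current PHY caps, convert them to a
 * config, apply the requested FEC mode, then push the result to firmware.
 * The wrapper name is hypothetical.
 */
#if 0
static enum ice_status
ice_example_set_fec(struct ice_port_info *pi, enum ice_fec_mode fec)
{
	struct ice_aqc_get_phy_caps_data pcaps = { 0 };
	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
	enum ice_status status;

	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
				     &pcaps, NULL);
	if (status)
		return status;

	ice_copy_phy_caps_to_cfg(pi, &pcaps, &cfg);
	status = ice_cfg_phy_fec(pi, &cfg, fec);
	if (status)
		return status;

	return ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);
}
#endif
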
/**
 * ice_get_link_status - get status of the HW network link
 * @pi: port information structure
 * @link_up: pointer to bool (true/false = linkup/linkdown)
 *
 * Variable link_up is true if link is up, false if link is down.
 * The variable link_up is invalid if status is non-zero. As a
 * result of this call, link status reporting becomes enabled.
 */
enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
{
	struct ice_phy_info *phy_info;
	enum ice_status status = ICE_SUCCESS;

	if (!pi || !link_up)
		return ICE_ERR_PARAM;

	phy_info = &pi->phy;

	if (phy_info->get_link_info) {
		status = ice_update_link_info(pi);

		if (status)
			ice_debug(pi->hw, ICE_DBG_LINK,
				  "get link status error, status = %d\n",
				  status);
	}

	*link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;

	return status;
}

/**
 * ice_aq_set_link_restart_an
 * @pi: pointer to the port information structure
 * @ena_link: if true: enable link, if false: disable link
 * @cd: pointer to command details structure or NULL
 *
 * Sets up the link and restarts the Auto-Negotiation over the link.
 */
enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
			   struct ice_sq_cd *cd)
{
	struct ice_aqc_restart_an *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.restart_an;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);

	cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
	cmd->lport_num = pi->lport;
	if (ena_link)
		cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
	else
		cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;

	return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_event_mask
 * @hw: pointer to the HW struct
 * @port_num: port number of the physical function
 * @mask: event mask to be set
 * @cd: pointer to command details structure or NULL
 *
 * Set event mask (0x0613)
 */
enum ice_status
ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
		      struct ice_sq_cd *cd)
{
	struct ice_aqc_set_event_mask *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_event_mask;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);

	cmd->lport_num = port_num;

	cmd->event_mask = CPU_TO_LE16(mask);
	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_mac_loopback
 * @hw: pointer to the HW struct
 * @ena_lpbk: Enable or Disable loopback
 * @cd: pointer to command details structure or NULL
 *
 * Enable/disable loopback on a given port
 */
enum ice_status
ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
{
	struct ice_aqc_set_mac_lb *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_mac_lb;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
	if (ena_lpbk)
		cmd->lb_mode = ICE_AQ_MAC_LB_EN;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_set_port_id_led
 * @pi: pointer to the port information
 * @is_orig_mode: is this LED set to original mode (by the net-list)
 * @cd: pointer to command details structure or NULL
 *
 * Set LED value for the given port (0x06e9)
 */
enum ice_status
ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
		       struct ice_sq_cd *cd)
{
	struct ice_aqc_set_port_id_led *cmd;
	struct ice_hw *hw = pi->hw;
	struct ice_aq_desc desc;

	cmd = &desc.params.set_port_id_led;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);

	if (is_orig_mode)
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
	else
		cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;

	return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}

/**
 * ice_aq_sff_eeprom
 * @hw: pointer to the HW struct
 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
 * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding.
 * @page: QSFP page
 * @set_page: set or ignore the page
 * @data: pointer to data buffer to be read/written to the I2C device.
 * @length: 1-16 for read, 1 for write.
 * @write: 0 read, 1 for write.
 * @cd: pointer to command details structure or NULL
 *
 * Read/Write SFF EEPROM (0x06EE)
 */
enum ice_status
ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
		  u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
		  bool write, struct ice_sq_cd *cd)
{
	struct ice_aqc_sff_eeprom *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	if (!data || (mem_addr & 0xff00))
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
	cmd = &desc.params.read_write_sff_param;
	desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF);
	cmd->lport_num = (u8)(lport & 0xff);
	cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
	cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) &
					 ICE_AQC_SFF_I2CBUS_7BIT_M) |
					((set_page <<
					  ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
					 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
	cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff);
	cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
	if (write)
		cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE);

	status = ice_aq_send_cmd(hw, &desc, data, length, cd);
	return status;
}

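/* Illustrative sketch (kept out of the build): reading the SFF identifier
 * byte of the attached module (offset 0, page 0) on the conventional 0xA0
 * EEPROM address. The lport "valid" bit (bit 8) is left clear, so the
 * default logical port is used. A length of 1-16 bytes is valid for reads.
 * The wrapper name is hypothetical.
 */
#if 0
static enum ice_status ice_example_read_sff_id(struct ice_hw *hw, u8 *id)
{
	return ice_aq_sff_eeprom(hw, 0 /* lport, bit 8 clear */,
				 0xA0 /* bus_addr */, 0 /* mem_addr */,
				 0 /* page */, 0 /* set_page: ignore */,
				 id, 1 /* length */, false /* read */, NULL);
}
#endif
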
/**
 * __ice_aq_get_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_id: VSI FW index
 * @lut_type: LUT table type
 * @lut: pointer to the LUT buffer provided by the caller
 * @lut_size: size of the LUT buffer
 * @glob_lut_idx: global LUT index
 * @set: set true to set the table, false to get the table
 *
 * Internal function to get (0x0B05) or set (0x0B03) the RSS lookup table
 */
static enum ice_status
__ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
			 u16 lut_size, u8 glob_lut_idx, bool set)
{
	struct ice_aqc_get_set_rss_lut *cmd_resp;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 flags = 0;

	cmd_resp = &desc.params.get_set_rss_lut;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
	}

	cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
					 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
					ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_LUT_VSI_VALID);

	switch (lut_type) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
	case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
		flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
			  ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
		break;
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

	if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
		flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
			  ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);

		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
		if (!set)
			goto ice_aq_get_set_rss_lut_send;
	} else {
		goto ice_aq_get_set_rss_lut_send;
	}

	/* LUT size is only valid for Global and PF table types */
	switch (lut_size) {
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
		break;
	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
			break;
		}
		/* fall-through */
	default:
		status = ICE_ERR_PARAM;
		goto ice_aq_get_set_rss_lut_exit;
	}

ice_aq_get_set_rss_lut_send:
	cmd_resp->flags = CPU_TO_LE16(flags);
	status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);

ice_aq_get_set_rss_lut_exit:
	return status;
}

/**
 * ice_aq_get_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @lut_type: LUT table type
 * @lut: pointer to the LUT buffer provided by the caller
 * @lut_size: size of the LUT buffer
 *
 * get the RSS lookup table, PF or VSI type
 */
enum ice_status
ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
		   u8 *lut, u16 lut_size)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					lut_type, lut, lut_size, 0, false);
}

/**
 * ice_aq_set_rss_lut
 * @hw: pointer to the hardware structure
 * @vsi_handle: software VSI handle
 * @lut_type: LUT table type
 * @lut: pointer to the LUT buffer provided by the caller
 * @lut_size: size of the LUT buffer
 *
 * set the RSS lookup table, PF or VSI type
 */
enum ice_status
ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
		   u8 *lut, u16 lut_size)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					lut_type, lut, lut_size, 0, true);
}

/**
 * __ice_aq_get_set_rss_key
 * @hw: pointer to the HW struct
 * @vsi_id: VSI FW index
 * @key: pointer to key info struct
 * @set: set true to set the key, false to get the key
 *
 * get (0x0B04) or set (0x0B02) the RSS key per VSI
 */
static enum
ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
				    struct ice_aqc_get_set_rss_keys *key,
				    bool set)
{
	struct ice_aqc_get_set_rss_key *cmd_resp;
	u16 key_size = sizeof(*key);
	struct ice_aq_desc desc;

	cmd_resp = &desc.params.get_set_rss_key;

	if (set) {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
		desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
	} else {
		ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
	}

	cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
					 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
					ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
				       ICE_AQC_GSET_RSS_KEY_VSI_VALID);

	return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
}

/**
 * ice_aq_get_rss_key
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @key: pointer to key info struct
 *
 * get the RSS key per VSI
 */
enum ice_status
ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *key)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					key, false);
}

/**
 * ice_aq_set_rss_key
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @keys: pointer to key info struct
 *
 * set the RSS key per VSI
 */
enum ice_status
ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
		   struct ice_aqc_get_set_rss_keys *keys)
{
	if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
		return ICE_ERR_PARAM;

	return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
					keys, true);
}

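/* Illustrative sketch (kept out of the build): programming a VSI's RSS
 * key. The caller owns key generation; here the standard key field (named
 * as in ice_adminq_cmd.h) is simply filled with a fixed byte for
 * demonstration. The wrapper name is hypothetical.
 */
#if 0
static enum ice_status ice_example_set_rss_key(struct ice_hw *hw,
					       u16 vsi_handle)
{
	struct ice_aqc_get_set_rss_keys keys;

	ice_memset(&keys, 0, sizeof(keys), ICE_NONDMA_MEM);
	ice_memset(keys.standard_rss_key, 0x6d,
		   sizeof(keys.standard_rss_key), ICE_NONDMA_MEM);

	return ice_aq_set_rss_key(hw, vsi_handle, &keys);
}
#endif
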
/**
 * ice_aq_add_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: Number of added queue groups
 * @qg_list: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * Add Tx LAN queue (0x0C30)
 *
 * NOTE:
 * Prior to calling add Tx LAN queue:
 * Initialize the following as part of the Tx queue context:
 * Completion queue ID if the queue uses Completion queue, Quanta profile,
 * Cache profile and Packet shaper profile.
 *
 * After add Tx LAN queue AQ command is completed:
 * Interrupts should be associated with specific queues,
 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
 * flow.
 */
enum ice_status
ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
		   struct ice_sq_cd *cd)
{
	u16 i, sum_header_size, sum_q_size = 0;
	struct ice_aqc_add_tx_qgrp *list;
	struct ice_aqc_add_txqs *cmd;
	struct ice_aq_desc desc;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);

	cmd = &desc.params.add_txqs;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);

	if (!qg_list)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;

	sum_header_size = num_qgrps *
		(sizeof(*qg_list) - sizeof(*qg_list->txqs));

	list = qg_list;
	for (i = 0; i < num_qgrps; i++) {
		struct ice_aqc_add_txqs_perq *q = list->txqs;

		sum_q_size += list->num_txqs * sizeof(*q);
		list = (struct ice_aqc_add_tx_qgrp *)(q + list->num_txqs);
	}

	if (buf_size != (sum_header_size + sum_q_size))
		return ICE_ERR_PARAM;

	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	cmd->num_qgrps = num_qgrps;

	return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
}

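/* Sizing note with a worked example (illustrative only): for a single
 * group carrying a single queue, the expected buf_size reduces to
 * sizeof(struct ice_aqc_add_tx_qgrp), because that structure already
 * embeds one struct ice_aqc_add_txqs_perq:
 *
 *   sum_header_size = 1 * (sizeof(*qg_list) - sizeof(*qg_list->txqs))
 *   sum_q_size      = 1 * sizeof(struct ice_aqc_add_txqs_perq)
 *   buf_size        = sum_header_size + sum_q_size
 *                   = sizeof(struct ice_aqc_add_tx_qgrp)
 *
 * A group with N queues contributes N - 1 additional per-queue entries.
 */
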
/**
 * ice_aq_dis_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qgrps: number of groups in the list
 * @qg_list: the list of groups to disable
 * @buf_size: the total size of the qg_list buffer in bytes
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * Disable LAN Tx queue (0x0C31)
 */
static enum ice_status
ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
		   struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
		   enum ice_disq_rst_src rst_src, u16 vmvf_num,
		   struct ice_sq_cd *cd)
{
	struct ice_aqc_dis_txqs *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;
	u16 i, sz = 0;

	ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
	cmd = &desc.params.dis_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);

	/* qg_list can be NULL only in VM/VF reset flow */
	if (!qg_list && !rst_src)
		return ICE_ERR_PARAM;

	if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
		return ICE_ERR_PARAM;

	cmd->num_entries = num_qgrps;

	cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
					    ICE_AQC_Q_DIS_TIMEOUT_M);

	switch (rst_src) {
	case ICE_VM_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
		cmd->vmvf_and_timeout |=
			CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_VF_RESET:
		cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
		/* In this case, FW expects vmvf_num to be absolute VF ID */
		cmd->vmvf_and_timeout |=
			CPU_TO_LE16((vmvf_num + hw->func_caps.vf_base_id) &
				    ICE_AQC_Q_DIS_VMVF_NUM_M);
		break;
	case ICE_NO_RESET:
	default:
		break;
	}

	/* flush pipe on time out */
	cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
	/* If no queue group info, we are in a reset flow. Issue the AQ */
	if (!qg_list)
		goto do_aq;

	/* set RD bit to indicate that command buffer is provided by the driver
	 * and it needs to be read by the firmware
	 */
	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	for (i = 0; i < num_qgrps; ++i) {
		/* Calculate the size taken up by the queue IDs in this group */
		sz += qg_list[i].num_qs * sizeof(qg_list[i].q_id);

		/* Add the size of the group header */
		sz += sizeof(qg_list[i]) - sizeof(qg_list[i].q_id);

		/* If the num of queues is even, add 2 bytes of padding */
		if ((qg_list[i].num_qs % 2) == 0)
			sz += 2;
	}

	if (buf_size != sz)
		return ICE_ERR_PARAM;

do_aq:
	status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
	if (status) {
		if (!qg_list)
			ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
				  vmvf_num, hw->adminq.sq_last_status);
		else
			ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
				  LE16_TO_CPU(qg_list[0].q_id[0]),
				  hw->adminq.sq_last_status);
	}
	return status;
}

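/* Sizing note with a worked example (illustrative only): each group in
 * qg_list contributes its header plus one __le16 per queue ID, plus 2
 * bytes of padding when num_qs is even, which keeps every group 4-byte
 * aligned. For one group carrying two queues:
 *
 *   sz = 2 * sizeof(qg_list[0].q_id)                    two queue IDs
 *      + sizeof(qg_list[0]) - sizeof(qg_list[0].q_id)   group header
 *      + 2                                              even-count padding
 */
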
/**
 * ice_aq_move_recfg_lan_txq
 * @hw: pointer to the hardware structure
 * @num_qs: number of queues to move/reconfigure
 * @is_move: true if this operation involves node movement
 * @is_tc_change: true if this operation involves a TC change
 * @subseq_call: true if this operation is a subsequent call
 * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN
 * @timeout: timeout in units of 100 usec (valid values 0-50)
 * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN
 * @buf: struct containing src/dest TEID and per-queue info
 * @buf_size: size of buffer for indirect command
 * @txqs_moved: out param, number of queues successfully moved
 * @cd: pointer to command details structure or NULL
 *
 * Move / Reconfigure Tx LAN queues (0x0C32)
 */
enum ice_status
ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
			  bool is_tc_change, bool subseq_call, bool flush_pipe,
			  u8 timeout, u32 *blocked_cgds,
			  struct ice_aqc_move_txqs_data *buf, u16 buf_size,
			  u8 *txqs_moved, struct ice_sq_cd *cd)
{
	struct ice_aqc_move_txqs *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.move_txqs;
	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs);

#define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50
	if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX)
		return ICE_ERR_PARAM;

	if (is_tc_change && !flush_pipe && !blocked_cgds)
		return ICE_ERR_PARAM;

	if (!is_move && !is_tc_change)
		return ICE_ERR_PARAM;

	desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);

	if (is_move)
		cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE;

	if (is_tc_change)
		cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE;

	if (subseq_call)
		cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL;

	if (flush_pipe)
		cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE;

	cmd->num_qs = num_qs;
	cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) &
			ICE_AQC_Q_CMD_TIMEOUT_M);

	status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);

	if (!status && txqs_moved)
		*txqs_moved = cmd->num_qs;

	if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN &&
	    is_tc_change && !flush_pipe)
		*blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds);

	return status;
}

/* End of FW Admin Queue command wrappers */

/**
 * ice_write_byte - write a byte to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u8 src_byte, dest_byte, mask;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	src_byte = *from;
	src_byte &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_byte <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA);

	dest_byte &= ~mask;	/* get the bits not changing */
	dest_byte |= src_byte;	/* add in the new bits */

	/* put it all back */
	ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
}

/**
 * ice_write_word - write a word to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u16 src_word, mask;
	__le16 dest_word;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_word = *(u16 *)from;
	src_word &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_word <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA);

	dest_word &= ~(CPU_TO_LE16(mask));	/* get the bits not changing */
	dest_word |= CPU_TO_LE16(src_word);	/* add in the new bits */

	/* put it all back */
	ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
}

/**
 * ice_write_dword - write a dword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u32 src_dword, mask;
	__le32 dest_dword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is masked
	 * to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = (u32)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_dword = *(u32 *)from;
	src_dword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_dword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA);

	dest_dword &= ~(CPU_TO_LE32(mask));	/* get the bits not changing */
	dest_dword |= CPU_TO_LE32(src_dword);	/* add in the new bits */

	/* put it all back */
	ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
}

/**
 * ice_write_qword - write a qword to a packed context structure
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	u64 src_qword, mask;
	__le64 dest_qword;
	u8 *from, *dest;
	u16 shift_width;

	/* copy from the next struct field */
	from = src_ctx + ce_info->offset;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is masked
	 * to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = (u64)~0;

	/* don't swizzle the bits until after the mask because the mask bits
	 * will be in a different bit position on big endian machines
	 */
	src_qword = *(u64 *)from;
	src_qword &= mask;

	/* shift to correct alignment */
	mask <<= shift_width;
	src_qword <<= shift_width;

	/* get the current bits from the target bit string */
	dest = dest_ctx + (ce_info->lsb / 8);

	ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA);

	dest_qword &= ~(CPU_TO_LE64(mask));	/* get the bits not changing */
	dest_qword |= CPU_TO_LE64(src_qword);	/* add in the new bits */

	/* put it all back */
	ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
}

/**
 * ice_set_ctx - set context bits in packed structure
 * @src_ctx: pointer to a generic non-packed context structure
 * @dest_ctx: pointer to memory for the packed structure
 * @ce_info: a description of the structure to be transformed
 */
enum ice_status
ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
{
	int f;

	for (f = 0; ce_info[f].width; f++) {
		/* We have to deal with each element of the FW response
		 * using the correct size so that we are correct regardless
		 * of the endianness of the machine.
		 */
		switch (ce_info[f].size_of) {
		case sizeof(u8):
			ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u16):
			ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u32):
			ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case sizeof(u64):
			ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			return ICE_ERR_INVAL_SIZE;
		}
	}

	return ICE_SUCCESS;
}

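/* Illustrative sketch (kept out of the build): packing a hypothetical
 * two-field context with ice_set_ctx(). The table is assumed to use the
 * driver's ICE_CTX_STORE() initializer (offset/size taken from the struct,
 * explicit width and lsb in the packed image); real tables such as the
 * Tx LAN queue context descriptions follow the same shape. All
 * ice_example_* names are hypothetical.
 */
#if 0
struct ice_example_ctx {
	u8 mode;	/* occupies 3 bits in the packed image, lsb 0 */
	u16 base;	/* occupies 10 bits in the packed image, lsb 3 */
};

static const struct ice_ctx_ele ice_example_ctx_info[] = {
				     /* Field	Width	LSB */
	ICE_CTX_STORE(ice_example_ctx, mode,	3,	0),
	ICE_CTX_STORE(ice_example_ctx, base,	10,	3),
	{ 0 }
};

static void ice_example_pack(u8 *packed_buf)
{
	struct ice_example_ctx ctx = { .mode = 5, .base = 0x123 };

	ice_set_ctx((u8 *)&ctx, packed_buf, ice_example_ctx_info);
}
#endif
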
/**
 * ice_read_byte - read context byte into struct
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
{
	u8 dest_byte, mask;
	u8 *src, *target;
	u16 shift_width;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = (u8)(BIT(ce_info->width) - 1);

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = src_ctx + (ce_info->lsb / 8);

	ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA);

	dest_byte &= mask;	/* keep only the field bits */

	dest_byte >>= shift_width;

	/* get the address from the struct field */
	target = dest_ctx + ce_info->offset;

	/* put it back in the struct */
	ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
}

/**
 * ice_read_word - read context word into struct
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
{
	u16 dest_word, mask;
	u8 *src, *target;
	__le16 src_word;
	u16 shift_width;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;
	mask = BIT(ce_info->width) - 1;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = src_ctx + (ce_info->lsb / 8);

	ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA);

	/* the data in the memory is stored as little endian so mask it
	 * accordingly; keep only the field bits
	 */
	src_word &= CPU_TO_LE16(mask);

	/* get the data back into host order before shifting */
	dest_word = LE16_TO_CPU(src_word);

	dest_word >>= shift_width;

	/* get the address from the struct field */
	target = dest_ctx + ce_info->offset;

	/* put it back in the struct */
	ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
}

/**
 * ice_read_dword - read context dword into struct
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
{
	u32 dest_dword, mask;
	u8 *src, *target;
	__le32 src_dword;
	u16 shift_width;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 32 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is masked
	 * to 5 bits so the shift will do nothing
	 */
	if (ce_info->width < 32)
		mask = BIT(ce_info->width) - 1;
	else
		mask = (u32)~0;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = src_ctx + (ce_info->lsb / 8);

	ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA);

	/* the data in the memory is stored as little endian so mask it
	 * accordingly; keep only the field bits
	 */
	src_dword &= CPU_TO_LE32(mask);

	/* get the data back into host order before shifting */
	dest_dword = LE32_TO_CPU(src_dword);

	dest_dword >>= shift_width;

	/* get the address from the struct field */
	target = dest_ctx + ce_info->offset;

	/* put it back in the struct */
	ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
}

/**
 * ice_read_qword - read context qword into struct
 * @src_ctx: the context structure to read from
 * @dest_ctx: the context to be written to
 * @ce_info: a description of the struct to be filled
 */
static void
ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
{
	u64 dest_qword, mask;
	u8 *src, *target;
	__le64 src_qword;
	u16 shift_width;

	/* prepare the bits and mask */
	shift_width = ce_info->lsb % 8;

	/* if the field width is exactly 64 on an x86 machine, then the shift
	 * operation will not work because the SHL instructions count is masked
	 * to 6 bits so the shift will do nothing
	 */
	if (ce_info->width < 64)
		mask = BIT_ULL(ce_info->width) - 1;
	else
		mask = (u64)~0;

	/* shift to correct alignment */
	mask <<= shift_width;

	/* get the current bits from the src bit string */
	src = src_ctx + (ce_info->lsb / 8);

	ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA);

	/* the data in the memory is stored as little endian so mask it
	 * accordingly; keep only the field bits
	 */
	src_qword &= CPU_TO_LE64(mask);

	/* get the data back into host order before shifting */
	dest_qword = LE64_TO_CPU(src_qword);

	dest_qword >>= shift_width;

	/* get the address from the struct field */
	target = dest_ctx + ce_info->offset;

	/* put it back in the struct */
	ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
}

/**
 * ice_get_ctx - extract context bits from a packed structure
 * @src_ctx: pointer to a generic packed context structure
 * @dest_ctx: pointer to a generic non-packed context structure
 * @ce_info: a description of the structure to be read from
 */
enum ice_status
ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
{
	int f;

	for (f = 0; ce_info[f].width; f++) {
		switch (ce_info[f].size_of) {
		case 1:
			ice_read_byte(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case 2:
			ice_read_word(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case 4:
			ice_read_dword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		case 8:
			ice_read_qword(src_ctx, dest_ctx, &ce_info[f]);
			break;
		default:
			/* nothing to do, just keep going */
			break;
		}
	}

	return ICE_SUCCESS;
}

/**
 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
 * @hw: pointer to the HW struct
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 */
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
{
	struct ice_vsi_ctx *vsi;
	struct ice_q_ctx *q_ctx;

	vsi = ice_get_vsi_ctx(hw, vsi_handle);
	if (!vsi)
		return NULL;
	if (q_handle >= vsi->num_lan_q_entries[tc])
		return NULL;
	if (!vsi->lan_q_ctx[tc])
		return NULL;
	q_ctx = vsi->lan_q_ctx[tc];
	return &q_ctx[q_handle];
}

/**
 * ice_ena_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @q_handle: software queue handle
 * @num_qgrps: Number of added queue groups
 * @buf: list of queue groups to be added
 * @buf_size: size of buffer for indirect command
 * @cd: pointer to command details structure or NULL
 *
 * This function adds one LAN queue
 */
enum ice_status
ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
		u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_aqc_txsched_elem_data node = { 0 };
	struct ice_sched_node *parent;
	struct ice_q_ctx *q_ctx;
	enum ice_status status;
	struct ice_hw *hw;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (num_qgrps > 1 || buf->num_txqs > 1)
		return ICE_ERR_MAX_LIMIT;

	hw = pi->hw;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&pi->sched_lock);

	q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
	if (!q_ctx) {
		ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
			  q_handle);
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	/* find a parent node */
	parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
					    ICE_SCHED_NODE_OWNER_LAN);
	if (!parent) {
		status = ICE_ERR_PARAM;
		goto ena_txq_exit;
	}

	buf->parent_teid = parent->info.node_teid;
	node.parent_teid = parent->info.node_teid;
	/* Mark the values in the "generic" section as valid. The default
	 * value in the "generic" section is zero. This means that:
	 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
	 * - 0 priority among siblings, indicated by Bits 1-3.
	 * - WFQ, indicated by Bit 4.
	 * - 0 Adjustment value is used in PSM credit update flow, indicated by
	 * Bits 5-6.
	 * - Bit 7 is reserved.
	 * Without setting the generic section as valid in valid_sections, the
	 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
	 */
	buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;

	/* add the LAN queue */
	status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
	if (status != ICE_SUCCESS) {
		ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
			  LE16_TO_CPU(buf->txqs[0].txq_id),
			  hw->adminq.sq_last_status);
		goto ena_txq_exit;
	}

	node.node_teid = buf->txqs[0].q_teid;
	node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
	q_ctx->q_handle = q_handle;
	q_ctx->q_teid = LE32_TO_CPU(node.node_teid);

	/* add a leaf node into scheduler tree queue layer */
	status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
	if (!status)
		status = ice_sched_replay_q_bw(pi, q_ctx);

ena_txq_exit:
	ice_release_lock(&pi->sched_lock);
	return status;
}

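/* Usage sketch (illustrative only): enabling a single LAN Tx queue. The
 * caller fills the add-queue-group buffer, including the packed Tx queue
 * context, before calling; txq_id, vsi_handle, tc, and q_handle are
 * hypothetical values.
 *
 *	struct ice_aqc_add_tx_qgrp qg = { 0 };
 *	enum ice_status status;
 *
 *	qg.num_txqs = 1;
 *	qg.txqs[0].txq_id = CPU_TO_LE16(txq_id);
 *	... pack the Tx LAN queue context into qg.txqs[0].txq_ctx ...
 *	status = ice_ena_vsi_txq(pi, vsi_handle, tc, q_handle, 1, &qg,
 *				 sizeof(qg), NULL);
 */
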
/**
 * ice_dis_vsi_txq
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc: TC number
 * @num_queues: number of queues
 * @q_handles: pointer to software queue handle array
 * @q_ids: pointer to the q_id array
 * @q_teids: pointer to queue node teids
 * @rst_src: if called due to reset, specifies the reset source
 * @vmvf_num: the relative VM or VF number that is undergoing the reset
 * @cd: pointer to command details structure or NULL
 *
 * This function removes queues and their corresponding nodes in SW DB
 */
enum ice_status
ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
		u16 *q_handles, u16 *q_ids, u32 *q_teids,
		enum ice_disq_rst_src rst_src, u16 vmvf_num,
		struct ice_sq_cd *cd)
{
	enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
	struct ice_aqc_dis_txq_item qg_list;
	struct ice_q_ctx *q_ctx;
	u16 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (!num_queues) {
		/* if queue is disabled already yet the disable queue command
		 * has to be sent to complete the VF reset, then call
		 * ice_aq_dis_lan_txq without any queue information
		 */
		if (rst_src)
			return ice_aq_dis_lan_txq(pi->hw, 0, NULL, 0, rst_src,
						  vmvf_num, NULL);
		return ICE_ERR_CFG;
	}

	ice_acquire_lock(&pi->sched_lock);

	for (i = 0; i < num_queues; i++) {
		struct ice_sched_node *node;

		node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
		if (!node)
			continue;
		q_ctx = ice_get_lan_q_ctx(pi->hw, vsi_handle, tc, q_handles[i]);
		if (!q_ctx) {
			ice_debug(pi->hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
				  q_handles[i]);
			continue;
		}
		if (q_ctx->q_handle != q_handles[i]) {
			ice_debug(pi->hw, ICE_DBG_SCHED, "Err: handles %d %d\n",
				  q_ctx->q_handle, q_handles[i]);
			continue;
		}
		qg_list.parent_teid = node->info.parent_teid;
		qg_list.num_qs = 1;
		qg_list.q_id[0] = CPU_TO_LE16(q_ids[i]);
		status = ice_aq_dis_lan_txq(pi->hw, 1, &qg_list,
					    sizeof(qg_list), rst_src, vmvf_num,
					    cd);

		if (status != ICE_SUCCESS)
			break;
		ice_free_sched_node(pi, node);
		q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
	}
	ice_release_lock(&pi->sched_lock);
	return status;
}

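/* Usage sketch (illustrative only): disabling the queue enabled above. The
 * handle, ID, and TEID arrays are parallel, one entry per queue; the TEID
 * is the one saved in the queue context by ice_ena_vsi_txq.
 *
 *	u16 q_handle = 0, q_id = txq_id;
 *	u32 q_teid = q_ctx->q_teid;
 *
 *	status = ice_dis_vsi_txq(pi, vsi_handle, tc, 1, &q_handle, &q_id,
 *				 &q_teid, ICE_NO_RESET, 0, NULL);
 */
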
/**
 * ice_cfg_vsi_qs - configure the new/existing VSI queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @maxqs: max queues array per TC
 * @owner: LAN or RDMA
 *
 * This function adds/updates the VSI queues per TC.
 */
static enum ice_status
ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
	       u16 *maxqs, u8 owner)
{
	enum ice_status status = ICE_SUCCESS;
	u8 i;

	if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
		return ICE_ERR_CFG;

	if (!ice_is_vsi_valid(pi->hw, vsi_handle))
		return ICE_ERR_PARAM;

	ice_acquire_lock(&pi->sched_lock);

	ice_for_each_traffic_class(i) {
		/* configuration is possible only if TC node is present */
		if (!ice_sched_get_tc_node(pi, i))
			continue;

		status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
					   ice_is_tc_ena(tc_bitmap, i));
		if (status)
			break;
	}

	ice_release_lock(&pi->sched_lock);
	return status;
}

/**
 * ice_cfg_vsi_lan - configure VSI LAN queues
 * @pi: port information structure
 * @vsi_handle: software VSI handle
 * @tc_bitmap: TC bitmap
 * @max_lanqs: max LAN queues array per TC
 *
 * This function adds/updates the VSI LAN queues per TC.
 */
enum ice_status
ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
		u16 *max_lanqs)
{
	return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
			      ICE_SCHED_NODE_OWNER_LAN);
}

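/* Usage sketch (illustrative only): requesting 16 LAN queues on TC 0 for a
 * VSI. The bitmap selects the TCs to configure and the array gives the
 * maximum queue count per TC.
 *
 *	u16 max_lanqs[ICE_MAX_TRAFFIC_CLASS] = { 16 };
 *
 *	status = ice_cfg_vsi_lan(pi, vsi_handle, BIT(0), max_lanqs);
 */
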
/**
 * ice_replay_pre_init - replay pre initialization
 * @hw: pointer to the HW struct
 *
 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
 */
static enum ice_status ice_replay_pre_init(struct ice_hw *hw)
{
	struct ice_switch_info *sw = hw->switch_info;
	u8 i;

	/* Delete old entries from replay filter list head if there is any */
	ice_rm_all_sw_replay_rule_info(hw);
	/* At the start of replay, move entries into the replay_rules list;
	 * this allows rule entries to be added back to the filt_rules list,
	 * which is the operational list.
	 */
	for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
		LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
				  &sw->recp_list[i].filt_replay_rules);
	ice_sched_replay_agg_vsi_preinit(hw);

	return ice_sched_replay_tc_node_bw(hw->port_info);
}

/**
 * ice_replay_vsi - replay VSI configuration
 * @hw: pointer to the HW struct
 * @vsi_handle: driver VSI handle
 *
 * Restore all VSI configuration after reset. It is required to call this
 * function with the main VSI first.
 */
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
{
	enum ice_status status;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return ICE_ERR_PARAM;

	/* Replay pre-initialization if there is any */
	if (vsi_handle == ICE_MAIN_VSI_HANDLE) {
		status = ice_replay_pre_init(hw);
		if (status)
			return status;
	}
	/* Replay per VSI all RSS configurations */
	status = ice_replay_rss_cfg(hw, vsi_handle);
	if (status)
		return status;
	/* Replay per VSI all filters */
	status = ice_replay_vsi_all_fltr(hw, vsi_handle);
	if (!status)
		status = ice_replay_vsi_agg(hw, vsi_handle);
	return status;
}

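/* Usage sketch (illustrative only): replay order after a reset. The main
 * VSI must be replayed first so that ice_replay_pre_init() runs exactly
 * once before any other VSI is restored; ice_replay_post() then cleans up
 * the replay lists.
 *
 *	status = ice_replay_vsi(hw, ICE_MAIN_VSI_HANDLE);
 *	for each other active VSI handle:
 *		status = ice_replay_vsi(hw, vsi_handle);
 *	ice_replay_post(hw);
 */
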
/**
 * ice_replay_post - post replay configuration cleanup
 * @hw: pointer to the HW struct
 *
 * Post replay cleanup.
 */
void ice_replay_post(struct ice_hw *hw)
{
	/* Delete old entries from replay filter list head */
	ice_rm_all_sw_replay_rule_info(hw);
	ice_sched_replay_agg(hw);
}

/**
 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of 64 bit HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

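/* Worked example (illustrative only): the roll-over branch above computes
 * the unsigned delta through the 2^40 modulus. With
 *
 *	*prev_stat = 0xFFFFFFFFF0ULL;           near the 40-bit maximum
 *	new_data   = 0x0000000010ULL;           counter has wrapped
 *
 * the update is (0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0 = 0x20 packets. This
 * is correct as long as the counter wraps at most once between reads.
 */
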
/**
 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
 * @hw: ptr to the hardware info
 * @reg: offset of HW register to read from
 * @prev_stat_loaded: bool to specify if previous stats are loaded
 * @prev_stat: ptr to previous loaded stat value
 * @cur_stat: ptr to current stat value
 */
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
		  u64 *prev_stat, u64 *cur_stat)
{
	u32 new_data;

	new_data = rd32(hw, reg);

	/* device stats are not reset at PFR, they likely will not be zeroed
	 * when the driver starts. Thus, save the value from the first read
	 * without adding to the statistic value so that we report stats which
	 * count up from zero.
	 */
	if (!prev_stat_loaded) {
		*prev_stat = new_data;
		return;
	}

	/* Calculate the difference between the new and old values, and then
	 * add it to the software stat value.
	 */
	if (new_data >= *prev_stat)
		*cur_stat += new_data - *prev_stat;
	else
		/* to manage the potential roll-over */
		*cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;

	/* Update the previously stored value to prepare for next read */
	*prev_stat = new_data;
}

/**
 * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values
 * @hw: ptr to the hardware info
 * @vsi_handle: VSI handle
 * @prev_stat_loaded: bool to specify if the previous stat values are loaded
 * @cur_stats: ptr to current stats structure
 *
 * The GLV_REPC statistic register actually tracks two 16-bit statistics, and
 * thus cannot be read using the normal ice_stat_update32 function.
 *
 * Read the GLV_REPC register associated with the given VSI, and update the
 * rx_no_desc and rx_error values in the ice_eth_stats structure.
 *
 * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be
 * cleared each time it's read.
 *
 * Note that the GLV_RDPC register also counts the causes that would trigger
 * GLV_REPC. However, it does not give the finer-grained detail about why the
 * packets are being dropped. The GLV_REPC values can be used to distinguish
 * whether Rx packets are dropped due to errors or due to no available
 * descriptors.
 */
void
ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
		     struct ice_eth_stats *cur_stats)
{
	u16 vsi_num, no_desc, error_cnt;
	u32 repc;

	if (!ice_is_vsi_valid(hw, vsi_handle))
		return;

	vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);

	/* If we haven't loaded stats yet, just clear the current value */
	if (!prev_stat_loaded) {
		wr32(hw, GLV_REPC(vsi_num), 0);
		return;
	}

	repc = rd32(hw, GLV_REPC(vsi_num));
	no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S;
	error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S;

	/* Clear the count by writing to the stats register */
	wr32(hw, GLV_REPC(vsi_num), 0);

	cur_stats->rx_no_desc += no_desc;
	cur_stats->rx_errors += error_cnt;
}

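/* Worked example (illustrative only), assuming for the sake of the numbers
 * that the no-descriptor count occupies the low half of GLV_REPC and the
 * error count the high half (the _S/_M defines fix the real layout):
 *
 *	repc = 0x0005000A;
 *	no_desc   = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S;
 *	error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S;
 *
 * would yield 0x000A no-descriptor drops and 0x0005 error drops. The
 * subsequent write of zero re-arms the register, since both halves stick
 * at 0xFFFF instead of wrapping.
 */
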
/**
 * ice_aq_alternate_write
 * @hw: pointer to the hardware structure
 * @reg_addr0: address of first dword to be written
 * @reg_val0: value to be written under 'reg_addr0'
 * @reg_addr1: address of second dword to be written
 * @reg_val1: value to be written under 'reg_addr1'
 *
 * Write one or two dwords to alternate structure. Fields are indicated
 * by 'reg_addr0' and 'reg_addr1' register numbers.
 */
enum ice_status
ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
		       u32 reg_addr1, u32 reg_val1)
{
	struct ice_aqc_read_write_alt_direct *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.read_write_alt_direct;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_alt_direct);
	cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
	cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
	cmd->dword0_value = CPU_TO_LE32(reg_val0);
	cmd->dword1_value = CPU_TO_LE32(reg_val1);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);

	return status;
}

/**
 * ice_aq_alternate_read
 * @hw: pointer to the hardware structure
 * @reg_addr0: address of first dword to be read
 * @reg_val0: pointer for data read from 'reg_addr0'
 * @reg_addr1: address of second dword to be read
 * @reg_val1: pointer for data read from 'reg_addr1'
 *
 * Read one or two dwords from alternate structure. Fields are indicated
 * by 'reg_addr0' and 'reg_addr1' register numbers. If 'reg_val1' pointer
 * is not passed then only register at 'reg_addr0' is read.
 */
enum ice_status
ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0,
		      u32 reg_addr1, u32 *reg_val1)
{
	struct ice_aqc_read_write_alt_direct *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.read_write_alt_direct;

	if (!reg_val0)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_alt_direct);
	cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
	cmd->dword1_addr = CPU_TO_LE32(reg_addr1);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);

	if (status == ICE_SUCCESS) {
		*reg_val0 = LE32_TO_CPU(cmd->dword0_value);

		if (reg_val1)
			*reg_val1 = LE32_TO_CPU(cmd->dword1_value);
	}

	return status;
}

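/* Usage sketch (illustrative only): reading a single alternate-structure
 * dword. Passing NULL for reg_val1 limits the read to 'reg_addr0';
 * alt_addr is a hypothetical register number.
 *
 *	u32 val0;
 *
 *	status = ice_aq_alternate_read(hw, alt_addr, &val0, 0, NULL);
 */
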
/**
 * ice_aq_alternate_write_done
 * @hw: pointer to the HW structure.
 * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
 * @reset_needed: indicates the SW should trigger GLOBAL reset
 *
 * Indicates to the FW that alternate structures have been changed.
 */
enum ice_status
ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode, bool *reset_needed)
{
	struct ice_aqc_done_alt_write *cmd;
	struct ice_aq_desc desc;
	enum ice_status status;

	cmd = &desc.params.done_alt_write;

	if (!reset_needed)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_done_alt_write);
	cmd->flags = bios_mode;

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
	if (!status)
		*reset_needed = (LE16_TO_CPU(cmd->flags) &
				 ICE_AQC_RESP_RESET_NEEDED) != 0;

	return status;
}

/**
 * ice_aq_alternate_clear
 * @hw: pointer to the HW structure.
 *
 * Clear the alternate structures of the port from which the function
 * is called.
 */
enum ice_status ice_aq_alternate_clear(struct ice_hw *hw)
{
	struct ice_aq_desc desc;
	enum ice_status status;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_port_alt_write);

	status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);

	return status;
}

/**
 * ice_sched_query_elem - query element information from HW
 * @hw: pointer to the HW struct
 * @node_teid: node TEID to be queried
 * @buf: buffer to element information
 *
 * This function queries HW element information
 */
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
		     struct ice_aqc_get_elem *buf)
{
	u16 buf_size, num_elem_ret = 0;
	enum ice_status status;

	buf_size = sizeof(*buf);
	ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
	buf->generic[0].node_teid = CPU_TO_LE32(node_teid);
	status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
					  NULL);
	if (status != ICE_SUCCESS || num_elem_ret != 1)
		ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
	return status;
}

/**
 * ice_get_fw_mode - returns FW mode
 * @hw: pointer to the HW struct
 */
enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
{
#define ICE_FW_MODE_DBG_M BIT(0)
#define ICE_FW_MODE_REC_M BIT(1)
#define ICE_FW_MODE_ROLLBACK_M BIT(2)
	u32 fw_mode;

	/* check the current FW mode */
	fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M;
	if (fw_mode & ICE_FW_MODE_DBG_M)
		return ICE_FW_MODE_DBG;
	else if (fw_mode & ICE_FW_MODE_REC_M)
		return ICE_FW_MODE_REC;
	else if (fw_mode & ICE_FW_MODE_ROLLBACK_M)
		return ICE_FW_MODE_ROLLBACK;
	else
		return ICE_FW_MODE_NORMAL;
}

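/* Usage sketch (illustrative only): reacting to the reported mode, e.g.,
 * logging when firmware has fallen back to its rollback image:
 *
 *	if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
 *		ice_debug(hw, ICE_DBG_INIT, "firmware is in rollback mode\n");
 */
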
/**
 * ice_get_cur_lldp_persist_status
 * @hw: pointer to the HW struct
 * @lldp_status: return value of LLDP persistent status
 *
 * Get the current status of LLDP persistence
 */
enum ice_status
ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
{
	struct ice_port_info *pi = hw->port_info;
	enum ice_status ret;
	__le32 raw_data;
	u32 data, mask;

	if (!lldp_status)
		return ICE_ERR_BAD_PTR;

	ret = ice_acquire_nvm(hw, ICE_RES_READ);
	if (ret)
		return ret;

	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LLDP_PRESERVED_MOD_ID,
			      ICE_AQC_NVM_CUR_LLDP_PERSIST_RD_OFFSET,
			      ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data,
			      false, true, NULL);
	if (!ret) {
		data = LE32_TO_CPU(raw_data);
		mask = ICE_AQC_NVM_LLDP_STATUS_M <<
			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
		data = data & mask;
		*lldp_status = data >>
			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
	}

	ice_release_nvm(hw);

	return ret;
}

/**
 * ice_get_dflt_lldp_persist_status
 * @hw: pointer to the HW struct
 * @lldp_status: return value of LLDP persistent status
 *
 * Get the default status of LLDP persistence
 */
enum ice_status
ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
{
	struct ice_port_info *pi = hw->port_info;
	u32 data, mask, loc_data, loc_data_tmp;
	enum ice_status ret;
	__le16 loc_raw_data;
	__le32 raw_data;

	if (!lldp_status)
		return ICE_ERR_BAD_PTR;

	ret = ice_acquire_nvm(hw, ICE_RES_READ);
	if (ret)
		return ret;

	/* Read the offset of EMP_SR_PTR */
	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
			      ICE_AQC_NVM_EMP_SR_PTR_OFFSET,
			      ICE_AQC_NVM_EMP_SR_PTR_RD_LEN,
			      &loc_raw_data, false, true, NULL);
	if (ret)
		goto exit;

	loc_data = LE16_TO_CPU(loc_raw_data);
	if (loc_data & ICE_AQC_NVM_EMP_SR_PTR_TYPE_M) {
		loc_data &= ICE_AQC_NVM_EMP_SR_PTR_M;
		loc_data *= ICE_AQC_NVM_SECTOR_UNIT;
	} else {
		loc_data *= ICE_AQC_NVM_WORD_UNIT;
	}

	/* Read the offset of LLDP configuration pointer */
	loc_data += ICE_AQC_NVM_LLDP_CFG_PTR_OFFSET;
	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
			      ICE_AQC_NVM_LLDP_CFG_PTR_RD_LEN, &loc_raw_data,
			      false, true, NULL);
	if (ret)
		goto exit;

	loc_data_tmp = LE16_TO_CPU(loc_raw_data);
	loc_data_tmp *= ICE_AQC_NVM_WORD_UNIT;
	loc_data += loc_data_tmp;

	/* We need to skip the LLDP configuration section length (2 bytes) */
	loc_data += ICE_AQC_NVM_LLDP_CFG_HEADER_LEN;

	/* Read the LLDP default configuration */
	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
			      ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data, false,
			      true, NULL);
	if (!ret) {
		data = LE32_TO_CPU(raw_data);
		mask = ICE_AQC_NVM_LLDP_STATUS_M <<
			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
		data = data & mask;
		*lldp_status = data >>
			(ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
	}

exit:
	ice_release_nvm(hw);

	return ret;
}

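/* Worked example (illustrative only): resolving the EMP SR pointer above,
 * assuming the type flag sits in the top bit as the _M defines encode it.
 * A raw pointer word of 0x8010 has the type bit set, so the remaining bits
 * count sectors:
 *
 *	loc_data = 0x8010 & ICE_AQC_NVM_EMP_SR_PTR_M;    = 0x0010
 *	loc_data *= ICE_AQC_NVM_SECTOR_UNIT;             sector-sized units
 *
 * With the type bit clear, the value would instead be scaled by
 * ICE_AQC_NVM_WORD_UNIT (two bytes per word).
 */
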
/**
 * ice_get_netlist_ver_info
 * @hw: pointer to the HW struct
 *
 * Get the netlist version information
 */
enum ice_status
ice_get_netlist_ver_info(struct ice_hw *hw)
{
	struct ice_netlist_ver_info *ver = &hw->netlist_ver;
	enum ice_status ret;
	u32 id_blk_start;
	__le16 raw_data;
	u16 data, i;
	u16 *buff;

	ret = ice_acquire_nvm(hw, ICE_RES_READ);
	if (ret)
		return ret;
	buff = (u16 *)ice_calloc(hw, ICE_AQC_NVM_NETLIST_ID_BLK_LEN,
				 sizeof(*buff));
	if (!buff) {
		ret = ICE_ERR_NO_MEMORY;
		goto exit_no_mem;
	}

	/* read module length */
	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
			      ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN_OFFSET * 2,
			      ICE_AQC_NVM_LINK_TOPO_NETLIST_LEN, &raw_data,
			      false, false, NULL);
	if (ret)
		goto exit_error;

	data = LE16_TO_CPU(raw_data);
	/* exit if length is 0 */
	if (!data)
		goto exit_error;

	/* read node count */
	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
			      ICE_AQC_NVM_NETLIST_NODE_COUNT_OFFSET * 2,
			      ICE_AQC_NVM_NETLIST_NODE_COUNT_LEN, &raw_data,
			      false, false, NULL);
	if (ret)
		goto exit_error;
	data = LE16_TO_CPU(raw_data);

	/* netlist ID block starts from offset 4 + node count * 2 */
	id_blk_start = ICE_AQC_NVM_NETLIST_ID_BLK_START_OFFSET + data * 2;

	/* read the entire netlist ID block */
	ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LINK_TOPO_NETLIST_MOD_ID,
			      id_blk_start * 2,
			      ICE_AQC_NVM_NETLIST_ID_BLK_LEN * 2, buff, false,
			      false, NULL);
	if (ret)
		goto exit_error;

	for (i = 0; i < ICE_AQC_NVM_NETLIST_ID_BLK_LEN; i++)
		buff[i] = LE16_TO_CPU(((_FORCE_ __le16 *)buff)[i]);

	ver->major = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16) |
		     buff[ICE_AQC_NVM_NETLIST_ID_BLK_MAJOR_VER_LOW];
	ver->minor = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16) |
		     buff[ICE_AQC_NVM_NETLIST_ID_BLK_MINOR_VER_LOW];
	ver->type = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_HIGH] << 16) |
		    buff[ICE_AQC_NVM_NETLIST_ID_BLK_TYPE_LOW];
	ver->rev = (buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_HIGH] << 16) |
		   buff[ICE_AQC_NVM_NETLIST_ID_BLK_REV_LOW];
	ver->cust_ver = buff[ICE_AQC_NVM_NETLIST_ID_BLK_CUST_VER];
	/* Read the left most 4 bytes of SHA */
	ver->hash = buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 15] << 16 |
		    buff[ICE_AQC_NVM_NETLIST_ID_BLK_SHA_HASH + 14];

exit_error:
	ice_free(hw, buff);
exit_no_mem:
	ice_release_nvm(hw);
	return ret;
}

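/* Worked example (illustrative only): each version field in the netlist ID
 * block is stored as a pair of 16-bit words. If the block holds a major
 * version high word of 0x0003 and low word of 0x00C8, the assembled value
 * is:
 *
 *	ver->major = (0x0003 << 16) | 0x00C8;   = 0x000300C8
 */
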
/**
 * ice_fw_supports_link_override
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports link override
 */
bool ice_fw_supports_link_override(struct ice_hw *hw)
{
	if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
		if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
			return true;
		if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
		    hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
			return true;
	} else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
		return true;
	}

	return false;
}

/**
 * ice_get_link_default_override
 * @ldo: pointer to the link default override struct
 * @pi: pointer to the port info struct
 *
 * Gets the link default override for a port
 */
enum ice_status
ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
			      struct ice_port_info *pi)
{
	u16 i, tlv, tlv_len, tlv_start, buf, offset;
	struct ice_hw *hw = pi->hw;
	enum ice_status status;

	status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
					ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to read link override TLV.\n");
		return status;
	}

	/* Each port has its own config; calculate for our port */
	tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
		ICE_SR_PFA_LINK_OVERRIDE_OFFSET;

	/* link options first */
	status = ice_read_sr_word(hw, tlv_start, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to read override link options.\n");
		return status;
	}
	ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
	ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
		ICE_LINK_OVERRIDE_PHY_CFG_S;

	/* link PHY config */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
	status = ice_read_sr_word(hw, offset, &buf);
	if (status) {
		ice_debug(hw, ICE_DBG_INIT,
			  "Failed to read override phy config.\n");
		return status;
	}
	ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;

	/* PHY types low */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Failed to read override link options.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_low |= ((u64)buf << (i * 16));
	}

	/* PHY types high */
	offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
		ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
	for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
		status = ice_read_sr_word(hw, (offset + i), &buf);
		if (status) {
			ice_debug(hw, ICE_DBG_INIT,
				  "Failed to read override link options.\n");
			return status;
		}
		/* shift 16 bits at a time to fill 64 bits */
		ldo->phy_type_high |= ((u64)buf << (i * 16));