1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /* Copyright (c) 2021, Intel Corporation
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the Intel Corporation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
33 #include "ice_common.h"
34 #include "ice_sched.h"
35 #include "ice_adminq_cmd.h"
38 #include "ice_switch.h"
40 #define ICE_PF_RESET_WAIT_COUNT 300
43 * ice_set_mac_type - Sets MAC type
44 * @hw: pointer to the HW structure
46 * This function sets the MAC type of the adapter based on the
47 * vendor ID and device ID stored in the HW structure.
49 enum ice_status ice_set_mac_type(struct ice_hw *hw)
51 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
53 if (hw->vendor_id != ICE_INTEL_VENDOR_ID)
54 return ICE_ERR_DEVICE_NOT_SUPPORTED;
56 switch (hw->device_id) {
57 case ICE_DEV_ID_E810C_BACKPLANE:
58 case ICE_DEV_ID_E810C_QSFP:
59 case ICE_DEV_ID_E810C_SFP:
60 case ICE_DEV_ID_E810_XXV_BACKPLANE:
61 case ICE_DEV_ID_E810_XXV_QSFP:
62 case ICE_DEV_ID_E810_XXV_SFP:
63 hw->mac_type = ICE_MAC_E810;
65 case ICE_DEV_ID_E822C_10G_BASE_T:
66 case ICE_DEV_ID_E822C_BACKPLANE:
67 case ICE_DEV_ID_E822C_QSFP:
68 case ICE_DEV_ID_E822C_SFP:
69 case ICE_DEV_ID_E822C_SGMII:
70 case ICE_DEV_ID_E822L_10G_BASE_T:
71 case ICE_DEV_ID_E822L_BACKPLANE:
72 case ICE_DEV_ID_E822L_SFP:
73 case ICE_DEV_ID_E822L_SGMII:
74 case ICE_DEV_ID_E823L_10G_BASE_T:
75 case ICE_DEV_ID_E823L_1GBE:
76 case ICE_DEV_ID_E823L_BACKPLANE:
77 case ICE_DEV_ID_E823L_QSFP:
78 case ICE_DEV_ID_E823L_SFP:
79 hw->mac_type = ICE_MAC_GENERIC;
82 hw->mac_type = ICE_MAC_UNKNOWN;
86 ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
91 * ice_clear_pf_cfg - Clear PF configuration
92 * @hw: pointer to the hardware structure
94 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
95 * configuration, flow director filters, etc.).
97 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
99 struct ice_aq_desc desc;
101 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
103 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
107 * ice_aq_manage_mac_read - manage MAC address read command
108 * @hw: pointer to the HW struct
109 * @buf: a virtual buffer to hold the manage MAC read response
110 * @buf_size: Size of the virtual buffer
111 * @cd: pointer to command details structure or NULL
113 * This function is used to return the per-PF station MAC address (0x0107).
114 * NOTE: Upon successful completion of this command, MAC address information
115 * is returned in the user-specified buffer, which should be interpreted as a
116 * "manage_mac_read" response.
117 * The returned MAC addresses are also stored in the HW struct (port.mac).
118 * ice_discover_dev_caps is expected to be called before this function.
122 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
123 struct ice_sq_cd *cd)
125 struct ice_aqc_manage_mac_read_resp *resp;
126 struct ice_aqc_manage_mac_read *cmd;
127 struct ice_aq_desc desc;
128 enum ice_status status;
132 cmd = &desc.params.mac_read;
134 if (buf_size < sizeof(*resp))
135 return ICE_ERR_BUF_TOO_SHORT;
137 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
139 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
143 resp = (struct ice_aqc_manage_mac_read_resp *)buf;
144 flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
146 if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
147 ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
151 /* A single port can report up to two (LAN and WoL) addresses */
152 for (i = 0; i < cmd->num_addr; i++)
153 if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
154 ice_memcpy(hw->port_info->mac.lan_addr,
155 resp[i].mac_addr, ETH_ALEN,
157 ice_memcpy(hw->port_info->mac.perm_addr,
159 ETH_ALEN, ICE_DMA_TO_NONDMA);
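/* Illustrative caller sketch (not part of the driver flow): since a single
 * port can report up to two addresses (LAN and WoL), the response buffer is
 * sized for two ice_aqc_manage_mac_read_resp elements, mirroring what
 * ice_init_hw() does further below. Variable names here are placeholders.
 *
 *	struct ice_aqc_manage_mac_read_resp *buf;
 *	u16 len = 2 * sizeof(*buf);
 *
 *	buf = (struct ice_aqc_manage_mac_read_resp *)ice_calloc(hw, 2, sizeof(*buf));
 *	if (buf)
 *		status = ice_aq_manage_mac_read(hw, buf, len, NULL);
 *	ice_free(hw, buf);
 */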
166 * ice_aq_get_phy_caps - returns PHY capabilities
167 * @pi: port information structure
168 * @qual_mods: report qualified modules
169 * @report_mode: report mode capabilities
170 * @pcaps: structure for PHY capabilities to be filled
171 * @cd: pointer to command details structure or NULL
173 * Returns the various PHY capabilities supported on the Port (0x0600)
176 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
177 struct ice_aqc_get_phy_caps_data *pcaps,
178 struct ice_sq_cd *cd)
180 struct ice_aqc_get_phy_caps *cmd;
181 u16 pcaps_size = sizeof(*pcaps);
182 struct ice_aq_desc desc;
183 enum ice_status status;
186 cmd = &desc.params.get_phy;
188 if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
189 return ICE_ERR_PARAM;
192 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
195 cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);
197 cmd->param0 |= CPU_TO_LE16(report_mode);
198 status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
200 ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
202 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
203 (unsigned long long)LE64_TO_CPU(pcaps->phy_type_low));
204 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
205 (unsigned long long)LE64_TO_CPU(pcaps->phy_type_high));
206 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", pcaps->caps);
207 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
208 pcaps->low_power_ctrl_an);
209 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", pcaps->eee_cap);
210 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n",
212 ice_debug(hw, ICE_DBG_LINK, " link_fec_options = 0x%x\n",
213 pcaps->link_fec_options);
214 ice_debug(hw, ICE_DBG_LINK, " module_compliance_enforcement = 0x%x\n",
215 pcaps->module_compliance_enforcement);
216 ice_debug(hw, ICE_DBG_LINK, " extended_compliance_code = 0x%x\n",
217 pcaps->extended_compliance_code);
218 ice_debug(hw, ICE_DBG_LINK, " module_type[0] = 0x%x\n",
219 pcaps->module_type[0]);
220 ice_debug(hw, ICE_DBG_LINK, " module_type[1] = 0x%x\n",
221 pcaps->module_type[1]);
222 ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n",
223 pcaps->module_type[2]);
225 if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
226 pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
227 pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
228 ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
229 sizeof(pi->phy.link_info.module_type),
230 ICE_NONDMA_TO_NONDMA);
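/* Illustrative usage sketch: querying topology capabilities so that
 * phy_type_low/high and module_type get cached in the port_info structure,
 * as ice_init_hw() does further below. The buffer handling mirrors that
 * call; names are placeholders.
 *
 *	struct ice_aqc_get_phy_caps_data *pcaps;
 *
 *	pcaps = (struct ice_aqc_get_phy_caps_data *)
 *		ice_malloc(pi->hw, sizeof(*pcaps));
 *	if (pcaps)
 *		status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
 *					     pcaps, NULL);
 *	ice_free(pi->hw, pcaps);
 */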
237 * ice_aq_get_link_topo_handle - get link topology node return status
238 * @pi: port information structure
239 * @node_type: requested node type
240 * @cd: pointer to command details structure or NULL
242 * Get link topology node return status for specified node type (0x06E0)
244 * Node type cage can be used to determine if cage is present. If AQC
245 * returns error (ENOENT), then no cage present. If no cage present, then
246 * connection type is backplane or BASE-T.
248 static enum ice_status
249 ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
250 struct ice_sq_cd *cd)
252 struct ice_aqc_get_link_topo *cmd;
253 struct ice_aq_desc desc;
255 cmd = &desc.params.get_link_topo;
257 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
259 cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
260 ICE_AQC_LINK_TOPO_NODE_CTX_S);
263 cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
265 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
269 * ice_is_media_cage_present
270 * @pi: port information structure
272 * Returns true if media cage is present, else false. If no cage, then
273 * media type is backplane or BASE-T.
275 static bool ice_is_media_cage_present(struct ice_port_info *pi)
277 /* Node type cage can be used to determine if cage is present. If AQC
278 * returns error (ENOENT), then no cage present. If no cage present then
279 * connection type is backplane or BASE-T.
281 return !ice_aq_get_link_topo_handle(pi,
282 ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
287 * ice_get_media_type - Gets media type
288 * @pi: port information structure
290 static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
292 struct ice_link_status *hw_link_info;
295 return ICE_MEDIA_UNKNOWN;
297 hw_link_info = &pi->phy.link_info;
298 if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
299 /* If more than one media type is selected, report unknown */
300 return ICE_MEDIA_UNKNOWN;
302 if (hw_link_info->phy_type_low) {
303 /* 1G SGMII is a special case where some DA cable PHYs
304 * may show this as an option when it really shouldn't
305 * be since SGMII is meant to be between a MAC and a PHY
306 * in a backplane. Try to detect this case and handle it
308 if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
309 (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
310 ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
311 hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
312 ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
315 switch (hw_link_info->phy_type_low) {
316 case ICE_PHY_TYPE_LOW_1000BASE_SX:
317 case ICE_PHY_TYPE_LOW_1000BASE_LX:
318 case ICE_PHY_TYPE_LOW_10GBASE_SR:
319 case ICE_PHY_TYPE_LOW_10GBASE_LR:
320 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
321 case ICE_PHY_TYPE_LOW_25GBASE_SR:
322 case ICE_PHY_TYPE_LOW_25GBASE_LR:
323 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
324 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
325 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
326 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
327 case ICE_PHY_TYPE_LOW_50GBASE_SR:
328 case ICE_PHY_TYPE_LOW_50GBASE_FR:
329 case ICE_PHY_TYPE_LOW_50GBASE_LR:
330 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
331 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
332 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
333 case ICE_PHY_TYPE_LOW_100GBASE_DR:
334 return ICE_MEDIA_FIBER;
335 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
336 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
337 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
338 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
339 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
340 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
341 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
342 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
343 return ICE_MEDIA_FIBER;
344 case ICE_PHY_TYPE_LOW_100BASE_TX:
345 case ICE_PHY_TYPE_LOW_1000BASE_T:
346 case ICE_PHY_TYPE_LOW_2500BASE_T:
347 case ICE_PHY_TYPE_LOW_5GBASE_T:
348 case ICE_PHY_TYPE_LOW_10GBASE_T:
349 case ICE_PHY_TYPE_LOW_25GBASE_T:
350 return ICE_MEDIA_BASET;
351 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
352 case ICE_PHY_TYPE_LOW_25GBASE_CR:
353 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
354 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
355 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
356 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
357 case ICE_PHY_TYPE_LOW_50GBASE_CP:
358 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
359 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
360 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
362 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
363 case ICE_PHY_TYPE_LOW_40G_XLAUI:
364 case ICE_PHY_TYPE_LOW_50G_LAUI2:
365 case ICE_PHY_TYPE_LOW_50G_AUI2:
366 case ICE_PHY_TYPE_LOW_50G_AUI1:
367 case ICE_PHY_TYPE_LOW_100G_AUI4:
368 case ICE_PHY_TYPE_LOW_100G_CAUI4:
369 if (ice_is_media_cage_present(pi))
370 return ICE_MEDIA_AUI;
372 case ICE_PHY_TYPE_LOW_1000BASE_KX:
373 case ICE_PHY_TYPE_LOW_2500BASE_KX:
374 case ICE_PHY_TYPE_LOW_2500BASE_X:
375 case ICE_PHY_TYPE_LOW_5GBASE_KR:
376 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
377 case ICE_PHY_TYPE_LOW_25GBASE_KR:
378 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
379 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
380 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
381 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
382 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
383 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
384 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
385 return ICE_MEDIA_BACKPLANE;
388 switch (hw_link_info->phy_type_high) {
389 case ICE_PHY_TYPE_HIGH_100G_AUI2:
390 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
391 if (ice_is_media_cage_present(pi))
392 return ICE_MEDIA_AUI;
394 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
395 return ICE_MEDIA_BACKPLANE;
396 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
397 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
398 return ICE_MEDIA_FIBER;
401 return ICE_MEDIA_UNKNOWN;
405 * ice_aq_get_link_info
406 * @pi: port information structure
407 * @ena_lse: enable/disable LinkStatusEvent reporting
408 * @link: pointer to link status structure - optional
409 * @cd: pointer to command details structure or NULL
411 * Get Link Status (0x607). Returns the link status of the adapter.
414 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
415 struct ice_link_status *link, struct ice_sq_cd *cd)
417 struct ice_aqc_get_link_status_data link_data = { 0 };
418 struct ice_aqc_get_link_status *resp;
419 struct ice_link_status *li_old, *li;
420 enum ice_media_type *hw_media_type;
421 struct ice_fc_info *hw_fc_info;
422 bool tx_pause, rx_pause;
423 struct ice_aq_desc desc;
424 enum ice_status status;
429 return ICE_ERR_PARAM;
432 li_old = &pi->phy.link_info_old;
433 hw_media_type = &pi->phy.media_type;
434 li = &pi->phy.link_info;
435 hw_fc_info = &pi->fc;
437 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
438 cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
439 resp = &desc.params.get_link_status;
440 resp->cmd_flags = CPU_TO_LE16(cmd_flags);
441 resp->lport_num = pi->lport;
443 status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
445 if (status != ICE_SUCCESS)
448 /* save off old link status information */
451 /* update current link status information */
452 li->link_speed = LE16_TO_CPU(link_data.link_speed);
453 li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
454 li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
455 *hw_media_type = ice_get_media_type(pi);
456 li->link_info = link_data.link_info;
457 li->an_info = link_data.an_info;
458 li->ext_info = link_data.ext_info;
459 li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
460 li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
461 li->topo_media_conflict = link_data.topo_media_conflict;
462 li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
463 ICE_AQ_CFG_PACING_TYPE_M);
466 tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
467 rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
468 if (tx_pause && rx_pause)
469 hw_fc_info->current_mode = ICE_FC_FULL;
471 hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
473 hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
475 hw_fc_info->current_mode = ICE_FC_NONE;
477 li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));
479 ice_debug(hw, ICE_DBG_LINK, "get link info\n");
480 ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
481 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
482 (unsigned long long)li->phy_type_low);
483 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
484 (unsigned long long)li->phy_type_high);
485 ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
486 ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
487 ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
488 ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
489 ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
490 ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
491 ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
493 ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);
495 /* save link status information */
499 /* flag cleared so calling functions don't call AQ again */
500 pi->phy.get_link_info = false;
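/* Illustrative sketch: refreshing the cached link status while enabling Link
 * Status Event reporting in one call. Passing NULL for the link pointer
 * simply updates pi->phy.link_info; the get_link_info flag check mirrors how
 * callers typically decide whether a fresh AQ query is needed.
 *
 *	if (pi->phy.get_link_info)
 *		status = ice_aq_get_link_info(pi, true, NULL, NULL);
 */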
506 * ice_fill_tx_timer_and_fc_thresh
507 * @hw: pointer to the HW struct
508 * @cmd: pointer to MAC cfg structure
510 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
514 ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
515 struct ice_aqc_set_mac_cfg *cmd)
517 u16 fc_thres_val, tx_timer_val;
520 /* We read back the transmit timer and fc threshold value of
521 * LFC. Thus, we will use index =
522 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
524 * Also, because we are operating on the transmit timer and fc
525 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
527 #define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
529 /* Retrieve the transmit timer */
530 val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
532 PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
533 cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);
535 /* Retrieve the fc threshold */
536 val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
537 fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
539 cmd->fc_refresh_threshold = CPU_TO_LE16(fc_thres_val);
544 * @hw: pointer to the HW struct
545 * @max_frame_size: Maximum Frame Size to be supported
546 * @cd: pointer to command details structure or NULL
548 * Set MAC configuration (0x0603)
551 ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
553 struct ice_aqc_set_mac_cfg *cmd;
554 struct ice_aq_desc desc;
556 cmd = &desc.params.set_mac_cfg;
558 if (max_frame_size == 0)
559 return ICE_ERR_PARAM;
561 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
563 cmd->max_frame_size = CPU_TO_LE16(max_frame_size);
565 ice_fill_tx_timer_and_fc_thresh(hw, cmd);
567 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
571 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
572 * @hw: pointer to the HW struct
574 static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
576 struct ice_switch_info *sw;
577 enum ice_status status;
579 hw->switch_info = (struct ice_switch_info *)
580 ice_malloc(hw, sizeof(*hw->switch_info));
582 sw = hw->switch_info;
585 return ICE_ERR_NO_MEMORY;
587 INIT_LIST_HEAD(&sw->vsi_list_map_head);
588 sw->prof_res_bm_init = 0;
590 status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
592 ice_free(hw, hw->switch_info);
599 * ice_cleanup_fltr_mgmt_single - clears a single filter management struct
600 * @hw: pointer to the HW struct
601 * @sw: pointer to switch info struct for which function clears filters
604 ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw)
606 struct ice_vsi_list_map_info *v_pos_map;
607 struct ice_vsi_list_map_info *v_tmp_map;
608 struct ice_sw_recipe *recps;
614 LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
615 ice_vsi_list_map_info, list_entry) {
616 LIST_DEL(&v_pos_map->list_entry);
617 ice_free(hw, v_pos_map);
619 recps = sw->recp_list;
620 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
621 struct ice_recp_grp_entry *rg_entry, *tmprg_entry;
623 recps[i].root_rid = i;
624 LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry,
625 &recps[i].rg_list, ice_recp_grp_entry,
627 LIST_DEL(&rg_entry->l_entry);
628 ice_free(hw, rg_entry);
631 if (recps[i].adv_rule) {
632 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
633 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
635 ice_destroy_lock(&recps[i].filt_rule_lock);
636 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
637 &recps[i].filt_rules,
638 ice_adv_fltr_mgmt_list_entry,
640 LIST_DEL(&lst_itr->list_entry);
641 ice_free(hw, lst_itr->lkups);
642 ice_free(hw, lst_itr);
645 struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
647 ice_destroy_lock(&recps[i].filt_rule_lock);
648 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
649 &recps[i].filt_rules,
650 ice_fltr_mgmt_list_entry,
652 LIST_DEL(&lst_itr->list_entry);
653 ice_free(hw, lst_itr);
656 if (recps[i].root_buf)
657 ice_free(hw, recps[i].root_buf);
659 ice_rm_sw_replay_rule_info(hw, sw);
660 ice_free(hw, sw->recp_list);
665 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
666 * @hw: pointer to the HW struct
668 static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
670 ice_cleanup_fltr_mgmt_single(hw, hw->switch_info);
674 * ice_get_itr_intrl_gran
675 * @hw: pointer to the HW struct
677 * Determines the ITR/INTRL granularities based on the maximum aggregate
678 * bandwidth according to the device's configuration during power-on.
680 static void ice_get_itr_intrl_gran(struct ice_hw *hw)
682 u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
683 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
684 GL_PWR_MODE_CTL_CAR_MAX_BW_S;
686 switch (max_agg_bw) {
687 case ICE_MAX_AGG_BW_200G:
688 case ICE_MAX_AGG_BW_100G:
689 case ICE_MAX_AGG_BW_50G:
690 hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
691 hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
693 case ICE_MAX_AGG_BW_25G:
694 hw->itr_gran = ICE_ITR_GRAN_MAX_25;
695 hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
701 * ice_print_rollback_msg - print FW rollback message
702 * @hw: pointer to the hardware structure
704 void ice_print_rollback_msg(struct ice_hw *hw)
706 char nvm_str[ICE_NVM_VER_LEN] = { 0 };
707 struct ice_orom_info *orom;
708 struct ice_nvm_info *nvm;
710 orom = &hw->flash.orom;
711 nvm = &hw->flash.nvm;
713 SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
714 nvm->major, nvm->minor, nvm->eetrack, orom->major,
715 orom->build, orom->patch);
717 "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n",
718 nvm_str, hw->fw_maj_ver, hw->fw_min_ver);
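/* For example (values are illustrative only): with nvm->major = 0x2,
 * nvm->minor = 0x40, eetrack = 0x8000abcd and an OROM at 1.2.3, the
 * "%x.%02x 0x%x %d.%d.%d" format above yields
 * nvm_str = "2.40 0x8000abcd 1.2.3".
 */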
722 * ice_init_hw - main hardware initialization routine
723 * @hw: pointer to the hardware structure
725 enum ice_status ice_init_hw(struct ice_hw *hw)
727 struct ice_aqc_get_phy_caps_data *pcaps;
728 enum ice_status status;
732 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
734 /* Set MAC type based on DeviceID */
735 status = ice_set_mac_type(hw);
739 hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
740 PF_FUNC_RID_FUNCTION_NUMBER_M) >>
741 PF_FUNC_RID_FUNCTION_NUMBER_S;
743 status = ice_reset(hw, ICE_RESET_PFR);
746 ice_get_itr_intrl_gran(hw);
748 status = ice_create_all_ctrlq(hw);
750 goto err_unroll_cqinit;
752 status = ice_init_nvm(hw);
754 goto err_unroll_cqinit;
756 if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
757 ice_print_rollback_msg(hw);
759 status = ice_clear_pf_cfg(hw);
761 goto err_unroll_cqinit;
763 ice_clear_pxe_mode(hw);
765 status = ice_get_caps(hw);
767 goto err_unroll_cqinit;
769 hw->port_info = (struct ice_port_info *)
770 ice_malloc(hw, sizeof(*hw->port_info));
771 if (!hw->port_info) {
772 status = ICE_ERR_NO_MEMORY;
773 goto err_unroll_cqinit;
776 /* set the back pointer to HW */
777 hw->port_info->hw = hw;
779 /* Initialize port_info struct with switch configuration data */
780 status = ice_get_initial_sw_cfg(hw);
782 goto err_unroll_alloc;
785 /* Query the allocated resources for Tx scheduler */
786 status = ice_sched_query_res_alloc(hw);
788 ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
789 goto err_unroll_alloc;
791 ice_sched_get_psm_clk_freq(hw);
793 /* Initialize port_info struct with scheduler data */
794 status = ice_sched_init_port(hw->port_info);
796 goto err_unroll_sched;
797 pcaps = (struct ice_aqc_get_phy_caps_data *)
798 ice_malloc(hw, sizeof(*pcaps));
800 status = ICE_ERR_NO_MEMORY;
801 goto err_unroll_sched;
804 /* Initialize port_info struct with PHY capabilities */
805 status = ice_aq_get_phy_caps(hw->port_info, false,
806 ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
809 ice_debug(hw, ICE_DBG_PHY, "Get PHY capabilities failed, continuing anyway\n");
811 /* Initialize port_info struct with link information */
812 status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
814 goto err_unroll_sched;
815 /* need a valid SW entry point to build a Tx tree */
816 if (!hw->sw_entry_point_layer) {
817 ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
818 status = ICE_ERR_CFG;
819 goto err_unroll_sched;
821 INIT_LIST_HEAD(&hw->agg_list);
822 /* Initialize max burst size */
823 if (!hw->max_burst_size)
824 ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
825 status = ice_init_fltr_mgmt_struct(hw);
827 goto err_unroll_sched;
829 /* Get MAC information */
830 /* A single port can report up to two (LAN and WoL) addresses */
831 mac_buf = ice_calloc(hw, 2,
832 sizeof(struct ice_aqc_manage_mac_read_resp));
833 mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
836 status = ICE_ERR_NO_MEMORY;
837 goto err_unroll_fltr_mgmt_struct;
840 status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
841 ice_free(hw, mac_buf);
844 goto err_unroll_fltr_mgmt_struct;
845 /* enable jumbo frame support at MAC level */
846 status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
848 goto err_unroll_fltr_mgmt_struct;
849 status = ice_init_hw_tbls(hw);
851 goto err_unroll_fltr_mgmt_struct;
852 ice_init_lock(&hw->tnl_lock);
854 ice_init_vlan_mode_ops(hw);
858 err_unroll_fltr_mgmt_struct:
859 ice_cleanup_fltr_mgmt_struct(hw);
861 ice_sched_cleanup_all(hw);
863 ice_free(hw, hw->port_info);
864 hw->port_info = NULL;
866 ice_destroy_all_ctrlq(hw);
871 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
872 * @hw: pointer to the hardware structure
874 * This should be called only during nominal operation, not as a result of
875 * ice_init_hw() failing since ice_init_hw() will take care of unrolling
876 * applicable initializations if it fails for any reason.
878 void ice_deinit_hw(struct ice_hw *hw)
880 ice_cleanup_fltr_mgmt_struct(hw);
882 ice_sched_cleanup_all(hw);
883 ice_sched_clear_agg(hw);
885 ice_free_hw_tbls(hw);
886 ice_destroy_lock(&hw->tnl_lock);
889 ice_free(hw, hw->port_info);
890 hw->port_info = NULL;
893 ice_destroy_all_ctrlq(hw);
895 /* Clear VSI contexts if not already cleared */
896 ice_clear_all_vsi_ctx(hw);
900 * ice_check_reset - Check to see if a global reset is complete
901 * @hw: pointer to the hardware structure
903 enum ice_status ice_check_reset(struct ice_hw *hw)
905 u32 cnt, reg = 0, grst_timeout, uld_mask;
907 /* Poll for Device Active state in case a recent CORER, GLOBR,
908 * or EMPR has occurred. The grst delay value is in 100ms units.
909 * Add 1sec for outstanding AQ commands that can take a long time.
911 grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
912 GLGEN_RSTCTL_GRSTDEL_S) + 10;
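/* Example: a GRSTDEL field value of 5 yields grst_timeout = 15, i.e. up to
 * 15 polls of 100 ms (1.5 s total) before the global reset is declared failed.
 */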
914 for (cnt = 0; cnt < grst_timeout; cnt++) {
915 ice_msec_delay(100, true);
916 reg = rd32(hw, GLGEN_RSTAT);
917 if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
921 if (cnt == grst_timeout) {
922 ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
923 return ICE_ERR_RESET_FAILED;
926 #define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
927 GLNVM_ULD_PCIER_DONE_1_M |\
928 GLNVM_ULD_CORER_DONE_M |\
929 GLNVM_ULD_GLOBR_DONE_M |\
930 GLNVM_ULD_POR_DONE_M |\
931 GLNVM_ULD_POR_DONE_1_M |\
932 GLNVM_ULD_PCIER_DONE_2_M)
934 uld_mask = ICE_RESET_DONE_MASK;
936 /* Device is Active; check Global Reset processes are done */
937 for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
938 reg = rd32(hw, GLNVM_ULD) & uld_mask;
939 if (reg == uld_mask) {
940 ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
943 ice_msec_delay(10, true);
946 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
947 ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
949 return ICE_ERR_RESET_FAILED;
956 * ice_pf_reset - Reset the PF
957 * @hw: pointer to the hardware structure
959 * If a global reset has been triggered, this function checks
960 * for its completion and then issues the PF reset
962 static enum ice_status ice_pf_reset(struct ice_hw *hw)
966 /* If at function entry a global reset was already in progress, i.e.
967 * state is not 'device active' or any of the reset done bits are not
968 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
969 * global reset is done.
971 if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
972 (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
973 /* poll on global reset currently in progress until done */
974 if (ice_check_reset(hw))
975 return ICE_ERR_RESET_FAILED;
981 reg = rd32(hw, PFGEN_CTRL);
983 wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
985 /* Wait for the PFR to complete. The wait time is the global config lock
986 * timeout plus the PFR timeout which will account for a possible reset
987 * that is occurring during a download package operation.
989 for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
990 ICE_PF_RESET_WAIT_COUNT; cnt++) {
991 reg = rd32(hw, PFGEN_CTRL);
992 if (!(reg & PFGEN_CTRL_PFSWR_M))
995 ice_msec_delay(1, true);
998 if (cnt == ICE_GLOBAL_CFG_LOCK_TIMEOUT + ICE_PF_RESET_WAIT_COUNT) {
999 ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
1000 return ICE_ERR_RESET_FAILED;
1007 * ice_reset - Perform different types of reset
1008 * @hw: pointer to the hardware structure
1009 * @req: reset request
1011 * This function triggers a reset as specified by the req parameter.
1014 * If anything other than a PF reset is triggered, PXE mode is restored.
1015 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
1016 * interface has been restored in the rebuild flow.
1018 enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
1024 return ice_pf_reset(hw);
1025 case ICE_RESET_CORER:
1026 ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
1027 val = GLGEN_RTRIG_CORER_M;
1029 case ICE_RESET_GLOBR:
1030 ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
1031 val = GLGEN_RTRIG_GLOBR_M;
1034 return ICE_ERR_PARAM;
1037 val |= rd32(hw, GLGEN_RTRIG);
1038 wr32(hw, GLGEN_RTRIG, val);
1041 /* wait for the FW to be ready */
1042 return ice_check_reset(hw);
1046 * ice_copy_rxq_ctx_to_hw
1047 * @hw: pointer to the hardware structure
1048 * @ice_rxq_ctx: pointer to the rxq context
1049 * @rxq_index: the index of the Rx queue
1051 * Copies rxq context from dense structure to HW register space
1053 static enum ice_status
1054 ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1059 return ICE_ERR_BAD_PTR;
1061 if (rxq_index > QRX_CTRL_MAX_INDEX)
1062 return ICE_ERR_PARAM;
1064 /* Copy each dword separately to HW */
1065 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1066 wr32(hw, QRX_CONTEXT(i, rxq_index),
1067 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1069 ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
1070 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1076 /* LAN Rx Queue Context */
1077 static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
1078 /* Field Width LSB */
1079 ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
1080 ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
1081 ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
1082 ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89),
1083 ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102),
1084 ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109),
1085 ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114),
1086 ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116),
1087 ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117),
1088 ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119),
1089 ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120),
1090 ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124),
1091 ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127),
1092 ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174),
1093 ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193),
1094 ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194),
1095 ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
1096 ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
1097 ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
1098 ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
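/* A note on the table above: each ICE_CTX_STORE entry describes how one
 * sparse context field is packed into the dense buffer as
 * (field, width in bits, LSB bit offset). For instance, qlen is 13 bits wide
 * starting at bit 89, so it occupies bits 89..101 of the dense Rx queue
 * context that ice_copy_rxq_ctx_to_hw() later copies dword by dword into the
 * QRX_CONTEXT registers.
 */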
1104 * @hw: pointer to the hardware structure
1105 * @rlan_ctx: pointer to the rxq context
1106 * @rxq_index: the index of the Rx queue
1108 * Converts rxq context from sparse to dense structure and then writes
1109 * it to HW register space and enables the hardware to prefetch descriptors
1110 * instead of only fetching them on demand
1113 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1116 u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1119 return ICE_ERR_BAD_PTR;
1121 rlan_ctx->prefena = 1;
1123 ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1124 return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
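/* Illustrative sketch (all values are placeholders, not taken from this
 * file): programming one Rx queue context with a few representative fields
 * before handing it to ice_write_rxq_ctx(), which packs it via
 * ice_rlan_ctx_info and writes it to the QRX_CONTEXT registers.
 *
 *	struct ice_rlan_ctx rlan_ctx = { 0 };
 *
 *	rlan_ctx.base = ring_dma >> 7;	// ring base, assumed 128-byte units
 *	rlan_ctx.qlen = 512;		// descriptor count (placeholder)
 *	status = ice_write_rxq_ctx(hw, &rlan_ctx, rxq_index);
 */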
1129 * @hw: pointer to the hardware structure
1130 * @rxq_index: the index of the Rx queue to clear
1132 * Clears rxq context in HW register space
1134 enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
1138 if (rxq_index > QRX_CTRL_MAX_INDEX)
1139 return ICE_ERR_PARAM;
1141 /* Clear each dword register separately */
1142 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
1143 wr32(hw, QRX_CONTEXT(i, rxq_index), 0);
1148 /* LAN Tx Queue Context */
1149 const struct ice_ctx_ele ice_tlan_ctx_info[] = {
1150 /* Field Width LSB */
1151 ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
1152 ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
1153 ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60),
1154 ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65),
1155 ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68),
1156 ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78),
1157 ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80),
1158 ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90),
1159 ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1, 91),
1160 ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92),
1161 ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93),
1162 ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101),
1163 ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102),
1164 ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103),
1165 ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104),
1166 ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105),
1167 ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114),
1168 ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128),
1169 ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129),
1170 ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135),
1171 ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148),
1172 ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152),
1173 ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153),
1174 ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164),
1175 ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
1176 ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
1177 ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
1178 ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
1183 * ice_copy_tx_cmpltnq_ctx_to_hw
1184 * @hw: pointer to the hardware structure
1185 * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
1186 * @tx_cmpltnq_index: the index of the completion queue
1188 * Copies Tx completion queue context from dense structure to HW register space
1190 static enum ice_status
1191 ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
1192 u32 tx_cmpltnq_index)
1196 if (!ice_tx_cmpltnq_ctx)
1197 return ICE_ERR_BAD_PTR;
1199 if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
1200 return ICE_ERR_PARAM;
1202 /* Copy each dword separately to HW */
1203 for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
1204 wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
1205 *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
1207 ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
1208 *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
1214 /* LAN Tx Completion Queue Context */
1215 static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
1216 /* Field Width LSB */
1217 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base, 57, 0),
1218 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len, 18, 64),
1219 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation, 1, 96),
1220 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr, 22, 97),
1221 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num, 3, 128),
1222 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num, 10, 131),
1223 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type, 2, 141),
1224 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr, 1, 160),
1225 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid, 8, 161),
1226 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache, 512, 192),
1231 * ice_write_tx_cmpltnq_ctx
1232 * @hw: pointer to the hardware structure
1233 * @tx_cmpltnq_ctx: pointer to the completion queue context
1234 * @tx_cmpltnq_index: the index of the completion queue
1236 * Converts completion queue context from sparse to dense structure and then
1237 * writes it to HW register space
1240 ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
1241 struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
1242 u32 tx_cmpltnq_index)
1244 u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
1246 ice_set_ctx(hw, (u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
1247 return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
1251 * ice_clear_tx_cmpltnq_ctx
1252 * @hw: pointer to the hardware structure
1253 * @tx_cmpltnq_index: the index of the completion queue to clear
1255 * Clears Tx completion queue context in HW register space
1258 ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
1262 if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
1263 return ICE_ERR_PARAM;
1265 /* Clear each dword register separately */
1266 for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
1267 wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);
1273 * ice_copy_tx_drbell_q_ctx_to_hw
1274 * @hw: pointer to the hardware structure
1275 * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
1276 * @tx_drbell_q_index: the index of the doorbell queue
1278 * Copies doorbell queue context from dense structure to HW register space
1280 static enum ice_status
1281 ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
1282 u32 tx_drbell_q_index)
1286 if (!ice_tx_drbell_q_ctx)
1287 return ICE_ERR_BAD_PTR;
1289 if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1290 return ICE_ERR_PARAM;
1292 /* Copy each dword separately to HW */
1293 for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
1294 wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
1295 *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1297 ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
1298 *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1304 /* LAN Tx Doorbell Queue Context info */
1305 static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
1306 /* Field Width LSB */
1307 ICE_CTX_STORE(ice_tx_drbell_q_ctx, base, 57, 0),
1308 ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len, 13, 64),
1309 ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num, 3, 80),
1310 ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num, 8, 84),
1311 ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type, 2, 94),
1312 ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid, 8, 96),
1313 ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd, 1, 104),
1314 ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr, 1, 108),
1315 ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en, 1, 112),
1316 ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head, 13, 128),
1317 ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail, 13, 144),
1322 * ice_write_tx_drbell_q_ctx
1323 * @hw: pointer to the hardware structure
1324 * @tx_drbell_q_ctx: pointer to the doorbell queue context
1325 * @tx_drbell_q_index: the index of the doorbell queue
1327 * Converts doorbell queue context from sparse to dense structure and then
1328 * writes it to HW register space
1331 ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
1332 struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
1333 u32 tx_drbell_q_index)
1335 u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
1337 ice_set_ctx(hw, (u8 *)tx_drbell_q_ctx, ctx_buf,
1338 ice_tx_drbell_q_ctx_info);
1339 return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
1343 * ice_clear_tx_drbell_q_ctx
1344 * @hw: pointer to the hardware structure
1345 * @tx_drbell_q_index: the index of the doorbell queue to clear
1347 * Clears doorbell queue context in HW register space
1350 ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
1354 if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1355 return ICE_ERR_PARAM;
1357 /* Clear each dword register separately */
1358 for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
1359 wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);
1364 /* FW Admin Queue command wrappers */
1367 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1368 * @hw: pointer to the HW struct
1369 * @desc: descriptor describing the command
1370 * @buf: buffer to use for indirect commands (NULL for direct commands)
1371 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1372 * @cd: pointer to command details structure
1374 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1377 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1378 u16 buf_size, struct ice_sq_cd *cd)
1380 return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
1385 * @hw: pointer to the HW struct
1386 * @cd: pointer to command details structure or NULL
1388 * Get the firmware version (0x0001) from the admin queue commands
1390 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1392 struct ice_aqc_get_ver *resp;
1393 struct ice_aq_desc desc;
1394 enum ice_status status;
1396 resp = &desc.params.get_ver;
1398 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1400 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1403 hw->fw_branch = resp->fw_branch;
1404 hw->fw_maj_ver = resp->fw_major;
1405 hw->fw_min_ver = resp->fw_minor;
1406 hw->fw_patch = resp->fw_patch;
1407 hw->fw_build = LE32_TO_CPU(resp->fw_build);
1408 hw->api_branch = resp->api_branch;
1409 hw->api_maj_ver = resp->api_major;
1410 hw->api_min_ver = resp->api_minor;
1411 hw->api_patch = resp->api_patch;
1418 * ice_aq_send_driver_ver
1419 * @hw: pointer to the HW struct
1420 * @dv: driver's major, minor version
1421 * @cd: pointer to command details structure or NULL
1423 * Send the driver version (0x0002) to the firmware
1426 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1427 struct ice_sq_cd *cd)
1429 struct ice_aqc_driver_ver *cmd;
1430 struct ice_aq_desc desc;
1433 cmd = &desc.params.driver_ver;
1436 return ICE_ERR_PARAM;
1438 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1440 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1441 cmd->major_ver = dv->major_ver;
1442 cmd->minor_ver = dv->minor_ver;
1443 cmd->build_ver = dv->build_ver;
1444 cmd->subbuild_ver = dv->subbuild_ver;
1447 while (len < sizeof(dv->driver_string) &&
1448 IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
1451 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
1456 * @hw: pointer to the HW struct
1457 * @unloading: is the driver unloading itself
1459 * Tell the Firmware that we're shutting down the AdminQ and whether
1460 * or not the driver is unloading as well (0x0003).
1462 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1464 struct ice_aqc_q_shutdown *cmd;
1465 struct ice_aq_desc desc;
1467 cmd = &desc.params.q_shutdown;
1469 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1472 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1474 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1479 * @hw: pointer to the HW struct
1481 * @access: access type
1482 * @sdp_number: resource number
1483 * @timeout: the maximum time in ms that the driver may hold the resource
1484 * @cd: pointer to command details structure or NULL
1486 * Requests common resource using the admin queue commands (0x0008).
1487 * When attempting to acquire the Global Config Lock, the driver can
1488 * learn of three states:
1489 * 1) ICE_SUCCESS - acquired lock, and can perform download package
1490 * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
1491 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
1492 * successfully downloaded the package; the driver does
1493 * not have to download the package and can continue
1496 * Note that if the caller is in an acquire lock, perform action, release lock
1497 * phase of operation, it is possible that the FW may detect a timeout and issue
1498 * a CORER. In this case, the driver will receive a CORER interrupt and will
1499 * have to determine its cause. The calling thread that is handling this flow
1500 * will likely get an error propagated back to it indicating the Download
1501 * Package, Update Package or the Release Resource AQ commands timed out.
1503 static enum ice_status
1504 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1505 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1506 struct ice_sq_cd *cd)
1508 struct ice_aqc_req_res *cmd_resp;
1509 struct ice_aq_desc desc;
1510 enum ice_status status;
1512 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1514 cmd_resp = &desc.params.res_owner;
1516 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1518 cmd_resp->res_id = CPU_TO_LE16(res);
1519 cmd_resp->access_type = CPU_TO_LE16(access);
1520 cmd_resp->res_number = CPU_TO_LE32(sdp_number);
1521 cmd_resp->timeout = CPU_TO_LE32(*timeout);
1524 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1526 /* The completion specifies the maximum time in ms that the driver
1527 * may hold the resource in the Timeout field.
1530 /* Global config lock response utilizes an additional status field.
1532 * If the Global config lock resource is held by some other driver, the
1533 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1534 * and the timeout field indicates the maximum time the current owner
1535 * of the resource has to free it.
1537 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1538 if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1539 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1541 } else if (LE16_TO_CPU(cmd_resp->status) ==
1542 ICE_AQ_RES_GLBL_IN_PROG) {
1543 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1544 return ICE_ERR_AQ_ERROR;
1545 } else if (LE16_TO_CPU(cmd_resp->status) ==
1546 ICE_AQ_RES_GLBL_DONE) {
1547 return ICE_ERR_AQ_NO_WORK;
1550 /* invalid FW response, force a timeout immediately */
1552 return ICE_ERR_AQ_ERROR;
1555 /* If the resource is held by some other driver, the command completes
1556 * with a busy return value and the timeout field indicates the maximum
1557 * time the current owner of the resource has to free it.
1559 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1560 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1566 * ice_aq_release_res
1567 * @hw: pointer to the HW struct
1569 * @sdp_number: resource number
1570 * @cd: pointer to command details structure or NULL
1572 * Release a common resource using the admin queue commands (0x0009)
1574 static enum ice_status
1575 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1576 struct ice_sq_cd *cd)
1578 struct ice_aqc_req_res *cmd;
1579 struct ice_aq_desc desc;
1581 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1583 cmd = &desc.params.res_owner;
1585 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1587 cmd->res_id = CPU_TO_LE16(res);
1588 cmd->res_number = CPU_TO_LE32(sdp_number);
1590 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1595 * @hw: pointer to the HW structure
1597 * @access: access type (read or write)
1598 * @timeout: timeout in milliseconds
1600 * This function will attempt to acquire the ownership of a resource.
1603 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1604 enum ice_aq_res_access_type access, u32 timeout)
1606 #define ICE_RES_POLLING_DELAY_MS 10
1607 u32 delay = ICE_RES_POLLING_DELAY_MS;
1608 u32 time_left = timeout;
1609 enum ice_status status;
1611 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1613 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1615 /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
1616 * previously acquired the resource and performed any necessary updates;
1617 * in this case the caller does not obtain the resource and has no
1618 * further work to do.
1620 if (status == ICE_ERR_AQ_NO_WORK)
1621 goto ice_acquire_res_exit;
1624 ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
1626 /* If necessary, poll until the current lock owner times out */
1627 timeout = time_left;
1628 while (status && timeout && time_left) {
1629 ice_msec_delay(delay, true);
1630 timeout = (timeout > delay) ? timeout - delay : 0;
1631 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1633 if (status == ICE_ERR_AQ_NO_WORK)
1634 /* lock free, but no work to do */
1641 if (status && status != ICE_ERR_AQ_NO_WORK)
1642 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1644 ice_acquire_res_exit:
1645 if (status == ICE_ERR_AQ_NO_WORK) {
1646 if (access == ICE_RES_WRITE)
1647 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
1649 ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
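/* Illustrative sketch of the acquire/perform/release pattern described above
 * for the Global Config Lock; the download step is a placeholder and the
 * surrounding flow is not the driver's actual package-download path.
 *
 *	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE,
 *				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
 *	if (status == ICE_SUCCESS) {
 *		// lock held: download the package, then release the lock
 *		ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
 *	} else if (status == ICE_ERR_AQ_NO_WORK) {
 *		// another driver already downloaded the package; nothing to do
 *	}
 */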
1656 * @hw: pointer to the HW structure
1659 * This function will release a resource using the proper Admin Command.
1661 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1663 enum ice_status status;
1664 u32 total_delay = 0;
1666 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1668 status = ice_aq_release_res(hw, res, 0, NULL);
1670 /* there are some rare cases when trying to release the resource
1671 * results in an admin queue timeout, so handle them correctly
1673 while ((status == ICE_ERR_AQ_TIMEOUT) &&
1674 (total_delay < hw->adminq.sq_cmd_timeout)) {
1675 ice_msec_delay(1, true);
1676 status = ice_aq_release_res(hw, res, 0, NULL);
1682 * ice_aq_alloc_free_res - command to allocate/free resources
1683 * @hw: pointer to the HW struct
1684 * @num_entries: number of resource entries in buffer
1685 * @buf: Indirect buffer to hold data parameters and response
1686 * @buf_size: size of buffer for indirect commands
1687 * @opc: pass in the command opcode
1688 * @cd: pointer to command details structure or NULL
1690 * Helper function to allocate/free resources using the admin queue commands
1693 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
1694 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
1695 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1697 struct ice_aqc_alloc_free_res_cmd *cmd;
1698 struct ice_aq_desc desc;
1700 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1702 cmd = &desc.params.sw_res_ctrl;
1705 return ICE_ERR_PARAM;
1707 if (buf_size < FLEX_ARRAY_SIZE(buf, elem, num_entries))
1708 return ICE_ERR_PARAM;
1710 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1712 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1714 cmd->num_entries = CPU_TO_LE16(num_entries);
1716 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1720 * ice_alloc_hw_res - allocate resource
1721 * @hw: pointer to the HW struct
1722 * @type: type of resource
1723 * @num: number of resources to allocate
1724 * @btm: allocate from bottom
1725 * @res: pointer to array that will receive the resources
1728 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
1730 struct ice_aqc_alloc_free_res_elem *buf;
1731 enum ice_status status;
1734 buf_len = ice_struct_size(buf, elem, num);
1735 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1737 return ICE_ERR_NO_MEMORY;
1739 /* Prepare buffer to allocate resource. */
1740 buf->num_elems = CPU_TO_LE16(num);
1741 buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
1742 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
1744 buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
1746 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
1747 ice_aqc_opc_alloc_res, NULL);
1749 goto ice_alloc_res_exit;
1751 ice_memcpy(res, buf->elem, sizeof(*buf->elem) * num,
1752 ICE_NONDMA_TO_NONDMA);
1760 * ice_free_hw_res - free allocated HW resource
1761 * @hw: pointer to the HW struct
1762 * @type: type of resource to free
1763 * @num: number of resources
1764 * @res: pointer to array that contains the resources to free
1766 enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
1768 struct ice_aqc_alloc_free_res_elem *buf;
1769 enum ice_status status;
1772 buf_len = ice_struct_size(buf, elem, num);
1773 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1775 return ICE_ERR_NO_MEMORY;
1777 /* Prepare buffer to free resource. */
1778 buf->num_elems = CPU_TO_LE16(num);
1779 buf->res_type = CPU_TO_LE16(type);
1780 ice_memcpy(buf->elem, res, sizeof(*buf->elem) * num,
1781 ICE_NONDMA_TO_NONDMA);
1783 status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
1784 ice_aqc_opc_free_res, NULL);
1786 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
1793 * ice_get_num_per_func - determine number of resources per PF
1794 * @hw: pointer to the HW structure
1795 * @max: value to be evenly split between each PF
1797 * Determine the number of valid functions by going through the bitmap returned
1798 * from parsing capabilities and use this to calculate the number of resources
1799 * per PF based on the max value passed in.
1801 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
1805 #define ICE_CAPS_VALID_FUNCS_M 0xFF
1806 funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions &
1807 ICE_CAPS_VALID_FUNCS_M);
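/* Worked example (illustrative): with a valid_functions bitmap of 0x0F
 * (four active PFs) and max = 2048, each PF is given 2048 / 4 = 512
 * resources.
 */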
1816 * ice_print_led_caps - print LED capabilities
1817 * @hw: pointer to the ice_hw instance
1818 * @caps: pointer to common caps instance
1819 * @prefix: string to prefix when printing
1820 * @debug: set to indicate debug print
1823 ice_print_led_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
1824 char const *prefix, bool debug)
1829 ice_debug(hw, ICE_DBG_INIT, "%s: led_pin_num = %d\n", prefix,
1832 ice_info(hw, "%s: led_pin_num = %d\n", prefix,
1835 for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_LED; i++) {
1840 ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = %d\n",
1841 prefix, i, caps->led[i]);
1843 ice_info(hw, "%s: led[%d] = %d\n", prefix, i,
1849 * ice_print_sdp_caps - print SDP capabilities
1850 * @hw: pointer to the ice_hw instance
1851 * @caps: pointer to common caps instance
1852 * @prefix: string to prefix when printing
1853 * @debug: set to indicate debug print
1856 ice_print_sdp_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
1857 char const *prefix, bool debug)
1862 ice_debug(hw, ICE_DBG_INIT, "%s: sdp_pin_num = %d\n", prefix,
1865 ice_info(hw, "%s: sdp_pin_num = %d\n", prefix,
1868 for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_SDP; i++) {
1873 ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = %d\n",
1874 prefix, i, caps->sdp[i]);
1876 ice_info(hw, "%s: sdp[%d] = %d\n", prefix,
1882 * ice_parse_common_caps - parse common device/function capabilities
1883 * @hw: pointer to the HW struct
1884 * @caps: pointer to common capabilities structure
1885 * @elem: the capability element to parse
1886 * @prefix: message prefix for tracing capabilities
1888 * Given a capability element, extract relevant details into the common
1889 * capability structure.
1891 * Returns: true if the capability matches one of the common capability ids, false otherwise.
1895 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
1896 struct ice_aqc_list_caps_elem *elem, const char *prefix)
1898 u32 logical_id = LE32_TO_CPU(elem->logical_id);
1899 u32 phys_id = LE32_TO_CPU(elem->phys_id);
1900 u32 number = LE32_TO_CPU(elem->number);
1901 u16 cap = LE16_TO_CPU(elem->cap);
1905 case ICE_AQC_CAPS_SWITCHING_MODE:
1906 caps->switching_mode = number;
1907 ice_debug(hw, ICE_DBG_INIT, "%s: switching_mode = %d\n", prefix,
1908 caps->switching_mode);
1910 case ICE_AQC_CAPS_MANAGEABILITY_MODE:
1911 caps->mgmt_mode = number;
1912 caps->mgmt_protocols_mctp = logical_id;
1913 ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_mode = %d\n", prefix,
1915 ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_protocols_mctp = %d\n", prefix,
1916 caps->mgmt_protocols_mctp);
1918 case ICE_AQC_CAPS_OS2BMC:
1919 caps->os2bmc = number;
1920 ice_debug(hw, ICE_DBG_INIT, "%s: os2bmc = %d\n", prefix, caps->os2bmc);
1922 case ICE_AQC_CAPS_VALID_FUNCTIONS:
1923 caps->valid_functions = number;
1924 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
1925 caps->valid_functions);
1927 case ICE_AQC_CAPS_SRIOV:
1928 caps->sr_iov_1_1 = (number == 1);
1929 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
1932 case ICE_AQC_CAPS_802_1QBG:
1933 caps->evb_802_1_qbg = (number == 1);
1934 ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbg = %d\n", prefix, number);
1936 case ICE_AQC_CAPS_802_1BR:
1937 caps->evb_802_1_qbh = (number == 1);
1938 ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbh = %d\n", prefix, number);
1940 case ICE_AQC_CAPS_DCB:
1941 caps->dcb = (number == 1);
1942 caps->active_tc_bitmap = logical_id;
1943 caps->maxtc = phys_id;
1944 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
1945 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
1946 caps->active_tc_bitmap);
1947 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
1949 case ICE_AQC_CAPS_ISCSI:
1950 caps->iscsi = (number == 1);
1951 ice_debug(hw, ICE_DBG_INIT, "%s: iscsi = %d\n", prefix, caps->iscsi);
1953 case ICE_AQC_CAPS_RSS:
1954 caps->rss_table_size = number;
1955 caps->rss_table_entry_width = logical_id;
1956 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
1957 caps->rss_table_size);
1958 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
1959 caps->rss_table_entry_width);
1961 case ICE_AQC_CAPS_RXQS:
1962 caps->num_rxq = number;
1963 caps->rxq_first_id = phys_id;
1964 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
1966 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
1967 caps->rxq_first_id);
1969 case ICE_AQC_CAPS_TXQS:
1970 caps->num_txq = number;
1971 caps->txq_first_id = phys_id;
1972 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
1974 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
1975 caps->txq_first_id);
1977 case ICE_AQC_CAPS_MSIX:
1978 caps->num_msix_vectors = number;
1979 caps->msix_vector_first_id = phys_id;
1980 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
1981 caps->num_msix_vectors);
1982 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
1983 caps->msix_vector_first_id);
1985 case ICE_AQC_CAPS_NVM_VER:
1987 case ICE_AQC_CAPS_NVM_MGMT:
1988 caps->sec_rev_disabled =
1989 (number & ICE_NVM_MGMT_SEC_REV_DISABLED) ?
1991 ice_debug(hw, ICE_DBG_INIT, "%s: sec_rev_disabled = %d\n", prefix,
1992 caps->sec_rev_disabled);
1993 caps->update_disabled =
1994 (number & ICE_NVM_MGMT_UPDATE_DISABLED) ?
1996 ice_debug(hw, ICE_DBG_INIT, "%s: update_disabled = %d\n", prefix,
1997 caps->update_disabled);
1998 caps->nvm_unified_update =
1999 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
2001 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
2002 caps->nvm_unified_update);
2004 case ICE_AQC_CAPS_CEM:
2005 caps->mgmt_cem = (number == 1);
2006 ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_cem = %d\n", prefix,
2009 case ICE_AQC_CAPS_LED:
2010 if (phys_id < ICE_MAX_SUPPORTED_GPIO_LED) {
2011 caps->led[phys_id] = true;
2012 caps->led_pin_num++;
2013 ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = 1\n", prefix, phys_id);
2016 case ICE_AQC_CAPS_SDP:
2017 if (phys_id < ICE_MAX_SUPPORTED_GPIO_SDP) {
2018 caps->sdp[phys_id] = true;
2019 caps->sdp_pin_num++;
2020 ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = 1\n", prefix, phys_id);
2023 case ICE_AQC_CAPS_WR_CSR_PROT:
2024 caps->wr_csr_prot = number;
2025 caps->wr_csr_prot |= (u64)logical_id << 32;
2026 ice_debug(hw, ICE_DBG_INIT, "%s: wr_csr_prot = 0x%llX\n", prefix,
2027 (unsigned long long)caps->wr_csr_prot);
2029 case ICE_AQC_CAPS_WOL_PROXY:
2030 caps->num_wol_proxy_fltr = number;
2031 caps->wol_proxy_vsi_seid = logical_id;
2032 caps->apm_wol_support = !!(phys_id & ICE_WOL_SUPPORT_M);
2033 caps->acpi_prog_mthd = !!(phys_id &
2034 ICE_ACPI_PROG_MTHD_M);
2035 caps->proxy_support = !!(phys_id & ICE_PROXY_SUPPORT_M);
2036 ice_debug(hw, ICE_DBG_INIT, "%s: num_wol_proxy_fltr = %d\n", prefix,
2037 caps->num_wol_proxy_fltr);
2038 ice_debug(hw, ICE_DBG_INIT, "%s: wol_proxy_vsi_seid = %d\n", prefix,
2039 caps->wol_proxy_vsi_seid);
2041 case ICE_AQC_CAPS_MAX_MTU:
2042 caps->max_mtu = number;
2043 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
2044 prefix, caps->max_mtu);
2047 /* Not one of the recognized common capabilities */
2055 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
2056 * @hw: pointer to the HW structure
2057 * @caps: pointer to capabilities structure to fix
2059 * Re-calculate the capabilities that are dependent on the number of physical
2060 * ports; i.e. some features are not supported or function differently on
2061 * devices with more than 4 ports.
2064 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
2066 /* This assumes device capabilities are always scanned before function
2067 * capabilities during the initialization flow.
2069 if (hw->dev_caps.num_funcs > 4) {
2070 /* Max 4 TCs per port */
2072 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
2078 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
2079 * @hw: pointer to the HW struct
2080 * @func_p: pointer to function capabilities structure
2081 * @cap: pointer to the capability element to parse
2083 * Extract function capabilities for ICE_AQC_CAPS_VF.
2086 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2087 struct ice_aqc_list_caps_elem *cap)
2089 u32 number = LE32_TO_CPU(cap->number);
2090 u32 logical_id = LE32_TO_CPU(cap->logical_id);
2092 func_p->num_allocd_vfs = number;
2093 func_p->vf_base_id = logical_id;
2094 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
2095 func_p->num_allocd_vfs);
2096 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
2097 func_p->vf_base_id);
2101 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
2102 * @hw: pointer to the HW struct
2103 * @func_p: pointer to function capabilities structure
2104 * @cap: pointer to the capability element to parse
2106 * Extract function capabilities for ICE_AQC_CAPS_VSI.
2109 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2110 struct ice_aqc_list_caps_elem *cap)
2112 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
2113 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
2114 LE32_TO_CPU(cap->number));
2115 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
2116 func_p->guar_num_vsi);
2120 * ice_parse_func_caps - Parse function capabilities
2121 * @hw: pointer to the HW struct
2122 * @func_p: pointer to function capabilities structure
2123 * @buf: buffer containing the function capability records
2124 * @cap_count: the number of capabilities
2126 * Helper function to parse function (0x000A) capabilities list. For
2127 * capabilities shared between device and function, this relies on
2128 * ice_parse_common_caps.
2130 * Loop through the list of provided capabilities and extract the relevant
2131 * data into the function capabilities structure.
2134 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2135 void *buf, u32 cap_count)
2137 struct ice_aqc_list_caps_elem *cap_resp;
2140 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2142 ice_memset(func_p, 0, sizeof(*func_p), ICE_NONDMA_MEM);
2144 for (i = 0; i < cap_count; i++) {
2145 u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2148 found = ice_parse_common_caps(hw, &func_p->common_cap,
2149 &cap_resp[i], "func caps");
2152 case ICE_AQC_CAPS_VF:
2153 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
2155 case ICE_AQC_CAPS_VSI:
2156 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2159 /* Don't list common capabilities as unknown */
2161 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2167 ice_print_led_caps(hw, &func_p->common_cap, "func caps", true);
2168 ice_print_sdp_caps(hw, &func_p->common_cap, "func caps", true);
2170 ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2174 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2175 * @hw: pointer to the HW struct
2176 * @dev_p: pointer to device capabilities structure
2177 * @cap: capability element to parse
2179 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2182 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2183 struct ice_aqc_list_caps_elem *cap)
2185 u32 number = LE32_TO_CPU(cap->number);
2187 dev_p->num_funcs = ice_hweight32(number);
2188 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2193 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
2194 * @hw: pointer to the HW struct
2195 * @dev_p: pointer to device capabilities structure
2196 * @cap: capability element to parse
2198 * Parse ICE_AQC_CAPS_VF for device capabilities.
2201 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2202 struct ice_aqc_list_caps_elem *cap)
2204 u32 number = LE32_TO_CPU(cap->number);
2206 dev_p->num_vfs_exposed = number;
2207 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
2208 dev_p->num_vfs_exposed);
2212 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2213 * @hw: pointer to the HW struct
2214 * @dev_p: pointer to device capabilities structure
2215 * @cap: capability element to parse
2217 * Parse ICE_AQC_CAPS_VSI for device capabilities.
2220 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2221 struct ice_aqc_list_caps_elem *cap)
2223 u32 number = LE32_TO_CPU(cap->number);
2225 dev_p->num_vsi_allocd_to_host = number;
2226 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2227 dev_p->num_vsi_allocd_to_host);
2231 * ice_parse_dev_caps - Parse device capabilities
2232 * @hw: pointer to the HW struct
2233 * @dev_p: pointer to device capabilities structure
2234 * @buf: buffer containing the device capability records
2235 * @cap_count: the number of capabilities
2237 * Helper function to parse the device (0x000B) capabilities list. For
2238 * capabilities shared between device and function, this relies on
2239 * ice_parse_common_caps.
2241 * Loop through the list of provided capabilities and extract the relevant
2242 * data into the device capabilities structure.
2245 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2246 void *buf, u32 cap_count)
2248 struct ice_aqc_list_caps_elem *cap_resp;
2251 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2253 ice_memset(dev_p, 0, sizeof(*dev_p), ICE_NONDMA_MEM);
2255 for (i = 0; i < cap_count; i++) {
2256 u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2259 found = ice_parse_common_caps(hw, &dev_p->common_cap,
2260 &cap_resp[i], "dev caps");
2263 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2264 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2266 case ICE_AQC_CAPS_VF:
2267 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
2269 case ICE_AQC_CAPS_VSI:
2270 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2273 /* Don't list common capabilities as unknown */
2275 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
2281 ice_print_led_caps(hw, &dev_p->common_cap, "dev caps", true);
2282 ice_print_sdp_caps(hw, &dev_p->common_cap, "dev caps", true);
2284 ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2288 * ice_aq_list_caps - query function/device capabilities
2289 * @hw: pointer to the HW struct
2290 * @buf: a buffer to hold the capabilities
2291 * @buf_size: size of the buffer
2292 * @cap_count: if not NULL, set to the number of capabilities reported
2293 * @opc: capabilities type to discover, device or function
2294 * @cd: pointer to command details structure or NULL
2296 * Get the function (0x000A) or device (0x000B) capabilities description from
2297 * firmware and store it in the buffer.
2299 * If the cap_count pointer is not NULL, then it is set to the number of
2300 * capabilities firmware will report. Note that if the buffer size is too
2301 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2302 * cap_count will still be updated in this case. It is recommended that the
2303 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2304 * firmware could return) to avoid this.
2306 static enum ice_status
2307 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2308 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2310 struct ice_aqc_list_caps *cmd;
2311 struct ice_aq_desc desc;
2312 enum ice_status status;
2314 cmd = &desc.params.get_cap;
2316 if (opc != ice_aqc_opc_list_func_caps &&
2317 opc != ice_aqc_opc_list_dev_caps)
2318 return ICE_ERR_PARAM;
2320 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2321 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2324 *cap_count = LE32_TO_CPU(cmd->count);
2330 * ice_discover_dev_caps - Read and extract device capabilities
2331 * @hw: pointer to the hardware structure
2332 * @dev_caps: pointer to device capabilities structure
2334 * Read the device capabilities and extract them into the dev_caps structure
2337 static enum ice_status
2338 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2340 enum ice_status status;
2344 cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2346 return ICE_ERR_NO_MEMORY;
2348 /* Although the driver doesn't know the number of capabilities the
2349 * device will return, we can simply send a 4KB buffer, the maximum
2350 * possible size that firmware can return.
2352 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2354 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2355 ice_aqc_opc_list_dev_caps, NULL);
2357 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2364 * ice_discover_func_caps - Read and extract function capabilities
2365 * @hw: pointer to the hardware structure
2366 * @func_caps: pointer to function capabilities structure
2368 * Read the function capabilities and extract them into the func_caps structure
2371 static enum ice_status
2372 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
2374 enum ice_status status;
2378 cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2380 return ICE_ERR_NO_MEMORY;
2382 /* Although the driver doesn't know the number of capabilities the
2383 * device will return, we can simply send a 4KB buffer, the maximum
2384 * possible size that firmware can return.
2386 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2388 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2389 ice_aqc_opc_list_func_caps, NULL);
2391 ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
2398 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2399 * @hw: pointer to the hardware structure
2401 void ice_set_safe_mode_caps(struct ice_hw *hw)
2403 struct ice_hw_func_caps *func_caps = &hw->func_caps;
2404 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2405 struct ice_hw_common_caps cached_caps;
2408 /* cache some func_caps values that should be restored after memset */
2409 cached_caps = func_caps->common_cap;
2411 /* unset func capabilities */
2412 memset(func_caps, 0, sizeof(*func_caps));
2414 #define ICE_RESTORE_FUNC_CAP(name) \
2415 func_caps->common_cap.name = cached_caps.name
2417 /* restore cached values */
2418 ICE_RESTORE_FUNC_CAP(valid_functions);
2419 ICE_RESTORE_FUNC_CAP(txq_first_id);
2420 ICE_RESTORE_FUNC_CAP(rxq_first_id);
2421 ICE_RESTORE_FUNC_CAP(msix_vector_first_id);
2422 ICE_RESTORE_FUNC_CAP(max_mtu);
2423 ICE_RESTORE_FUNC_CAP(nvm_unified_update);
2425 /* one Tx and one Rx queue in safe mode */
2426 func_caps->common_cap.num_rxq = 1;
2427 func_caps->common_cap.num_txq = 1;
2429 /* two MSIX vectors, one for traffic and one for misc causes */
2430 func_caps->common_cap.num_msix_vectors = 2;
2431 func_caps->guar_num_vsi = 1;
2433 /* cache some dev_caps values that should be restored after memset */
2434 cached_caps = dev_caps->common_cap;
2435 num_funcs = dev_caps->num_funcs;
2437 /* unset dev capabilities */
2438 memset(dev_caps, 0, sizeof(*dev_caps));
2440 #define ICE_RESTORE_DEV_CAP(name) \
2441 dev_caps->common_cap.name = cached_caps.name
2443 /* restore cached values */
2444 ICE_RESTORE_DEV_CAP(valid_functions);
2445 ICE_RESTORE_DEV_CAP(txq_first_id);
2446 ICE_RESTORE_DEV_CAP(rxq_first_id);
2447 ICE_RESTORE_DEV_CAP(msix_vector_first_id);
2448 ICE_RESTORE_DEV_CAP(max_mtu);
2449 ICE_RESTORE_DEV_CAP(nvm_unified_update);
2450 dev_caps->num_funcs = num_funcs;
2452 /* one Tx and one Rx queue per function in safe mode */
2453 dev_caps->common_cap.num_rxq = num_funcs;
2454 dev_caps->common_cap.num_txq = num_funcs;
2456 /* two MSIX vectors per function */
2457 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2461 * ice_get_caps - get info about the HW
2462 * @hw: pointer to the hardware structure
2464 enum ice_status ice_get_caps(struct ice_hw *hw)
2466 enum ice_status status;
2468 status = ice_discover_dev_caps(hw, &hw->dev_caps);
2472 return ice_discover_func_caps(hw, &hw->func_caps);
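/*
 * Usage sketch (illustrative only): callers typically invoke ice_get_caps()
 * once during initialization and then consult the cached copies in the HW
 * structure, for example:
 *
 *	enum ice_status status = ice_get_caps(hw);
 *
 *	if (!status)
 *		ice_debug(hw, ICE_DBG_INIT, "PF Tx queues: %d\n",
 *			  hw->func_caps.common_cap.num_txq);
 */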
2476 * ice_aq_manage_mac_write - manage MAC address write command
2477 * @hw: pointer to the HW struct
2478 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2479 * @flags: flags to control write behavior
2480 * @cd: pointer to command details structure or NULL
2482 * This function is used to write a MAC address to the NVM (0x0108).
2485 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2486 struct ice_sq_cd *cd)
2488 struct ice_aqc_manage_mac_write *cmd;
2489 struct ice_aq_desc desc;
2491 cmd = &desc.params.mac_write;
2492 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2495 ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_NONDMA);
2497 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2501 * ice_aq_clear_pxe_mode
2502 * @hw: pointer to the HW struct
2504 * Tell the firmware that the driver is taking over from PXE (0x0110).
2506 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
2508 struct ice_aq_desc desc;
2510 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2511 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2513 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2517 * ice_clear_pxe_mode - clear pxe operations mode
2518 * @hw: pointer to the HW struct
2520 * Make sure all PXE mode settings are cleared, including things
2521 * like descriptor fetch/write-back mode.
2523 void ice_clear_pxe_mode(struct ice_hw *hw)
2525 if (ice_check_sq_alive(hw, &hw->adminq))
2526 ice_aq_clear_pxe_mode(hw);
2530 * ice_aq_set_port_params - set physical port parameters.
2531 * @pi: pointer to the port info struct
2532 * @bad_frame_vsi: defines the VSI to which bad frames are forwarded
2533 * @save_bad_pac: if set, packets with errors are forwarded to the bad frames VSI
2534 * @pad_short_pac: if set, transmit packets smaller than 60 bytes are padded
2535 * @double_vlan: if set double VLAN is enabled
2536 * @cd: pointer to command details structure or NULL
2538 * Set Physical port parameters (0x0203)
2541 ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
2542 bool save_bad_pac, bool pad_short_pac, bool double_vlan,
2543 struct ice_sq_cd *cd)
2546 struct ice_aqc_set_port_params *cmd;
2547 struct ice_hw *hw = pi->hw;
2548 struct ice_aq_desc desc;
2551 cmd = &desc.params.set_port_params;
2553 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
2554 cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
2556 cmd_flags |= ICE_AQC_SET_P_PARAMS_SAVE_BAD_PACKETS;
2558 cmd_flags |= ICE_AQC_SET_P_PARAMS_PAD_SHORT_PACKETS;
2560 cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
2561 cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
2563 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
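/*
 * Usage sketch (illustrative only; the bad-frame VSI value of 0 below is a
 * placeholder): enabling double VLAN (QinQ) on a port while leaving the
 * bad-frame forwarding and short-packet padding behavior disabled might
 * look like:
 *
 *	status = ice_aq_set_port_params(pi, 0, false, false, true, NULL);
 */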
2567 * ice_get_link_speed_based_on_phy_type - returns link speed
2568 * @phy_type_low: lower part of phy_type
2569 * @phy_type_high: higher part of phy_type
2571 * This helper function will convert an entry in PHY type structure
2572 * [phy_type_low, phy_type_high] to its corresponding link speed.
2573 * Note: In the structure of [phy_type_low, phy_type_high], there should
2574 * be one bit set, as this function will convert one PHY type to its corresponding speed.
2576 * If no bit is set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
2577 * If more than one bit is set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned.
2580 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
2582 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2583 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2585 switch (phy_type_low) {
2586 case ICE_PHY_TYPE_LOW_100BASE_TX:
2587 case ICE_PHY_TYPE_LOW_100M_SGMII:
2588 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
2590 case ICE_PHY_TYPE_LOW_1000BASE_T:
2591 case ICE_PHY_TYPE_LOW_1000BASE_SX:
2592 case ICE_PHY_TYPE_LOW_1000BASE_LX:
2593 case ICE_PHY_TYPE_LOW_1000BASE_KX:
2594 case ICE_PHY_TYPE_LOW_1G_SGMII:
2595 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
2597 case ICE_PHY_TYPE_LOW_2500BASE_T:
2598 case ICE_PHY_TYPE_LOW_2500BASE_X:
2599 case ICE_PHY_TYPE_LOW_2500BASE_KX:
2600 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
2602 case ICE_PHY_TYPE_LOW_5GBASE_T:
2603 case ICE_PHY_TYPE_LOW_5GBASE_KR:
2604 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
2606 case ICE_PHY_TYPE_LOW_10GBASE_T:
2607 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
2608 case ICE_PHY_TYPE_LOW_10GBASE_SR:
2609 case ICE_PHY_TYPE_LOW_10GBASE_LR:
2610 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
2611 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
2612 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
2613 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
2615 case ICE_PHY_TYPE_LOW_25GBASE_T:
2616 case ICE_PHY_TYPE_LOW_25GBASE_CR:
2617 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
2618 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
2619 case ICE_PHY_TYPE_LOW_25GBASE_SR:
2620 case ICE_PHY_TYPE_LOW_25GBASE_LR:
2621 case ICE_PHY_TYPE_LOW_25GBASE_KR:
2622 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
2623 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
2624 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
2625 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
2626 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
2628 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
2629 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
2630 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
2631 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
2632 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
2633 case ICE_PHY_TYPE_LOW_40G_XLAUI:
2634 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
2636 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
2637 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
2638 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
2639 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
2640 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
2641 case ICE_PHY_TYPE_LOW_50G_LAUI2:
2642 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
2643 case ICE_PHY_TYPE_LOW_50G_AUI2:
2644 case ICE_PHY_TYPE_LOW_50GBASE_CP:
2645 case ICE_PHY_TYPE_LOW_50GBASE_SR:
2646 case ICE_PHY_TYPE_LOW_50GBASE_FR:
2647 case ICE_PHY_TYPE_LOW_50GBASE_LR:
2648 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
2649 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
2650 case ICE_PHY_TYPE_LOW_50G_AUI1:
2651 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
2653 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
2654 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
2655 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
2656 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
2657 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
2658 case ICE_PHY_TYPE_LOW_100G_CAUI4:
2659 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
2660 case ICE_PHY_TYPE_LOW_100G_AUI4:
2661 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
2662 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
2663 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
2664 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
2665 case ICE_PHY_TYPE_LOW_100GBASE_DR:
2666 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
2669 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2673 switch (phy_type_high) {
2674 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2675 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2676 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2677 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2678 case ICE_PHY_TYPE_HIGH_100G_AUI2:
2679 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2682 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2686 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2687 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2688 return ICE_AQ_LINK_SPEED_UNKNOWN;
2689 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2690 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2691 return ICE_AQ_LINK_SPEED_UNKNOWN;
2692 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2693 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2694 return speed_phy_type_low;
2696 return speed_phy_type_high;
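/*
 * Worked example (illustrative only): a single low PHY type bit maps to
 * its speed, while multiple bits cannot be resolved, e.g.
 *
 *	ice_get_link_speed_based_on_phy_type(ICE_PHY_TYPE_LOW_25GBASE_SR, 0)
 *		returns ICE_AQ_LINK_SPEED_25GB
 *	ice_get_link_speed_based_on_phy_type(ICE_PHY_TYPE_LOW_25GBASE_SR |
 *					     ICE_PHY_TYPE_LOW_10GBASE_T, 0)
 *		returns ICE_AQ_LINK_SPEED_UNKNOWN
 */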
2700 * ice_update_phy_type
2701 * @phy_type_low: pointer to the lower part of phy_type
2702 * @phy_type_high: pointer to the higher part of phy_type
2703 * @link_speeds_bitmap: targeted link speeds bitmap
2705 * Note: For the link_speeds_bitmap structure, you can check it at
2706 * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
2707 * link_speeds_bitmap that includes multiple speeds.
2709 * Each entry in this [phy_type_low, phy_type_high] structure will
2710 * represent a certain link speed. This helper function will turn on bits
2711 * in the [phy_type_low, phy_type_high] structure based on the value of
2712 * the link_speeds_bitmap input parameter.
2715 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2716 u16 link_speeds_bitmap)
2723 /* We first check with low part of phy_type */
2724 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2725 pt_low = BIT_ULL(index);
2726 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2728 if (link_speeds_bitmap & speed)
2729 *phy_type_low |= BIT_ULL(index);
2732 /* We then check with high part of phy_type */
2733 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2734 pt_high = BIT_ULL(index);
2735 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2737 if (link_speeds_bitmap & speed)
2738 *phy_type_high |= BIT_ULL(index);
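/*
 * Usage sketch (illustrative only; cfg is assumed to be a caller-owned
 * struct ice_aqc_set_phy_cfg_data): building the PHY type masks for a
 * request limited to 10G and 25G speeds might look like:
 *
 *	u64 phy_low = 0, phy_high = 0;
 *
 *	ice_update_phy_type(&phy_low, &phy_high,
 *			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
 *	cfg.phy_type_low = CPU_TO_LE64(phy_low);
 *	cfg.phy_type_high = CPU_TO_LE64(phy_high);
 */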
2743 * ice_aq_set_phy_cfg
2744 * @hw: pointer to the HW struct
2745 * @pi: port info structure of the interested logical port
2746 * @cfg: structure with PHY configuration data to be set
2747 * @cd: pointer to command details structure or NULL
2749 * Set the various PHY configuration parameters supported on the Port.
2750 * One or more of the Set PHY config parameters may be ignored in an MFP
2751 * mode as the PF may not have the privilege to set some of the PHY Config
2752 * parameters. This status will be indicated by the command response (0x0601).
2755 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
2756 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
2758 struct ice_aq_desc desc;
2759 enum ice_status status;
2762 return ICE_ERR_PARAM;
2764 /* Ensure that only valid bits of cfg->caps can be turned on. */
2765 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
2766 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
2769 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
2772 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
2773 desc.params.set_phy.lport_num = pi->lport;
2774 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2776 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
2777 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
2778 (unsigned long long)LE64_TO_CPU(cfg->phy_type_low));
2779 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
2780 (unsigned long long)LE64_TO_CPU(cfg->phy_type_high));
2781 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps);
2782 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
2783 cfg->low_power_ctrl_an);
2784 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap);
2785 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value);
2786 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n",
2789 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
2791 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
2792 status = ICE_SUCCESS;
2795 pi->phy.curr_user_phy_cfg = *cfg;
2801 * ice_update_link_info - update status of the HW network link
2802 * @pi: port info structure of the interested logical port
2804 enum ice_status ice_update_link_info(struct ice_port_info *pi)
2806 struct ice_link_status *li;
2807 enum ice_status status;
2810 return ICE_ERR_PARAM;
2812 li = &pi->phy.link_info;
2814 status = ice_aq_get_link_info(pi, true, NULL, NULL);
2818 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2819 struct ice_aqc_get_phy_caps_data *pcaps;
2823 pcaps = (struct ice_aqc_get_phy_caps_data *)
2824 ice_malloc(hw, sizeof(*pcaps));
2826 return ICE_ERR_NO_MEMORY;
2828 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
2831 if (status == ICE_SUCCESS)
2832 ice_memcpy(li->module_type, &pcaps->module_type,
2833 sizeof(li->module_type),
2834 ICE_NONDMA_TO_NONDMA);
2836 ice_free(hw, pcaps);
2843 * ice_cache_phy_user_req
2844 * @pi: port information structure
2845 * @cache_data: PHY logging data
2846 * @cache_mode: PHY logging mode
2848 * Log the user request on (FC, FEC, SPEED) for later use.
2851 ice_cache_phy_user_req(struct ice_port_info *pi,
2852 struct ice_phy_cache_mode_data cache_data,
2853 enum ice_phy_cache_mode cache_mode)
2858 switch (cache_mode) {
2860 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
2862 case ICE_SPEED_MODE:
2863 pi->phy.curr_user_speed_req =
2864 cache_data.data.curr_user_speed_req;
2867 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
2875 * ice_caps_to_fc_mode
2876 * @caps: PHY capabilities
2878 * Convert PHY FC capabilities to ice FC mode
2880 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
2882 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
2883 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2886 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
2887 return ICE_FC_TX_PAUSE;
2889 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2890 return ICE_FC_RX_PAUSE;
2896 * ice_caps_to_fec_mode
2897 * @caps: PHY capabilities
2898 * @fec_options: Link FEC options
2900 * Convert PHY FEC capabilities to ice FEC mode
2902 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
2904 if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
2905 return ICE_FEC_AUTO;
2907 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2908 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2909 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
2910 ICE_AQC_PHY_FEC_25G_KR_REQ))
2911 return ICE_FEC_BASER;
2913 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2914 ICE_AQC_PHY_FEC_25G_RS_544_REQ |
2915 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
2918 return ICE_FEC_NONE;
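/*
 * Worked example (illustrative only): with both pause bits advertised,
 *
 *	ice_caps_to_fc_mode(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
 *			    ICE_AQC_PHY_EN_RX_LINK_PAUSE)
 *
 * is expected to report full flow control (ICE_FC_FULL), while a
 * fec_options value containing only ICE_AQC_PHY_FEC_25G_RS_528_REQ (with
 * the auto-FEC capability bit clear) maps to ICE_FEC_RS via
 * ice_caps_to_fec_mode().
 */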
2922 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
2923 * @pi: port information structure
2924 * @cfg: PHY configuration data to set FC mode
2925 * @req_mode: FC mode to configure
2927 static enum ice_status
2928 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
2929 enum ice_fc_mode req_mode)
2931 struct ice_phy_cache_mode_data cache_data;
2932 u8 pause_mask = 0x0;
2935 return ICE_ERR_BAD_PTR;
2940 struct ice_aqc_get_phy_caps_data *pcaps;
2941 enum ice_status status;
2943 pcaps = (struct ice_aqc_get_phy_caps_data *)
2944 ice_malloc(pi->hw, sizeof(*pcaps));
2946 return ICE_ERR_NO_MEMORY;
2948 /* Query the value of FC that both the NIC and attached media
2951 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
2954 ice_free(pi->hw, pcaps);
2958 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2959 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2961 ice_free(pi->hw, pcaps);
2965 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2966 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2968 case ICE_FC_RX_PAUSE:
2969 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2971 case ICE_FC_TX_PAUSE:
2972 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2978 /* clear the old pause settings */
2979 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
2980 ICE_AQC_PHY_EN_RX_LINK_PAUSE);
2982 /* set the new capabilities */
2983 cfg->caps |= pause_mask;
2985 /* Cache user FC request */
2986 cache_data.data.curr_user_fc_req = req_mode;
2987 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
2994 * @pi: port information structure
2995 * @aq_failures: pointer to status code, specific to ice_set_fc routine
2996 * @ena_auto_link_update: enable automatic link update
2998 * Set the requested flow control mode.
3001 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
3003 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
3004 struct ice_aqc_get_phy_caps_data *pcaps;
3005 enum ice_status status;
3008 if (!pi || !aq_failures)
3009 return ICE_ERR_BAD_PTR;
3014 pcaps = (struct ice_aqc_get_phy_caps_data *)
3015 ice_malloc(hw, sizeof(*pcaps));
3017 return ICE_ERR_NO_MEMORY;
3019 /* Get the current PHY config */
3020 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
3023 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3027 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
3029 /* Configure the set PHY data */
3030 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
3032 if (status != ICE_ERR_BAD_PTR)
3033 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3038 /* If the capabilities have changed, then set the new config */
3039 if (cfg.caps != pcaps->caps) {
3040 int retry_count, retry_max = 10;
3042 /* Auto restart link so settings take effect */
3043 if (ena_auto_link_update)
3044 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3046 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3048 *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
3052 /* Update the link info
3053 * It sometimes takes a really long time for link to
3054 * come back from the atomic reset. Thus, we wait a
3057 for (retry_count = 0; retry_count < retry_max; retry_count++) {
3058 status = ice_update_link_info(pi);
3060 if (status == ICE_SUCCESS)
3063 ice_msec_delay(100, true);
3067 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
3071 ice_free(hw, pcaps);
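/*
 * Usage sketch (illustrative only): a caller requests a mode through
 * pi->fc.req_mode and lets the helper report which stage failed, e.g.:
 *
 *	u8 aq_failures = 0;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	status = ice_set_fc(pi, &aq_failures, true);
 *	if (status)
 *		ice_debug(pi->hw, ICE_DBG_LINK,
 *			  "set fc failed, status %d, failure stage %d\n",
 *			  status, aq_failures);
 */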
3076 * ice_phy_caps_equals_cfg
3077 * @phy_caps: PHY capabilities
3078 * @phy_cfg: PHY configuration
3080 * Helper function to determine if the PHY capabilities match the PHY configuration.
3084 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
3085 struct ice_aqc_set_phy_cfg_data *phy_cfg)
3087 u8 caps_mask, cfg_mask;
3089 if (!phy_caps || !phy_cfg)
3092 /* These bits are not common between capabilities and configuration.
3093 * Do not use them to determine equality.
3095 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
3096 ICE_AQC_PHY_EN_MOD_QUAL);
3097 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3099 if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
3100 phy_caps->phy_type_high != phy_cfg->phy_type_high ||
3101 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
3102 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
3103 phy_caps->eee_cap != phy_cfg->eee_cap ||
3104 phy_caps->eeer_value != phy_cfg->eeer_value ||
3105 phy_caps->link_fec_options != phy_cfg->link_fec_opt)
3112 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
3113 * @pi: port information structure
3114 * @caps: PHY ability structure to copy data from
3115 * @cfg: PHY configuration structure to copy data to
3117 * Helper function to copy AQC PHY get ability data to PHY set configuration
3121 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
3122 struct ice_aqc_get_phy_caps_data *caps,
3123 struct ice_aqc_set_phy_cfg_data *cfg)
3125 if (!pi || !caps || !cfg)
3128 ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
3129 cfg->phy_type_low = caps->phy_type_low;
3130 cfg->phy_type_high = caps->phy_type_high;
3131 cfg->caps = caps->caps;
3132 cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
3133 cfg->eee_cap = caps->eee_cap;
3134 cfg->eeer_value = caps->eeer_value;
3135 cfg->link_fec_opt = caps->link_fec_options;
3136 cfg->module_compliance_enforcement =
3137 caps->module_compliance_enforcement;
3139 if (ice_fw_supports_link_override(pi->hw)) {
3140 struct ice_link_default_override_tlv tlv;
3142 if (ice_get_link_default_override(&tlv, pi))
3145 if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE)
3146 cfg->module_compliance_enforcement |=
3147 ICE_LINK_OVERRIDE_STRICT_MODE;
3152 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
3153 * @pi: port information structure
3154 * @cfg: PHY configuration data to set FEC mode
3155 * @fec: FEC mode to configure
3158 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3159 enum ice_fec_mode fec)
3161 struct ice_aqc_get_phy_caps_data *pcaps;
3162 enum ice_status status = ICE_SUCCESS;
3166 return ICE_ERR_BAD_PTR;
3170 pcaps = (struct ice_aqc_get_phy_caps_data *)
3171 ice_malloc(hw, sizeof(*pcaps));
3173 return ICE_ERR_NO_MEMORY;
3175 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
3180 cfg->caps |= (pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
3181 cfg->link_fec_opt = pcaps->link_fec_options;
3185 /* Clear RS bits, and AND BASE-R ability
3186 * bits and OR request bits.
3188 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3189 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3190 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3191 ICE_AQC_PHY_FEC_25G_KR_REQ;
3194 /* Clear BASE-R bits, and AND RS ability
3195 * bits and OR request bits.
3197 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3198 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3199 ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3202 /* Clear all FEC option bits. */
3203 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3206 /* AND auto FEC bit, and all caps bits. */
3207 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3208 cfg->link_fec_opt |= pcaps->link_fec_options;
3211 status = ICE_ERR_PARAM;
3215 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) {
3216 struct ice_link_default_override_tlv tlv;
3218 if (ice_get_link_default_override(&tlv, pi))
3221 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3222 (tlv.options & ICE_LINK_OVERRIDE_EN))
3223 cfg->link_fec_opt = tlv.fec_options;
3227 ice_free(hw, pcaps);
3233 * ice_get_link_status - get status of the HW network link
3234 * @pi: port information structure
3235 * @link_up: pointer to bool (true/false = linkup/linkdown)
3237 * Variable link_up is true if link is up, false if link is down.
3238 * The variable link_up is invalid if status is non-zero. As a
3239 * result of this call, link status reporting becomes enabled
3241 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
3243 struct ice_phy_info *phy_info;
3244 enum ice_status status = ICE_SUCCESS;
3246 if (!pi || !link_up)
3247 return ICE_ERR_PARAM;
3249 phy_info = &pi->phy;
3251 if (phy_info->get_link_info) {
3252 status = ice_update_link_info(pi);
3255 ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
3259 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
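/*
 * Usage sketch (illustrative only): checking link state and then reading
 * the cached link details might look like:
 *
 *	bool link_up = false;
 *
 *	if (!ice_get_link_status(pi, &link_up) && link_up)
 *		ice_debug(pi->hw, ICE_DBG_LINK, "link speed: 0x%x\n",
 *			  pi->phy.link_info.link_speed);
 */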
3265 * ice_aq_set_link_restart_an
3266 * @pi: pointer to the port information structure
3267 * @ena_link: if true: enable link, if false: disable link
3268 * @cd: pointer to command details structure or NULL
3270 * Sets up the link and restarts the Auto-Negotiation over the link.
3273 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
3274 struct ice_sq_cd *cd)
3276 struct ice_aqc_restart_an *cmd;
3277 struct ice_aq_desc desc;
3279 cmd = &desc.params.restart_an;
3281 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
3283 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
3284 cmd->lport_num = pi->lport;
3286 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
3288 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
3290 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
3294 * ice_aq_set_event_mask
3295 * @hw: pointer to the HW struct
3296 * @port_num: port number of the physical function
3297 * @mask: event mask to be set
3298 * @cd: pointer to command details structure or NULL
3300 * Set event mask (0x0613)
3303 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
3304 struct ice_sq_cd *cd)
3306 struct ice_aqc_set_event_mask *cmd;
3307 struct ice_aq_desc desc;
3309 cmd = &desc.params.set_event_mask;
3311 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
3313 cmd->lport_num = port_num;
3315 cmd->event_mask = CPU_TO_LE16(mask);
3316 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3320 * ice_aq_set_mac_loopback
3321 * @hw: pointer to the HW struct
3322 * @ena_lpbk: Enable or Disable loopback
3323 * @cd: pointer to command details structure or NULL
3325 * Enable/disable loopback on a given port
3328 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
3330 struct ice_aqc_set_mac_lb *cmd;
3331 struct ice_aq_desc desc;
3333 cmd = &desc.params.set_mac_lb;
3335 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3337 cmd->lb_mode = ICE_AQ_MAC_LB_EN;
3339 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3343 * ice_aq_set_port_id_led
3344 * @pi: pointer to the port information
3345 * @is_orig_mode: is this LED set to original mode (by the net-list)
3346 * @cd: pointer to command details structure or NULL
3348 * Set LED value for the given port (0x06e9)
3351 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3352 struct ice_sq_cd *cd)
3354 struct ice_aqc_set_port_id_led *cmd;
3355 struct ice_hw *hw = pi->hw;
3356 struct ice_aq_desc desc;
3358 cmd = &desc.params.set_port_id_led;
3360 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3363 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3365 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3367 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3372 * @hw: pointer to the HW struct
3373 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
3374 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
3375 * @mem_addr: I2C offset. Lower 8 bits for the address, upper 8 bits are zero padding.
3377 * @set_page: set or ignore the page
3378 * @data: pointer to data buffer to be read/written to the I2C device.
3379 * @length: 1-16 for read, 1 for write.
3380 * @write: 0 for read, 1 for write.
3381 * @cd: pointer to command details structure or NULL
3383 * Read/Write SFF EEPROM (0x06EE)
3386 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3387 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3388 bool write, struct ice_sq_cd *cd)
3390 struct ice_aqc_sff_eeprom *cmd;
3391 struct ice_aq_desc desc;
3392 enum ice_status status;
3394 if (!data || (mem_addr & 0xff00))
3395 return ICE_ERR_PARAM;
3397 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3398 cmd = &desc.params.read_write_sff_param;
3399 desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD);
3400 cmd->lport_num = (u8)(lport & 0xff);
3401 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3402 cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) &
3403 ICE_AQC_SFF_I2CBUS_7BIT_M) |
3405 ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3406 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3407 cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff);
3408 cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3410 cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE);
3412 status = ice_aq_send_cmd(hw, &desc, data, length, cd);
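/*
 * Usage sketch (illustrative only; logical port 0 and the conventional
 * SFP EEPROM address 0xA0 are assumptions of this example): reading the
 * single identifier byte at offset 0 of a module's EEPROM might look like:
 *
 *	u8 id = 0;
 *
 *	status = ice_aq_sff_eeprom(hw, 0, 0xA0, 0x0, 0, 0, &id, 1,
 *				   false, NULL);
 */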
3417 * __ice_aq_get_set_rss_lut
3418 * @hw: pointer to the hardware structure
3419 * @params: RSS LUT parameters
3420 * @set: set true to set the table, false to get the table
3422 * Internal function to get (0x0B05) or set (0x0B03) the RSS lookup table
3424 static enum ice_status
3425 __ice_aq_get_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *params, bool set)
3427 u16 flags = 0, vsi_id, lut_type, lut_size, glob_lut_idx, vsi_handle;
3428 struct ice_aqc_get_set_rss_lut *cmd_resp;
3429 struct ice_aq_desc desc;
3430 enum ice_status status;
3434 return ICE_ERR_PARAM;
3436 vsi_handle = params->vsi_handle;
3439 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3440 return ICE_ERR_PARAM;
3442 lut_size = params->lut_size;
3443 lut_type = params->lut_type;
3444 glob_lut_idx = params->global_lut_id;
3445 vsi_id = ice_get_hw_vsi_num(hw, vsi_handle);
3447 cmd_resp = &desc.params.get_set_rss_lut;
3450 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
3451 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3453 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
3456 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
3457 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
3458 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
3459 ICE_AQC_GSET_RSS_LUT_VSI_VALID);
3462 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
3463 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
3464 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
3465 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
3466 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
3469 status = ICE_ERR_PARAM;
3470 goto ice_aq_get_set_rss_lut_exit;
3473 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
3474 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
3475 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
3478 goto ice_aq_get_set_rss_lut_send;
3479 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3481 goto ice_aq_get_set_rss_lut_send;
3483 goto ice_aq_get_set_rss_lut_send;
3486 /* LUT size is only valid for Global and PF table types */
3488 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
3489 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
3490 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3491 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3493 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
3494 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
3495 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3496 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3498 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
3499 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3500 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
3501 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3502 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3507 status = ICE_ERR_PARAM;
3508 goto ice_aq_get_set_rss_lut_exit;
3511 ice_aq_get_set_rss_lut_send:
3512 cmd_resp->flags = CPU_TO_LE16(flags);
3513 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
3515 ice_aq_get_set_rss_lut_exit:
3520 * ice_aq_get_rss_lut
3521 * @hw: pointer to the hardware structure
3522 * @get_params: RSS LUT parameters used to specify which RSS LUT to get
3524 * get the RSS lookup table, PF or VSI type
3527 ice_aq_get_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *get_params)
3529 return __ice_aq_get_set_rss_lut(hw, get_params, false);
3533 * ice_aq_set_rss_lut
3534 * @hw: pointer to the hardware structure
3535 * @set_params: RSS LUT parameters used to specify how to set the RSS LUT
3537 * set the RSS lookup table, PF or VSI type
3540 ice_aq_set_rss_lut(struct ice_hw *hw, struct ice_aq_get_set_rss_lut_params *set_params)
3542 return __ice_aq_get_set_rss_lut(hw, set_params, true);
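/*
 * Usage sketch (illustrative only; vsi_handle and lut are assumed to be
 * caller-provided, with lut pointing at a 512-byte array of queue
 * indexes): programming a PF-type lookup table might look like:
 *
 *	struct ice_aq_get_set_rss_lut_params params = { 0 };
 *
 *	params.vsi_handle = vsi_handle;
 *	params.lut_type = ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF;
 *	params.lut_size = ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512;
 *	params.lut = lut;
 *	status = ice_aq_set_rss_lut(hw, &params);
 */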
3546 * __ice_aq_get_set_rss_key
3547 * @hw: pointer to the HW struct
3548 * @vsi_id: VSI FW index
3549 * @key: pointer to key info struct
3550 * @set: set true to set the key, false to get the key
3552 * get (0x0B04) or set (0x0B02) the RSS key per VSI
3555 ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
3556 struct ice_aqc_get_set_rss_keys *key,
3559 struct ice_aqc_get_set_rss_key *cmd_resp;
3560 u16 key_size = sizeof(*key);
3561 struct ice_aq_desc desc;
3563 cmd_resp = &desc.params.get_set_rss_key;
3566 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
3567 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3569 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
3572 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
3573 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
3574 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
3575 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
3577 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
3581 * ice_aq_get_rss_key
3582 * @hw: pointer to the HW struct
3583 * @vsi_handle: software VSI handle
3584 * @key: pointer to key info struct
3586 * get the RSS key per VSI
3589 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
3590 struct ice_aqc_get_set_rss_keys *key)
3592 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
3593 return ICE_ERR_PARAM;
3595 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3600 * ice_aq_set_rss_key
3601 * @hw: pointer to the HW struct
3602 * @vsi_handle: software VSI handle
3603 * @keys: pointer to key info struct
3605 * set the RSS key per VSI
3608 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
3609 struct ice_aqc_get_set_rss_keys *keys)
3611 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
3612 return ICE_ERR_PARAM;
3614 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3619 * ice_aq_add_lan_txq
3620 * @hw: pointer to the hardware structure
3621 * @num_qgrps: Number of added queue groups
3622 * @qg_list: list of queue groups to be added
3623 * @buf_size: size of buffer for indirect command
3624 * @cd: pointer to command details structure or NULL
3626 * Add Tx LAN queue (0x0C30)
3629 * Prior to calling add Tx LAN queue:
3630 * Initialize the following as part of the Tx queue context:
3631 * Completion queue ID if the queue uses Completion queue, Quanta profile,
3632 * Cache profile and Packet shaper profile.
3634 * After add Tx LAN queue AQ command is completed:
3635 * Interrupts should be associated with specific queues,
3636 * Association of Tx queue to Doorbell queue is not part of Add LAN Tx queue
3640 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3641 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
3642 struct ice_sq_cd *cd)
3644 struct ice_aqc_add_tx_qgrp *list;
3645 struct ice_aqc_add_txqs *cmd;
3646 struct ice_aq_desc desc;
3647 u16 i, sum_size = 0;
3649 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3651 cmd = &desc.params.add_txqs;
3653 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
3656 return ICE_ERR_PARAM;
3658 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3659 return ICE_ERR_PARAM;
3661 for (i = 0, list = qg_list; i < num_qgrps; i++) {
3662 sum_size += ice_struct_size(list, txqs, list->num_txqs);
3663 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
3667 if (buf_size != sum_size)
3668 return ICE_ERR_PARAM;
3670 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3672 cmd->num_qgrps = num_qgrps;
3674 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
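/*
 * Usage sketch (illustrative only; qg_buf, parent_teid and cd are assumed
 * to be prepared by the caller): the buffer size passed in must match the
 * sum of the variable-sized queue group entries, e.g. for one group with
 * a single queue:
 *
 *	u16 buf_size = ice_struct_size(qg_buf, txqs, 1);
 *
 *	qg_buf->num_txqs = 1;
 *	qg_buf->parent_teid = parent_teid;
 *	status = ice_aq_add_lan_txq(hw, 1, qg_buf, buf_size, cd);
 */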
3678 * ice_aq_dis_lan_txq
3679 * @hw: pointer to the hardware structure
3680 * @num_qgrps: number of groups in the list
3681 * @qg_list: the list of groups to disable
3682 * @buf_size: the total size of the qg_list buffer in bytes
3683 * @rst_src: if called due to reset, specifies the reset source
3684 * @vmvf_num: the relative VM or VF number that is undergoing the reset
3685 * @cd: pointer to command details structure or NULL
3687 * Disable LAN Tx queue (0x0C31)
3689 static enum ice_status
3690 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3691 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
3692 enum ice_disq_rst_src rst_src, u16 vmvf_num,
3693 struct ice_sq_cd *cd)
3695 struct ice_aqc_dis_txq_item *item;
3696 struct ice_aqc_dis_txqs *cmd;
3697 struct ice_aq_desc desc;
3698 enum ice_status status;
3701 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3702 cmd = &desc.params.dis_txqs;
3703 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
3705 /* qg_list can be NULL only in VM/VF reset flow */
3706 if (!qg_list && !rst_src)
3707 return ICE_ERR_PARAM;
3709 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3710 return ICE_ERR_PARAM;
3712 cmd->num_entries = num_qgrps;
3714 cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
3715 ICE_AQC_Q_DIS_TIMEOUT_M);
3719 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
3720 cmd->vmvf_and_timeout |=
3721 CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
3724 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
3725 /* In this case, FW expects vmvf_num to be absolute VF ID */
3726 cmd->vmvf_and_timeout |=
3727 CPU_TO_LE16((vmvf_num + hw->func_caps.vf_base_id) &
3728 ICE_AQC_Q_DIS_VMVF_NUM_M);
3735 /* flush pipe on timeout */
3736 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
3737 /* If no queue group info, we are in a reset flow. Issue the AQ */
3741 /* set RD bit to indicate that command buffer is provided by the driver
3742 * and it needs to be read by the firmware
3744 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3746 for (i = 0, item = qg_list; i < num_qgrps; i++) {
3747 u16 item_size = ice_struct_size(item, q_id, item->num_qs);
3749 /* If the num of queues is even, add 2 bytes of padding */
3750 if ((item->num_qs % 2) == 0)
3755 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
3759 return ICE_ERR_PARAM;
3762 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3765 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
3766 vmvf_num, hw->adminq.sq_last_status);
3768 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
3769 LE16_TO_CPU(qg_list[0].q_id[0]),
3770 hw->adminq.sq_last_status);
3776 * ice_aq_move_recfg_lan_txq
3777 * @hw: pointer to the hardware structure
3778 * @num_qs: number of queues to move/reconfigure
3779 * @is_move: true if this operation involves node movement
3780 * @is_tc_change: true if this operation involves a TC change
3781 * @subseq_call: true if this operation is a subsequent call
3782 * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN
3783 * @timeout: timeout in units of 100 usec (valid values 0-50)
3784 * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN
3785 * @buf: struct containing src/dest TEID and per-queue info
3786 * @buf_size: size of buffer for indirect command
3787 * @txqs_moved: out param, number of queues successfully moved
3788 * @cd: pointer to command details structure or NULL
3790 * Move / Reconfigure Tx LAN queues (0x0C32)
3793 ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
3794 bool is_tc_change, bool subseq_call, bool flush_pipe,
3795 u8 timeout, u32 *blocked_cgds,
3796 struct ice_aqc_move_txqs_data *buf, u16 buf_size,
3797 u8 *txqs_moved, struct ice_sq_cd *cd)
3799 struct ice_aqc_move_txqs *cmd;
3800 struct ice_aq_desc desc;
3801 enum ice_status status;
3803 cmd = &desc.params.move_txqs;
3804 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs);
3806 #define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50
3807 if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX)
3808 return ICE_ERR_PARAM;
3810 if (is_tc_change && !flush_pipe && !blocked_cgds)
3811 return ICE_ERR_PARAM;
3813 if (!is_move && !is_tc_change)
3814 return ICE_ERR_PARAM;
3816 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3819 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE;
3822 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE;
3825 cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL;
3828 cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE;
3830 cmd->num_qs = num_qs;
3831 cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) &
3832 ICE_AQC_Q_CMD_TIMEOUT_M);
3834 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3836 if (!status && txqs_moved)
3837 *txqs_moved = cmd->num_qs;
3839 if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN &&
3840 is_tc_change && !flush_pipe)
3841 *blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds);
3846 /* End of FW Admin Queue command wrappers */
3849 * ice_write_byte - write a byte to a packed context structure
3850 * @src_ctx: the context structure to read from
3851 * @dest_ctx: the context to be written to
3852 * @ce_info: a description of the struct to be filled
3855 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3857 u8 src_byte, dest_byte, mask;
3861 /* copy from the next struct field */
3862 from = src_ctx + ce_info->offset;
3864 /* prepare the bits and mask */
3865 shift_width = ce_info->lsb % 8;
3866 mask = (u8)(BIT(ce_info->width) - 1);
3871 /* shift to correct alignment */
3872 mask <<= shift_width;
3873 src_byte <<= shift_width;
3875 /* get the current bits from the target bit string */
3876 dest = dest_ctx + (ce_info->lsb / 8);
3878 ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
3880 dest_byte &= ~mask; /* get the bits not changing */
3881 dest_byte |= src_byte; /* add in the new bits */
3883 /* put it all back */
3884 ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
3888 * ice_write_word - write a word to a packed context structure
3889 * @src_ctx: the context structure to read from
3890 * @dest_ctx: the context to be written to
3891 * @ce_info: a description of the struct to be filled
3894 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3901 /* copy from the next struct field */
3902 from = src_ctx + ce_info->offset;
3904 /* prepare the bits and mask */
3905 shift_width = ce_info->lsb % 8;
3906 mask = BIT(ce_info->width) - 1;
3908 /* don't swizzle the bits until after the mask because the mask bits
3909 * will be in a different bit position on big endian machines
3911 src_word = *(u16 *)from;
3914 /* shift to correct alignment */
3915 mask <<= shift_width;
3916 src_word <<= shift_width;
3918 /* get the current bits from the target bit string */
3919 dest = dest_ctx + (ce_info->lsb / 8);
3921 ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA);
3923 dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */
3924 dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */
3926 /* put it all back */
3927 ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
3931 * ice_write_dword - write a dword to a packed context structure
3932 * @src_ctx: the context structure to read from
3933 * @dest_ctx: the context to be written to
3934 * @ce_info: a description of the struct to be filled
3937 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3939 u32 src_dword, mask;
3944 /* copy from the next struct field */
3945 from = src_ctx + ce_info->offset;
3947 /* prepare the bits and mask */
3948 shift_width = ce_info->lsb % 8;
3950 /* if the field width is exactly 32 on an x86 machine, then the shift
3951 * operation will not work because the SHL instruction's shift count is
3952 * masked to 5 bits, so the shift would do nothing
3954 if (ce_info->width < 32)
3955 mask = BIT(ce_info->width) - 1;
3959 /* don't swizzle the bits until after the mask because the mask bits
3960 * will be in a different bit position on big endian machines
3962 src_dword = *(u32 *)from;
3965 /* shift to correct alignment */
3966 mask <<= shift_width;
3967 src_dword <<= shift_width;
3969 /* get the current bits from the target bit string */
3970 dest = dest_ctx + (ce_info->lsb / 8);
3972 ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA);
3974 dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */
3975 dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */
3977 /* put it all back */
3978 ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
3982 * ice_write_qword - write a qword to a packed context structure
3983 * @src_ctx: the context structure to read from
3984 * @dest_ctx: the context to be written to
3985 * @ce_info: a description of the struct to be filled
3988 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3990 u64 src_qword, mask;
3995 /* copy from the next struct field */
3996 from = src_ctx + ce_info->offset;
3998 /* prepare the bits and mask */
3999 shift_width = ce_info->lsb % 8;
4001 /* if the field width is exactly 64 on an x86 machine, then the shift
4002 * operation will not work because the SHL instruction's shift count is
4003 * masked to 6 bits, so the shift would do nothing
4005 if (ce_info->width < 64)
4006 mask = BIT_ULL(ce_info->width) - 1;
4010 /* don't swizzle the bits until after the mask because the mask bits
4011 * will be in a different bit position on big endian machines
4013 src_qword = *(u64 *)from;
4016 /* shift to correct alignment */
4017 mask <<= shift_width;
4018 src_qword <<= shift_width;
4020 /* get the current bits from the target bit string */
4021 dest = dest_ctx + (ce_info->lsb / 8);
4023 ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA);
4025 dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */
4026 dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */
4028 /* put it all back */
4029 ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
4033 * ice_set_ctx - set context bits in packed structure
4034 * @hw: pointer to the hardware structure
4035 * @src_ctx: pointer to a generic non-packed context structure
4036 * @dest_ctx: pointer to memory for the packed structure
4037 * @ce_info: a description of the structure to be transformed
4040 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
4041 const struct ice_ctx_ele *ce_info)
4045 for (f = 0; ce_info[f].width; f++) {
4046 /* We have to deal with each element of the context individually,
4047 * using the correct size, so that the packed result is correct
4048 * regardless of the endianness of the machine.
4050 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
4051 ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
4052 f, ce_info[f].width, ce_info[f].size_of);
4055 switch (ce_info[f].size_of) {
4057 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
4060 ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
4063 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
4066 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
4069 return ICE_ERR_INVAL_SIZE;
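/*
 * Illustrative sketch (not part of the driver): a typical ice_set_ctx() use,
 * packing a Tx LAN queue context into the byte layout expected by firmware.
 * The struct ice_tlan_ctx fields, the ice_tlan_ctx_info descriptor table and
 * the buffer size macro are assumed to come from the LAN Tx/Rx headers; the
 * local names below are placeholders.
 *
 *	struct ice_tlan_ctx tlan_ctx = { 0 };
 *	u8 ctx_buf[ICE_TXQ_CTX_SZ] = { 0 };
 *	enum ice_status status;
 *
 *	tlan_ctx.base = ring_dma >> ICE_TLAN_CTX_BASE_S;
 *	tlan_ctx.qlen = ring_len;
 *	status = ice_set_ctx(hw, (u8 *)&tlan_ctx, ctx_buf, ice_tlan_ctx_info);
 *	if (status)
 *		return status;
 */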
4077 * ice_read_byte - read context byte into struct
4078 * @src_ctx: the context structure to read from
4079 * @dest_ctx: the context to be written to
4080 * @ce_info: a description of the struct to be filled
4083 ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4089 /* prepare the bits and mask */
4090 shift_width = ce_info->lsb % 8;
4091 mask = (u8)(BIT(ce_info->width) - 1);
4093 /* shift to correct alignment */
4094 mask <<= shift_width;
4096 /* get the current bits from the src bit string */
4097 src = src_ctx + (ce_info->lsb / 8);
4099 ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
4101 dest_byte &= ~(mask);
4103 dest_byte >>= shift_width;
4105 /* get the address from the struct field */
4106 target = dest_ctx + ce_info->offset;
4108 /* put it back in the struct */
4109 ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
4113 * ice_read_word - read context word into struct
4114 * @src_ctx: the context structure to read from
4115 * @dest_ctx: the context to be written to
4116 * @ce_info: a description of the struct to be filled
4119 ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4121 u16 dest_word, mask;
4126 /* prepare the bits and mask */
4127 shift_width = ce_info->lsb % 8;
4128 mask = BIT(ce_info->width) - 1;
4130 /* shift to correct alignment */
4131 mask <<= shift_width;
4133 /* get the current bits from the src bit string */
4134 src = src_ctx + (ce_info->lsb / 8);
4136 ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA);
4138 /* the data in the memory is stored as little endian so mask it
4141 src_word &= ~(CPU_TO_LE16(mask));
4143 /* get the data back into host order before shifting */
4144 dest_word = LE16_TO_CPU(src_word);
4146 dest_word >>= shift_width;
4148 /* get the address from the struct field */
4149 target = dest_ctx + ce_info->offset;
4151 /* put it back in the struct */
4152 ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
4156 * ice_read_dword - read context dword into struct
4157 * @src_ctx: the context structure to read from
4158 * @dest_ctx: the context to be written to
4159 * @ce_info: a description of the struct to be filled
4162 ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4164 u32 dest_dword, mask;
4169 /* prepare the bits and mask */
4170 shift_width = ce_info->lsb % 8;
4172 /* if the field width is exactly 32 on an x86 machine, then the shift
4173 * operation will not work because the SHL instruction's shift count is
4174 * masked to 5 bits, so the shift would do nothing
4176 if (ce_info->width < 32)
4177 mask = BIT(ce_info->width) - 1;
4181 /* shift to correct alignment */
4182 mask <<= shift_width;
4184 /* get the current bits from the src bit string */
4185 src = src_ctx + (ce_info->lsb / 8);
4187 ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA);
4189 /* the data in the memory is stored as little endian so mask it
4192 src_dword &= ~(CPU_TO_LE32(mask));
4194 /* get the data back into host order before shifting */
4195 dest_dword = LE32_TO_CPU(src_dword);
4197 dest_dword >>= shift_width;
4199 /* get the address from the struct field */
4200 target = dest_ctx + ce_info->offset;
4202 /* put it back in the struct */
4203 ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
4207 * ice_read_qword - read context qword into struct
4208 * @src_ctx: the context structure to read from
4209 * @dest_ctx: the context to be written to
4210 * @ce_info: a description of the struct to be filled
4213 ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4215 u64 dest_qword, mask;
4220 /* prepare the bits and mask */
4221 shift_width = ce_info->lsb % 8;
4223 /* if the field width is exactly 64 on an x86 machine, then the shift
4224 * operation will not work because the SHL instruction's shift count is
4225 * masked to 6 bits, so the shift would do nothing
4227 if (ce_info->width < 64)
4228 mask = BIT_ULL(ce_info->width) - 1;
4232 /* shift to correct alignment */
4233 mask <<= shift_width;
4235 /* get the current bits from the src bit string */
4236 src = src_ctx + (ce_info->lsb / 8);
4238 ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA);
4240 /* the data in the memory is stored as little endian so mask it
4243 src_qword &= ~(CPU_TO_LE64(mask));
4245 /* get the data back into host order before shifting */
4246 dest_qword = LE64_TO_CPU(src_qword);
4248 dest_qword >>= shift_width;
4250 /* get the address from the struct field */
4251 target = dest_ctx + ce_info->offset;
4253 /* put it back in the struct */
4254 ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
4258 * ice_get_ctx - extract context bits from a packed structure
4259 * @src_ctx: pointer to a generic packed context structure
4260 * @dest_ctx: pointer to a generic non-packed context structure
4261 * @ce_info: a description of the structure to be read from
4264 ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4268 for (f = 0; ce_info[f].width; f++) {
4269 switch (ce_info[f].size_of) {
4271 ice_read_byte(src_ctx, dest_ctx, &ce_info[f]);
4274 ice_read_word(src_ctx, dest_ctx, &ce_info[f]);
4277 ice_read_dword(src_ctx, dest_ctx, &ce_info[f]);
4280 ice_read_qword(src_ctx, dest_ctx, &ce_info[f]);
4283 /* nothing to do, just keep going */
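/*
 * Illustrative sketch (not part of the driver): the inverse of the packing
 * example above. A packed context buffer read back from hardware can be
 * expanded into the unpacked structure using the same ce_info descriptor
 * table; the names below are placeholders.
 *
 *	struct ice_tlan_ctx tlan_ctx = { 0 };
 *
 *	ice_get_ctx(ctx_buf, (u8 *)&tlan_ctx, ce_info);
 */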
4292 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
4293 * @hw: pointer to the HW struct
4294 * @vsi_handle: software VSI handle
4296 * @q_handle: software queue handle
4299 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
4301 struct ice_vsi_ctx *vsi;
4302 struct ice_q_ctx *q_ctx;
4304 vsi = ice_get_vsi_ctx(hw, vsi_handle);
4307 if (q_handle >= vsi->num_lan_q_entries[tc])
4309 if (!vsi->lan_q_ctx[tc])
4311 q_ctx = vsi->lan_q_ctx[tc];
4312 return &q_ctx[q_handle];
4317 * @pi: port information structure
4318 * @vsi_handle: software VSI handle
4320 * @q_handle: software queue handle
4321 * @num_qgrps: Number of added queue groups
4322 * @buf: list of queue groups to be added
4323 * @buf_size: size of buffer for indirect command
4324 * @cd: pointer to command details structure or NULL
4326 * This function adds one LAN queue
4329 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
4330 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
4331 struct ice_sq_cd *cd)
4333 struct ice_aqc_txsched_elem_data node = { 0 };
4334 struct ice_sched_node *parent;
4335 struct ice_q_ctx *q_ctx;
4336 enum ice_status status;
4339 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4342 if (num_qgrps > 1 || buf->num_txqs > 1)
4343 return ICE_ERR_MAX_LIMIT;
4347 if (!ice_is_vsi_valid(hw, vsi_handle))
4348 return ICE_ERR_PARAM;
4350 ice_acquire_lock(&pi->sched_lock);
4352 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
4354 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
4356 status = ICE_ERR_PARAM;
4360 /* find a parent node */
4361 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
4362 ICE_SCHED_NODE_OWNER_LAN);
4364 status = ICE_ERR_PARAM;
4368 buf->parent_teid = parent->info.node_teid;
4369 node.parent_teid = parent->info.node_teid;
4370 /* Mark the values in the "generic" section as valid. The default
4371 * value in the "generic" section is zero. This means that:
4372 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
4373 * - Priority among siblings is 0, indicated by Bits 1-3.
4374 * - WFQ is used, indicated by Bit 4.
4375 * - 0 Adjustment value is used in PSM credit update flow, indicated by
4377 * - Bit 7 is reserved.
4378 * Without setting the generic section as valid in valid_sections, the
4379 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
4381 buf->txqs[0].info.valid_sections =
4382 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
4383 ICE_AQC_ELEM_VALID_EIR;
4384 buf->txqs[0].info.generic = 0;
4385 buf->txqs[0].info.cir_bw.bw_profile_idx =
4386 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
4387 buf->txqs[0].info.cir_bw.bw_alloc =
4388 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
4389 buf->txqs[0].info.eir_bw.bw_profile_idx =
4390 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
4391 buf->txqs[0].info.eir_bw.bw_alloc =
4392 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
4394 /* add the LAN queue */
4395 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
4396 if (status != ICE_SUCCESS) {
4397 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
4398 LE16_TO_CPU(buf->txqs[0].txq_id),
4399 hw->adminq.sq_last_status);
4403 node.node_teid = buf->txqs[0].q_teid;
4404 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
4405 q_ctx->q_handle = q_handle;
4406 q_ctx->q_teid = LE32_TO_CPU(node.node_teid);
4408 /* add a leaf node into scheduler tree queue layer */
4409 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
4411 status = ice_sched_replay_q_bw(pi, q_ctx);
4414 ice_release_lock(&pi->sched_lock);
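/*
 * Illustrative sketch (not part of the driver): how a queue-start path might
 * call ice_ena_vsi_txq() for a single queue. The ice_aqc_add_tx_qgrp buffer
 * carries the packed queue context built with ice_set_ctx(); the local names
 * and the error handling below are placeholders.
 *
 *	struct ice_aqc_add_tx_qgrp *qg_buf;
 *	u16 buf_len = ice_struct_size(qg_buf, txqs, 1);
 *
 *	qg_buf = (struct ice_aqc_add_tx_qgrp *)ice_malloc(hw, buf_len);
 *	if (!qg_buf)
 *		return ICE_ERR_NO_MEMORY;
 *	qg_buf->num_txqs = 1;
 *	qg_buf->txqs[0].txq_id = CPU_TO_LE16(q_id);
 *	ice_set_ctx(hw, (u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
 *		    ice_tlan_ctx_info);
 *	status = ice_ena_vsi_txq(pi, vsi_handle, tc, q_handle, 1, qg_buf,
 *				 buf_len, NULL);
 *	ice_free(hw, qg_buf);
 */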
4420 * @pi: port information structure
4421 * @vsi_handle: software VSI handle
4423 * @num_queues: number of queues
4424 * @q_handles: pointer to software queue handle array
4425 * @q_ids: pointer to the q_id array
4426 * @q_teids: pointer to queue node teids
4427 * @rst_src: if called due to reset, specifies the reset source
4428 * @vmvf_num: the relative VM or VF number that is undergoing the reset
4429 * @cd: pointer to command details structure or NULL
4431 * This function removes queues and their corresponding nodes in SW DB
4434 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
4435 u16 *q_handles, u16 *q_ids, u32 *q_teids,
4436 enum ice_disq_rst_src rst_src, u16 vmvf_num,
4437 struct ice_sq_cd *cd)
4439 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
4440 struct ice_aqc_dis_txq_item *qg_list;
4441 struct ice_q_ctx *q_ctx;
4445 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4451 /* if the queues are already disabled but the disable queue command
4452 * still has to be sent to complete the VF reset, then call
4453 * ice_aq_dis_lan_txq without any queue information
4456 return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
4461 buf_size = ice_struct_size(qg_list, q_id, 1);
4462 qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, buf_size);
4464 return ICE_ERR_NO_MEMORY;
4466 ice_acquire_lock(&pi->sched_lock);
4468 for (i = 0; i < num_queues; i++) {
4469 struct ice_sched_node *node;
4471 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
4474 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
4476 ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle %d\n",
4480 if (q_ctx->q_handle != q_handles[i]) {
4481 ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
4482 q_ctx->q_handle, q_handles[i]);
4485 qg_list->parent_teid = node->info.parent_teid;
4486 qg_list->num_qs = 1;
4487 qg_list->q_id[0] = CPU_TO_LE16(q_ids[i]);
4488 status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
4491 if (status != ICE_SUCCESS)
4493 ice_free_sched_node(pi, node);
4494 q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
4496 ice_release_lock(&pi->sched_lock);
4497 ice_free(hw, qg_list);
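/*
 * Illustrative sketch (not part of the driver): the teardown counterpart to
 * ice_ena_vsi_txq(). Single-entry arrays are passed for one queue and no
 * reset source is involved; the local names below are placeholders.
 *
 *	u16 q_handles[1] = { q_handle };
 *	u16 q_ids[1] = { q_id };
 *	u32 q_teids[1] = { q_teid };
 *
 *	status = ice_dis_vsi_txq(pi, vsi_handle, tc, 1, q_handles, q_ids,
 *				 q_teids, ICE_NO_RESET, 0, NULL);
 */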
4502 * ice_cfg_vsi_qs - configure the new/existing VSI queues
4503 * @pi: port information structure
4504 * @vsi_handle: software VSI handle
4505 * @tc_bitmap: TC bitmap
4506 * @maxqs: max queues array per TC
4507 * @owner: LAN or RDMA
4509 * This function adds/updates the VSI queues per TC.
4511 static enum ice_status
4512 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
4513 u16 *maxqs, u8 owner)
4515 enum ice_status status = ICE_SUCCESS;
4518 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4521 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
4522 return ICE_ERR_PARAM;
4524 ice_acquire_lock(&pi->sched_lock);
4526 ice_for_each_traffic_class(i) {
4527 /* configuration is possible only if TC node is present */
4528 if (!ice_sched_get_tc_node(pi, i))
4531 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
4532 ice_is_tc_ena(tc_bitmap, i));
4537 ice_release_lock(&pi->sched_lock);
4542 * ice_cfg_vsi_lan - configure VSI LAN queues
4543 * @pi: port information structure
4544 * @vsi_handle: software VSI handle
4545 * @tc_bitmap: TC bitmap
4546 * @max_lanqs: max LAN queues array per TC
4548 * This function adds/updates the VSI LAN queues per TC.
4551 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
4554 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
4555 ICE_SCHED_NODE_OWNER_LAN);
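/*
 * Illustrative sketch (not part of the driver): programming the scheduler
 * with the per-TC maximum LAN queue counts for a VSI. The array is indexed
 * by TC; a single default TC is assumed here and the values below are
 * placeholders.
 *
 *	u16 max_lanqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
 *
 *	max_lanqs[0] = num_tx_queues;
 *	status = ice_cfg_vsi_lan(pi, vsi_handle, ICE_DFLT_TRAFFIC_CLASS,
 *				 max_lanqs);
 */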
4559 * ice_is_main_vsi - checks whether the VSI is main VSI
4560 * @hw: pointer to the HW struct
4561 * @vsi_handle: VSI handle
4563 * Checks whether the VSI is the main VSI (the first PF VSI created on
4566 static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle)
4568 return vsi_handle == ICE_MAIN_VSI_HANDLE && hw->vsi_ctx[vsi_handle];
4572 * ice_replay_pre_init - replay pre initialization
4573 * @hw: pointer to the HW struct
4574 * @sw: pointer to switch info struct for which function initializes filters
4576 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
4578 static enum ice_status
4579 ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw)
4581 enum ice_status status;
4584 /* Delete old entries from the replay filter list head if there are any */
4585 ice_rm_sw_replay_rule_info(hw, sw);
4586 /* At the start of replay, move entries into the replay_rules list;
4587 * this allows rule entries to be added back to the filt_rules list,
4588 * which is the operational list.
4590 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
4591 LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
4592 &sw->recp_list[i].filt_replay_rules);
4593 ice_sched_replay_agg_vsi_preinit(hw);
4595 status = ice_sched_replay_root_node_bw(hw->port_info);
4599 return ice_sched_replay_tc_node_bw(hw->port_info);
4603 * ice_replay_vsi - replay VSI configuration
4604 * @hw: pointer to the HW struct
4605 * @vsi_handle: driver VSI handle
4607 * Restore all VSI configuration after reset. It is required to call this
4608 * function with main VSI first.
4610 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
4612 struct ice_switch_info *sw = hw->switch_info;
4613 struct ice_port_info *pi = hw->port_info;
4614 enum ice_status status;
4616 if (!ice_is_vsi_valid(hw, vsi_handle))
4617 return ICE_ERR_PARAM;
4619 /* Replay pre-initialization if there is any */
4620 if (ice_is_main_vsi(hw, vsi_handle)) {
4621 status = ice_replay_pre_init(hw, sw);
4625 /* Replay per VSI all RSS configurations */
4626 status = ice_replay_rss_cfg(hw, vsi_handle);
4629 /* Replay per VSI all filters */
4630 status = ice_replay_vsi_all_fltr(hw, pi, vsi_handle);
4632 status = ice_replay_vsi_agg(hw, vsi_handle);
4637 * ice_replay_post - post replay configuration cleanup
4638 * @hw: pointer to the HW struct
4640 * Post replay cleanup.
4642 void ice_replay_post(struct ice_hw *hw)
4644 /* Delete old entries from replay filter list head */
4645 ice_rm_all_sw_replay_rule_info(hw);
4646 ice_sched_replay_agg(hw);
4650 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
4651 * @hw: ptr to the hardware info
4652 * @reg: offset of 64 bit HW register to read from
4653 * @prev_stat_loaded: bool to specify if previous stats are loaded
4654 * @prev_stat: ptr to previous loaded stat value
4655 * @cur_stat: ptr to current stat value
4658 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4659 u64 *prev_stat, u64 *cur_stat)
4661 u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
4663 /* device stats are not reset at PFR, so they likely will not be zero
4664 * when the driver starts. Thus, save the value from the first read
4665 * without adding to the statistic value so that we report stats that
4666 * count up from zero.
4668 if (!prev_stat_loaded) {
4669 *prev_stat = new_data;
4673 /* Calculate the difference between the new and old values, and then
4674 * add it to the software stat value.
4676 if (new_data >= *prev_stat)
4677 *cur_stat += new_data - *prev_stat;
4679 /* to manage the potential roll-over */
4680 *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
4682 /* Update the previously stored value to prepare for next read */
4683 *prev_stat = new_data;
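/*
 * Worked example (illustrative): with a 40-bit counter, a previous read of
 * 0xFFFFFFFFF0 followed by a new read of 0x10 means the register wrapped.
 * The roll-over branch computes (0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0 = 0x20,
 * so 32 is added to the software statistic instead of a bogus delta.
 */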
4687 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
4688 * @hw: ptr to the hardware info
4689 * @reg: offset of HW register to read from
4690 * @prev_stat_loaded: bool to specify if previous stats are loaded
4691 * @prev_stat: ptr to previous loaded stat value
4692 * @cur_stat: ptr to current stat value
4695 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4696 u64 *prev_stat, u64 *cur_stat)
4700 new_data = rd32(hw, reg);
4702 /* device stats are not reset at PFR, so they likely will not be zero
4703 * when the driver starts. Thus, save the value from the first read
4704 * without adding to the statistic value so that we report stats that
4705 * count up from zero.
4707 if (!prev_stat_loaded) {
4708 *prev_stat = new_data;
4712 /* Calculate the difference between the new and old values, and then
4713 * add it to the software stat value.
4715 if (new_data >= *prev_stat)
4716 *cur_stat += new_data - *prev_stat;
4718 /* to manage the potential roll-over */
4719 *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
4721 /* Update the previously stored value to prepare for next read */
4722 *prev_stat = new_data;
4726 * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values
4727 * @hw: ptr to the hardware info
4728 * @vsi_handle: VSI handle
4729 * @prev_stat_loaded: bool to specify if the previous stat values are loaded
4730 * @cur_stats: ptr to current stats structure
4732 * The GLV_REPC statistic register actually tracks two 16-bit statistics, and
4733 * thus cannot be read using the normal ice_stat_update32 function.
4735 * Read the GLV_REPC register associated with the given VSI, and update the
4736 * rx_no_desc and rx_error values in the ice_eth_stats structure.
4738 * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be
4739 * cleared each time it's read.
4741 * Note that the GLV_RDPC register also counts the causes that would trigger
4742 * GLV_REPC. However, it does not give the finer-grained detail about why the
4743 * packets are being dropped. The GLV_REPC values can be used to distinguish
4744 * whether Rx packets are dropped due to errors or due to no available
4748 ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
4749 struct ice_eth_stats *cur_stats)
4751 u16 vsi_num, no_desc, error_cnt;
4754 if (!ice_is_vsi_valid(hw, vsi_handle))
4757 vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
4759 /* If we haven't loaded stats yet, just clear the current value */
4760 if (!prev_stat_loaded) {
4761 wr32(hw, GLV_REPC(vsi_num), 0);
4765 repc = rd32(hw, GLV_REPC(vsi_num));
4766 no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S;
4767 error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S;
4769 /* Clear the count by writing to the stats register */
4770 wr32(hw, GLV_REPC(vsi_num), 0);
4772 cur_stats->rx_no_desc += no_desc;
4773 cur_stats->rx_errors += error_cnt;
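/*
 * Illustrative sketch (not part of the driver): how a periodic stats task
 * might combine the helpers above for one port statistic and one VSI. A port
 * byte counter such as GLPRT_GORCL is assumed to be readable this way, and
 * the surrounding names are placeholders.
 *
 *	ice_stat_update40(hw, GLPRT_GORCL(port), prev_loaded,
 *			  &prev_ps->eth.rx_bytes, &cur_ps->eth.rx_bytes);
 *	ice_stat_update_repc(hw, vsi_handle, prev_loaded, &cur_eth_stats);
 */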
4777 * ice_aq_alternate_write
4778 * @hw: pointer to the hardware structure
4779 * @reg_addr0: address of first dword to be written
4780 * @reg_val0: value to be written under 'reg_addr0'
4781 * @reg_addr1: address of second dword to be written
4782 * @reg_val1: value to be written under 'reg_addr1'
4784 * Write one or two dwords to the alternate structure. Fields are indicated
4785 * by 'reg_addr0' and 'reg_addr1' register numbers.
4788 ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
4789 u32 reg_addr1, u32 reg_val1)
4791 struct ice_aqc_read_write_alt_direct *cmd;
4792 struct ice_aq_desc desc;
4793 enum ice_status status;
4795 cmd = &desc.params.read_write_alt_direct;
4797 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_alt_direct);
4798 cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
4799 cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
4800 cmd->dword0_value = CPU_TO_LE32(reg_val0);
4801 cmd->dword1_value = CPU_TO_LE32(reg_val1);
4803 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
4809 * ice_aq_alternate_read
4810 * @hw: pointer to the hardware structure
4811 * @reg_addr0: address of first dword to be read
4812 * @reg_val0: pointer for data read from 'reg_addr0'
4813 * @reg_addr1: address of second dword to be read
4814 * @reg_val1: pointer for data read from 'reg_addr1'
4816 * Read one or two dwords from the alternate structure. Fields are indicated
4817 * by the 'reg_addr0' and 'reg_addr1' register numbers. If the 'reg_val1'
4818 * pointer is not passed, only the register at 'reg_addr0' is read.
4821 ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0,
4822 u32 reg_addr1, u32 *reg_val1)
4824 struct ice_aqc_read_write_alt_direct *cmd;
4825 struct ice_aq_desc desc;
4826 enum ice_status status;
4828 cmd = &desc.params.read_write_alt_direct;
4831 return ICE_ERR_PARAM;
4833 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_alt_direct);
4834 cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
4835 cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
4837 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
4839 if (status == ICE_SUCCESS) {
4840 *reg_val0 = LE32_TO_CPU(cmd->dword0_value);
4843 *reg_val1 = LE32_TO_CPU(cmd->dword1_value);
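/*
 * Illustrative sketch (not part of the driver): writing a pair of dwords to
 * the alternate structure and reading them back. The addresses are arbitrary
 * placeholders, not real alternate-structure offsets.
 *
 *	u32 val0, val1;
 *
 *	status = ice_aq_alternate_write(hw, addr0, 0x12345678, addr1, 0);
 *	if (!status)
 *		status = ice_aq_alternate_read(hw, addr0, &val0, addr1, &val1);
 */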
4850 * ice_aq_alternate_write_done
4851 * @hw: pointer to the HW structure.
4852 * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
4853 * @reset_needed: indicates whether the SW should trigger a GLOBAL reset
4855 * Indicates to the FW that alternate structures have been changed.
4858 ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode, bool *reset_needed)
4860 struct ice_aqc_done_alt_write *cmd;
4861 struct ice_aq_desc desc;
4862 enum ice_status status;
4864 cmd = &desc.params.done_alt_write;
4867 return ICE_ERR_PARAM;
4869 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_done_alt_write);
4870 cmd->flags = bios_mode;
4872 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
4874 *reset_needed = (LE16_TO_CPU(cmd->flags) &
4875 ICE_AQC_RESP_RESET_NEEDED) != 0;
4881 * ice_aq_alternate_clear
4882 * @hw: pointer to the HW structure.
4884 * Clear the alternate structures of the port from which the function
4887 enum ice_status ice_aq_alternate_clear(struct ice_hw *hw)
4889 struct ice_aq_desc desc;
4890 enum ice_status status;
4892 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_port_alt_write);
4894 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
4900 * ice_sched_query_elem - query element information from HW
4901 * @hw: pointer to the HW struct
4902 * @node_teid: node TEID to be queried
4903 * @buf: buffer to store the element information
4905 * This function queries HW element information
4908 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
4909 struct ice_aqc_txsched_elem_data *buf)
4911 u16 buf_size, num_elem_ret = 0;
4912 enum ice_status status;
4914 buf_size = sizeof(*buf);
4915 ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
4916 buf->node_teid = CPU_TO_LE32(node_teid);
4917 status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
4919 if (status != ICE_SUCCESS || num_elem_ret != 1)
4920 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
4925 * ice_get_fw_mode - returns FW mode
4926 * @hw: pointer to the HW struct
4928 enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
4930 #define ICE_FW_MODE_DBG_M BIT(0)
4931 #define ICE_FW_MODE_REC_M BIT(1)
4932 #define ICE_FW_MODE_ROLLBACK_M BIT(2)
4935 /* check the current FW mode */
4936 fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M;
4938 if (fw_mode & ICE_FW_MODE_DBG_M)
4939 return ICE_FW_MODE_DBG;
4940 else if (fw_mode & ICE_FW_MODE_REC_M)
4941 return ICE_FW_MODE_REC;
4942 else if (fw_mode & ICE_FW_MODE_ROLLBACK_M)
4943 return ICE_FW_MODE_ROLLBACK;
4945 return ICE_FW_MODE_NORMAL;
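/*
 * Illustrative sketch (not part of the driver): a caller reacting to the FW
 * mode, for example logging when firmware runs in rollback mode.
 *
 *	if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
 *		ice_debug(hw, ICE_DBG_INIT, "firmware is in rollback mode\n");
 */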
4949 * ice_get_cur_lldp_persist_status
4950 * @hw: pointer to the HW struct
4951 * @lldp_status: return value of LLDP persistent status
4953 * Get the current LLDP persistent status
4956 ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
4958 struct ice_port_info *pi = hw->port_info;
4959 enum ice_status ret;
4964 return ICE_ERR_BAD_PTR;
4966 ret = ice_acquire_nvm(hw, ICE_RES_READ);
4970 ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LLDP_PRESERVED_MOD_ID,
4971 ICE_AQC_NVM_CUR_LLDP_PERSIST_RD_OFFSET,
4972 ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data,
4975 data = LE32_TO_CPU(raw_data);
4976 mask = ICE_AQC_NVM_LLDP_STATUS_M <<
4977 (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
4979 *lldp_status = data >>
4980 (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
4983 ice_release_nvm(hw);
4989 * ice_get_dflt_lldp_persist_status
4990 * @hw: pointer to the HW struct
4991 * @lldp_status: return value of LLDP persistent status
4993 * Get the default LLDP persistent status
4996 ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
4998 struct ice_port_info *pi = hw->port_info;
4999 u32 data, mask, loc_data, loc_data_tmp;
5000 enum ice_status ret;
5001 __le16 loc_raw_data;
5005 return ICE_ERR_BAD_PTR;
5007 ret = ice_acquire_nvm(hw, ICE_RES_READ);
5011 /* Read the offset of EMP_SR_PTR */
5012 ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
5013 ICE_AQC_NVM_EMP_SR_PTR_OFFSET,
5014 ICE_AQC_NVM_EMP_SR_PTR_RD_LEN,
5015 &loc_raw_data, false, true, NULL);
5019 loc_data = LE16_TO_CPU(loc_raw_data);
5020 if (loc_data & ICE_AQC_NVM_EMP_SR_PTR_TYPE_M) {
5021 loc_data &= ICE_AQC_NVM_EMP_SR_PTR_M;
5022 loc_data *= ICE_AQC_NVM_SECTOR_UNIT;
5024 loc_data *= ICE_AQC_NVM_WORD_UNIT;
5027 /* Read the offset of LLDP configuration pointer */
5028 loc_data += ICE_AQC_NVM_LLDP_CFG_PTR_OFFSET;
5029 ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
5030 ICE_AQC_NVM_LLDP_CFG_PTR_RD_LEN, &loc_raw_data,
5035 loc_data_tmp = LE16_TO_CPU(loc_raw_data);
5036 loc_data_tmp *= ICE_AQC_NVM_WORD_UNIT;
5037 loc_data += loc_data_tmp;
5039 /* We need to skip LLDP configuration section length (2 bytes) */
5040 loc_data += ICE_AQC_NVM_LLDP_CFG_HEADER_LEN;
5042 /* Read the LLDP Default Configure */
5043 ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
5044 ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data, false,
5047 data = LE32_TO_CPU(raw_data);
5048 mask = ICE_AQC_NVM_LLDP_STATUS_M <<
5049 (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
5051 *lldp_status = data >>
5052 (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
5056 ice_release_nvm(hw);
5062 * ice_fw_supports_link_override
5063 * @hw: pointer to the hardware structure
5065 * Checks if the firmware supports link override
5067 bool ice_fw_supports_link_override(struct ice_hw *hw)
5069 if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
5070 if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
5072 if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
5073 hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
5075 } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
5083 * ice_get_link_default_override
5084 * @ldo: pointer to the link default override struct
5085 * @pi: pointer to the port info struct
5087 * Gets the link default override for a port
5090 ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
5091 struct ice_port_info *pi)
5093 u16 i, tlv, tlv_len, tlv_start, buf, offset;
5094 struct ice_hw *hw = pi->hw;
5095 enum ice_status status;
5097 status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
5098 ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
5100 ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
5104 /* Each port has its own config; calculate for our port */
5105 tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
5106 ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
5108 /* link options first */
5109 status = ice_read_sr_word(hw, tlv_start, &buf);
5111 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
5114 ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
5115 ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
5116 ICE_LINK_OVERRIDE_PHY_CFG_S;
5118 /* link PHY config */
5119 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
5120 status = ice_read_sr_word(hw, offset, &buf);
5122 ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
5125 ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
5128 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
5129 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
5130 status = ice_read_sr_word(hw, (offset + i), &buf);
5132 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
5135 /* shift 16 bits at a time to fill 64 bits */
5136 ldo->phy_type_low |= ((u64)buf << (i * 16));
5139 /* PHY types high */
5140 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
5141 ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
5142 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
5143 status = ice_read_sr_word(hw, (offset + i), &buf);
5145 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
5148 /* shift 16 bits at a time to fill 64 bits */
5149 ldo->phy_type_high |= ((u64)buf << (i * 16));
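/*
 * Worked example (illustrative): four consecutive 16-bit Shadow RAM reads of
 * 0x1111, 0x2222, 0x3333 and 0x4444 are shifted by 0, 16, 32 and 48 bits and
 * OR'd together, assembling the 64-bit value 0x4444333322221111 lowest word
 * first.
 */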
5156 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
5157 * @caps: get PHY capability data
5159 bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
5161 if (caps->caps & ICE_AQC_PHY_AN_MODE ||
5162 caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
5163 ICE_AQC_PHY_AN_EN_CLAUSE73 |
5164 ICE_AQC_PHY_AN_EN_CLAUSE37))
5171 * ice_aq_set_lldp_mib - Set the LLDP MIB
5172 * @hw: pointer to the HW struct
5173 * @mib_type: Local, Remote or both Local and Remote MIBs
5174 * @buf: pointer to the caller-supplied buffer holding the MIB block to set
5175 * @buf_size: size of the buffer (in bytes)
5176 * @cd: pointer to command details structure or NULL
5178 * Set the LLDP MIB. (0x0A08)
5181 ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
5182 struct ice_sq_cd *cd)
5184 struct ice_aqc_lldp_set_local_mib *cmd;
5185 struct ice_aq_desc desc;
5187 cmd = &desc.params.lldp_set_mib;
5189 if (buf_size == 0 || !buf)
5190 return ICE_ERR_PARAM;
5192 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
5194 desc.flags |= CPU_TO_LE16((u16)ICE_AQ_FLAG_RD);
5195 desc.datalen = CPU_TO_LE16(buf_size);
5197 cmd->type = mib_type;
5198 cmd->length = CPU_TO_LE16(buf_size);
5200 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
5204 * ice_fw_supports_lldp_fltr_ctrl - check if FW version supports lldp_fltr_ctrl
5205 * @hw: pointer to HW struct
5207 bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
5209 if (hw->mac_type != ICE_MAC_E810)
5212 if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
5213 if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
5215 if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
5216 hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
5218 } else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
5225 * ice_lldp_fltr_add_remove - add or remove an LLDP Rx switch filter
5226 * @hw: pointer to HW struct
5227 * @vsi_num: absolute HW index for VSI
5228 * @add: true to add a filter, false to remove it
5231 ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
5233 struct ice_aqc_lldp_filter_ctrl *cmd;
5234 struct ice_aq_desc desc;
5236 cmd = &desc.params.lldp_filter_ctrl;
5238 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
5241 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
5243 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
5245 cmd->vsi_num = CPU_TO_LE16(vsi_num);
5247 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
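/*
 * Illustrative sketch (not part of the driver): adding the LLDP Rx filter
 * for a VSI only when the firmware supports the filter control command. The
 * VSI handle to HW VSI number conversion uses ice_get_hw_vsi_num(); the
 * surrounding names are placeholders.
 *
 *	if (ice_fw_supports_lldp_fltr_ctrl(hw))
 *		status = ice_lldp_fltr_add_remove(hw,
 *						  ice_get_hw_vsi_num(hw, vsi_handle),
 *						  true);
 */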