1 /* SPDX-License-Identifier: BSD-3-Clause */
2 /* Copyright (c) 2020, Intel Corporation
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the Intel Corporation nor the names of its
16 * contributors may be used to endorse or promote products derived from
17 * this software without specific prior written permission.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
33 #include "ice_common.h"
34 #include "ice_sched.h"
35 #include "ice_adminq_cmd.h"
38 #include "ice_switch.h"
40 #define ICE_PF_RESET_WAIT_COUNT 300
43 * ice_set_mac_type - Sets MAC type
44 * @hw: pointer to the HW structure
46 * This function sets the MAC type of the adapter based on the
47 * vendor ID and device ID stored in the HW structure.
49 enum ice_status ice_set_mac_type(struct ice_hw *hw)
51 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
53 if (hw->vendor_id != ICE_INTEL_VENDOR_ID)
54 return ICE_ERR_DEVICE_NOT_SUPPORTED;
56 switch (hw->device_id) {
57 case ICE_DEV_ID_E810C_BACKPLANE:
58 case ICE_DEV_ID_E810C_QSFP:
59 case ICE_DEV_ID_E810C_SFP:
60 case ICE_DEV_ID_E810_XXV_BACKPLANE:
61 case ICE_DEV_ID_E810_XXV_QSFP:
62 case ICE_DEV_ID_E810_XXV_SFP:
63 hw->mac_type = ICE_MAC_E810;
65 case ICE_DEV_ID_E822C_10G_BASE_T:
66 case ICE_DEV_ID_E822C_BACKPLANE:
67 case ICE_DEV_ID_E822C_QSFP:
68 case ICE_DEV_ID_E822C_SFP:
69 case ICE_DEV_ID_E822C_SGMII:
70 case ICE_DEV_ID_E822L_10G_BASE_T:
71 case ICE_DEV_ID_E822L_BACKPLANE:
72 case ICE_DEV_ID_E822L_SFP:
73 case ICE_DEV_ID_E822L_SGMII:
74 case ICE_DEV_ID_E823L_10G_BASE_T:
75 case ICE_DEV_ID_E823L_1GBE:
76 case ICE_DEV_ID_E823L_BACKPLANE:
77 case ICE_DEV_ID_E823L_QSFP:
78 case ICE_DEV_ID_E823L_SFP:
79 hw->mac_type = ICE_MAC_GENERIC;
82 hw->mac_type = ICE_MAC_UNKNOWN;
86 ice_debug(hw, ICE_DBG_INIT, "mac_type: %d\n", hw->mac_type);
91 * ice_clear_pf_cfg - Clear PF configuration
92 * @hw: pointer to the hardware structure
94 * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
95 * configuration, flow director filters, etc.).
97 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
99 struct ice_aq_desc desc;
101 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pf_cfg);
103 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
107 * ice_aq_manage_mac_read - manage MAC address read command
108 * @hw: pointer to the HW struct
109 * @buf: a virtual buffer to hold the manage MAC read response
110 * @buf_size: Size of the virtual buffer
111 * @cd: pointer to command details structure or NULL
113 * This function returns the per-PF station MAC address (0x0107).
114 * NOTE: Upon successful completion of this command, MAC address information
115 * is returned in the caller-supplied buffer, which should be interpreted as
116 * a "manage_mac_read" response.
117 * The MAC addresses from the response are also stored in the HW struct (port.mac).
118 * ice_discover_dev_caps is expected to be called before this function is called.
122 ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size,
123 struct ice_sq_cd *cd)
125 struct ice_aqc_manage_mac_read_resp *resp;
126 struct ice_aqc_manage_mac_read *cmd;
127 struct ice_aq_desc desc;
128 enum ice_status status;
132 cmd = &desc.params.mac_read;
134 if (buf_size < sizeof(*resp))
135 return ICE_ERR_BUF_TOO_SHORT;
137 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_read);
139 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
143 resp = (struct ice_aqc_manage_mac_read_resp *)buf;
144 flags = LE16_TO_CPU(cmd->flags) & ICE_AQC_MAN_MAC_READ_M;
146 if (!(flags & ICE_AQC_MAN_MAC_LAN_ADDR_VALID)) {
147 ice_debug(hw, ICE_DBG_LAN, "got invalid MAC address\n");
151 /* A single port can report up to two (LAN and WoL) addresses */
152 for (i = 0; i < cmd->num_addr; i++)
153 if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) {
154 ice_memcpy(hw->port_info->mac.lan_addr,
155 resp[i].mac_addr, ETH_ALEN,
157 ice_memcpy(hw->port_info->mac.perm_addr,
159 ETH_ALEN, ICE_DMA_TO_NONDMA);
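/* Illustrative usage sketch (not part of the driver flow): a minimal caller
 * of ice_aq_manage_mac_read(), sized the way ice_init_hw() sizes its buffer.
 * A single port can report up to two addresses (LAN and WoL), so the response
 * buffer holds two entries. The helper name ice_example_read_port_mac is
 * hypothetical.
 */
static enum ice_status ice_example_read_port_mac(struct ice_hw *hw)
{
	enum ice_status status;
	void *mac_buf;
	u16 mac_buf_len;

	mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
	mac_buf = ice_calloc(hw, 2,
			     sizeof(struct ice_aqc_manage_mac_read_resp));
	if (!mac_buf)
		return ICE_ERR_NO_MEMORY;

	/* on success, hw->port_info->mac.lan_addr/perm_addr are updated */
	status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
	ice_free(hw, mac_buf);

	return status;
}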
166 * ice_aq_get_phy_caps - returns PHY capabilities
167 * @pi: port information structure
168 * @qual_mods: report qualified modules
169 * @report_mode: report mode capabilities
170 * @pcaps: structure for PHY capabilities to be filled
171 * @cd: pointer to command details structure or NULL
173 * Returns the various PHY capabilities supported on the Port (0x0600)
176 ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
177 struct ice_aqc_get_phy_caps_data *pcaps,
178 struct ice_sq_cd *cd)
180 struct ice_aqc_get_phy_caps *cmd;
181 u16 pcaps_size = sizeof(*pcaps);
182 struct ice_aq_desc desc;
183 enum ice_status status;
186 cmd = &desc.params.get_phy;
188 if (!pcaps || (report_mode & ~ICE_AQC_REPORT_MODE_M) || !pi)
189 return ICE_ERR_PARAM;
192 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
195 cmd->param0 |= CPU_TO_LE16(ICE_AQC_GET_PHY_RQM);
197 cmd->param0 |= CPU_TO_LE16(report_mode);
198 status = ice_aq_send_cmd(hw, &desc, pcaps, pcaps_size, cd);
200 ice_debug(hw, ICE_DBG_LINK, "get phy caps - report_mode = 0x%x\n",
202 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
203 (unsigned long long)LE64_TO_CPU(pcaps->phy_type_low));
204 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
205 (unsigned long long)LE64_TO_CPU(pcaps->phy_type_high));
206 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", pcaps->caps);
207 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
208 pcaps->low_power_ctrl_an);
209 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", pcaps->eee_cap);
210 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n",
212 ice_debug(hw, ICE_DBG_LINK, " link_fec_options = 0x%x\n",
213 pcaps->link_fec_options);
214 ice_debug(hw, ICE_DBG_LINK, " module_compliance_enforcement = 0x%x\n",
215 pcaps->module_compliance_enforcement);
216 ice_debug(hw, ICE_DBG_LINK, " extended_compliance_code = 0x%x\n",
217 pcaps->extended_compliance_code);
218 ice_debug(hw, ICE_DBG_LINK, " module_type[0] = 0x%x\n",
219 pcaps->module_type[0]);
220 ice_debug(hw, ICE_DBG_LINK, " module_type[1] = 0x%x\n",
221 pcaps->module_type[1]);
222 ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n",
223 pcaps->module_type[2]);
225 if (status == ICE_SUCCESS && report_mode == ICE_AQC_REPORT_TOPO_CAP) {
226 pi->phy.phy_type_low = LE64_TO_CPU(pcaps->phy_type_low);
227 pi->phy.phy_type_high = LE64_TO_CPU(pcaps->phy_type_high);
228 ice_memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
229 sizeof(pi->phy.link_info.module_type),
230 ICE_NONDMA_TO_NONDMA);
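/* Illustrative usage sketch: querying topology PHY capabilities for a port,
 * mirroring the call made from ice_init_hw(). With ICE_AQC_REPORT_TOPO_CAP as
 * the report mode, phy_type_low/high and the module type are also cached in
 * pi->phy on success. The helper name is hypothetical.
 */
static enum ice_status ice_example_query_topo_caps(struct ice_port_info *pi)
{
	struct ice_aqc_get_phy_caps_data *pcaps;
	enum ice_status status;

	pcaps = (struct ice_aqc_get_phy_caps_data *)
		ice_malloc(pi->hw, sizeof(*pcaps));
	if (!pcaps)
		return ICE_ERR_NO_MEMORY;

	status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
				     pcaps, NULL);

	ice_free(pi->hw, pcaps);
	return status;
}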
237 * ice_aq_get_link_topo_handle - get link topology node return status
238 * @pi: port information structure
239 * @node_type: requested node type
240 * @cd: pointer to command details structure or NULL
242 * Get link topology node return status for specified node type (0x06E0)
244 * The cage node type can be used to determine whether a cage is present. If
245 * the AQC returns an error (ENOENT), no cage is present, and the connection
246 * type is backplane or BASE-T.
248 static enum ice_status
249 ice_aq_get_link_topo_handle(struct ice_port_info *pi, u8 node_type,
250 struct ice_sq_cd *cd)
252 struct ice_aqc_get_link_topo *cmd;
253 struct ice_aq_desc desc;
255 cmd = &desc.params.get_link_topo;
257 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_topo);
259 cmd->addr.node_type_ctx = (ICE_AQC_LINK_TOPO_NODE_CTX_PORT <<
260 ICE_AQC_LINK_TOPO_NODE_CTX_S);
263 cmd->addr.node_type_ctx |= (ICE_AQC_LINK_TOPO_NODE_TYPE_M & node_type);
265 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
269 * ice_is_media_cage_present
270 * @pi: port information structure
272 * Returns true if media cage is present, else false. If no cage, then
273 * media type is backplane or BASE-T.
275 static bool ice_is_media_cage_present(struct ice_port_info *pi)
277 /* The cage node type can be used to determine whether a cage is present.
278 * If the AQC returns an error (ENOENT), no cage is present, and the
279 * connection type is backplane or BASE-T.
281 return !ice_aq_get_link_topo_handle(pi,
282 ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE,
287 * ice_get_media_type - Gets media type
288 * @pi: port information structure
290 static enum ice_media_type ice_get_media_type(struct ice_port_info *pi)
292 struct ice_link_status *hw_link_info;
295 return ICE_MEDIA_UNKNOWN;
297 hw_link_info = &pi->phy.link_info;
298 if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
299 /* If more than one media type is selected, report unknown */
300 return ICE_MEDIA_UNKNOWN;
302 if (hw_link_info->phy_type_low) {
303 /* 1G SGMII is a special case where some DA cable PHYs
304 * may show this as an option when it really shouldn't
305 * be since SGMII is meant to be between a MAC and a PHY
306 * in a backplane. Try to detect this case and handle it
308 if (hw_link_info->phy_type_low == ICE_PHY_TYPE_LOW_1G_SGMII &&
309 (hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
310 ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
311 hw_link_info->module_type[ICE_AQC_MOD_TYPE_IDENT] ==
312 ICE_AQC_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
315 switch (hw_link_info->phy_type_low) {
316 case ICE_PHY_TYPE_LOW_1000BASE_SX:
317 case ICE_PHY_TYPE_LOW_1000BASE_LX:
318 case ICE_PHY_TYPE_LOW_10GBASE_SR:
319 case ICE_PHY_TYPE_LOW_10GBASE_LR:
320 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
321 case ICE_PHY_TYPE_LOW_25GBASE_SR:
322 case ICE_PHY_TYPE_LOW_25GBASE_LR:
323 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
324 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
325 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
326 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
327 case ICE_PHY_TYPE_LOW_50GBASE_SR:
328 case ICE_PHY_TYPE_LOW_50GBASE_FR:
329 case ICE_PHY_TYPE_LOW_50GBASE_LR:
330 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
331 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
332 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
333 case ICE_PHY_TYPE_LOW_100GBASE_DR:
334 return ICE_MEDIA_FIBER;
335 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
336 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
337 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
338 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
339 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
340 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
341 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
342 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
343 return ICE_MEDIA_FIBER;
344 case ICE_PHY_TYPE_LOW_100BASE_TX:
345 case ICE_PHY_TYPE_LOW_1000BASE_T:
346 case ICE_PHY_TYPE_LOW_2500BASE_T:
347 case ICE_PHY_TYPE_LOW_5GBASE_T:
348 case ICE_PHY_TYPE_LOW_10GBASE_T:
349 case ICE_PHY_TYPE_LOW_25GBASE_T:
350 return ICE_MEDIA_BASET;
351 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
352 case ICE_PHY_TYPE_LOW_25GBASE_CR:
353 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
354 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
355 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
356 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
357 case ICE_PHY_TYPE_LOW_50GBASE_CP:
358 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
359 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
360 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
362 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
363 case ICE_PHY_TYPE_LOW_40G_XLAUI:
364 case ICE_PHY_TYPE_LOW_50G_LAUI2:
365 case ICE_PHY_TYPE_LOW_50G_AUI2:
366 case ICE_PHY_TYPE_LOW_50G_AUI1:
367 case ICE_PHY_TYPE_LOW_100G_AUI4:
368 case ICE_PHY_TYPE_LOW_100G_CAUI4:
369 if (ice_is_media_cage_present(pi))
370 return ICE_MEDIA_AUI;
372 case ICE_PHY_TYPE_LOW_1000BASE_KX:
373 case ICE_PHY_TYPE_LOW_2500BASE_KX:
374 case ICE_PHY_TYPE_LOW_2500BASE_X:
375 case ICE_PHY_TYPE_LOW_5GBASE_KR:
376 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
377 case ICE_PHY_TYPE_LOW_25GBASE_KR:
378 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
379 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
380 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
381 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
382 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
383 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
384 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
385 return ICE_MEDIA_BACKPLANE;
388 switch (hw_link_info->phy_type_high) {
389 case ICE_PHY_TYPE_HIGH_100G_AUI2:
390 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
391 if (ice_is_media_cage_present(pi))
392 return ICE_MEDIA_AUI;
394 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
395 return ICE_MEDIA_BACKPLANE;
396 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
397 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
398 return ICE_MEDIA_FIBER;
401 return ICE_MEDIA_UNKNOWN;
405 * ice_aq_get_link_info
406 * @pi: port information structure
407 * @ena_lse: enable/disable LinkStatusEvent reporting
408 * @link: pointer to link status structure - optional
409 * @cd: pointer to command details structure or NULL
411 * Get Link Status (0x0607). Returns the link status of the adapter.
414 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
415 struct ice_link_status *link, struct ice_sq_cd *cd)
417 struct ice_aqc_get_link_status_data link_data = { 0 };
418 struct ice_aqc_get_link_status *resp;
419 struct ice_link_status *li_old, *li;
420 enum ice_media_type *hw_media_type;
421 struct ice_fc_info *hw_fc_info;
422 bool tx_pause, rx_pause;
423 struct ice_aq_desc desc;
424 enum ice_status status;
429 return ICE_ERR_PARAM;
432 li_old = &pi->phy.link_info_old;
433 hw_media_type = &pi->phy.media_type;
434 li = &pi->phy.link_info;
435 hw_fc_info = &pi->fc;
437 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
438 cmd_flags = (ena_lse) ? ICE_AQ_LSE_ENA : ICE_AQ_LSE_DIS;
439 resp = &desc.params.get_link_status;
440 resp->cmd_flags = CPU_TO_LE16(cmd_flags);
441 resp->lport_num = pi->lport;
443 status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
445 if (status != ICE_SUCCESS)
448 /* save off old link status information */
451 /* update current link status information */
452 li->link_speed = LE16_TO_CPU(link_data.link_speed);
453 li->phy_type_low = LE64_TO_CPU(link_data.phy_type_low);
454 li->phy_type_high = LE64_TO_CPU(link_data.phy_type_high);
455 *hw_media_type = ice_get_media_type(pi);
456 li->link_info = link_data.link_info;
457 li->an_info = link_data.an_info;
458 li->ext_info = link_data.ext_info;
459 li->max_frame_size = LE16_TO_CPU(link_data.max_frame_size);
460 li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
461 li->topo_media_conflict = link_data.topo_media_conflict;
462 li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
463 ICE_AQ_CFG_PACING_TYPE_M);
466 tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
467 rx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_RX);
468 if (tx_pause && rx_pause)
469 hw_fc_info->current_mode = ICE_FC_FULL;
471 hw_fc_info->current_mode = ICE_FC_TX_PAUSE;
473 hw_fc_info->current_mode = ICE_FC_RX_PAUSE;
475 hw_fc_info->current_mode = ICE_FC_NONE;
477 li->lse_ena = !!(resp->cmd_flags & CPU_TO_LE16(ICE_AQ_LSE_IS_ENABLED));
479 ice_debug(hw, ICE_DBG_LINK, "get link info\n");
480 ice_debug(hw, ICE_DBG_LINK, " link_speed = 0x%x\n", li->link_speed);
481 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
482 (unsigned long long)li->phy_type_low);
483 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
484 (unsigned long long)li->phy_type_high);
485 ice_debug(hw, ICE_DBG_LINK, " media_type = 0x%x\n", *hw_media_type);
486 ice_debug(hw, ICE_DBG_LINK, " link_info = 0x%x\n", li->link_info);
487 ice_debug(hw, ICE_DBG_LINK, " an_info = 0x%x\n", li->an_info);
488 ice_debug(hw, ICE_DBG_LINK, " ext_info = 0x%x\n", li->ext_info);
489 ice_debug(hw, ICE_DBG_LINK, " fec_info = 0x%x\n", li->fec_info);
490 ice_debug(hw, ICE_DBG_LINK, " lse_ena = 0x%x\n", li->lse_ena);
491 ice_debug(hw, ICE_DBG_LINK, " max_frame = 0x%x\n",
493 ice_debug(hw, ICE_DBG_LINK, " pacing = 0x%x\n", li->pacing);
495 /* save link status information */
499 /* flag cleared so calling functions don't call AQ again */
500 pi->phy.get_link_info = false;
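/* Illustrative usage sketch: refreshing the cached link status when the
 * get_link_info flag indicates it is stale, without enabling link status
 * events. The wrapper name is hypothetical; callers in the driver follow the
 * same pattern before consuming pi->phy.link_info.
 */
static enum ice_status ice_example_refresh_link(struct ice_port_info *pi)
{
	enum ice_status status = ICE_SUCCESS;

	if (pi->phy.get_link_info)
		status = ice_aq_get_link_info(pi, false, NULL, NULL);

	return status;
}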
506 * ice_fill_tx_timer_and_fc_thresh
507 * @hw: pointer to the HW struct
508 * @cmd: pointer to MAC cfg structure
510 * Add Tx timer and FC refresh threshold info to Set MAC Config AQ command
514 ice_fill_tx_timer_and_fc_thresh(struct ice_hw *hw,
515 struct ice_aqc_set_mac_cfg *cmd)
517 u16 fc_thres_val, tx_timer_val;
520 /* We read back the transmit timer and fc threshold value of
521 * LFC. Thus, we will use index =
522 * PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX.
524 * Also, because we are operating on the transmit timer and fc
525 * threshold of LFC, we don't turn on any bit in tx_tmr_priority
527 #define IDX_OF_LFC PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX
529 /* Retrieve the transmit timer */
530 val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(IDX_OF_LFC));
532 PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_M;
533 cmd->tx_tmr_value = CPU_TO_LE16(tx_timer_val);
535 /* Retrieve the fc threshold */
536 val = rd32(hw, PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(IDX_OF_LFC));
537 fc_thres_val = val & PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_M;
539 cmd->fc_refresh_threshold = CPU_TO_LE16(fc_thres_val);
544 * @hw: pointer to the HW struct
545 * @max_frame_size: Maximum Frame Size to be supported
546 * @cd: pointer to command details structure or NULL
548 * Set MAC configuration (0x0603)
551 ice_aq_set_mac_cfg(struct ice_hw *hw, u16 max_frame_size, struct ice_sq_cd *cd)
553 struct ice_aqc_set_mac_cfg *cmd;
554 struct ice_aq_desc desc;
556 cmd = &desc.params.set_mac_cfg;
558 if (max_frame_size == 0)
559 return ICE_ERR_PARAM;
561 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_cfg);
563 cmd->max_frame_size = CPU_TO_LE16(max_frame_size);
565 ice_fill_tx_timer_and_fc_thresh(hw, cmd);
567 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
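/* Illustrative usage sketch: enabling jumbo frame support at the MAC level by
 * programming the maximum supported frame size, exactly as ice_init_hw() does
 * later in this file. The helper name is hypothetical.
 */
static enum ice_status ice_example_enable_jumbo(struct ice_hw *hw)
{
	return ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
}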
571 * ice_init_fltr_mgmt_struct - initializes filter management list and locks
572 * @hw: pointer to the HW struct
574 static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
576 struct ice_switch_info *sw;
577 enum ice_status status;
579 hw->switch_info = (struct ice_switch_info *)
580 ice_malloc(hw, sizeof(*hw->switch_info));
582 sw = hw->switch_info;
585 return ICE_ERR_NO_MEMORY;
587 INIT_LIST_HEAD(&sw->vsi_list_map_head);
588 sw->prof_res_bm_init = 0;
590 status = ice_init_def_sw_recp(hw, &hw->switch_info->recp_list);
592 ice_free(hw, hw->switch_info);
599 * ice_cleanup_fltr_mgmt_single - clears a single filter management struct
600 * @hw: pointer to the HW struct
601 * @sw: pointer to switch info struct for which function clears filters
604 ice_cleanup_fltr_mgmt_single(struct ice_hw *hw, struct ice_switch_info *sw)
606 struct ice_vsi_list_map_info *v_pos_map;
607 struct ice_vsi_list_map_info *v_tmp_map;
608 struct ice_sw_recipe *recps;
614 LIST_FOR_EACH_ENTRY_SAFE(v_pos_map, v_tmp_map, &sw->vsi_list_map_head,
615 ice_vsi_list_map_info, list_entry) {
616 LIST_DEL(&v_pos_map->list_entry);
617 ice_free(hw, v_pos_map);
619 recps = sw->recp_list;
620 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) {
621 struct ice_recp_grp_entry *rg_entry, *tmprg_entry;
623 recps[i].root_rid = i;
624 LIST_FOR_EACH_ENTRY_SAFE(rg_entry, tmprg_entry,
625 &recps[i].rg_list, ice_recp_grp_entry,
627 LIST_DEL(&rg_entry->l_entry);
628 ice_free(hw, rg_entry);
631 if (recps[i].adv_rule) {
632 struct ice_adv_fltr_mgmt_list_entry *tmp_entry;
633 struct ice_adv_fltr_mgmt_list_entry *lst_itr;
635 ice_destroy_lock(&recps[i].filt_rule_lock);
636 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
637 &recps[i].filt_rules,
638 ice_adv_fltr_mgmt_list_entry,
640 LIST_DEL(&lst_itr->list_entry);
641 ice_free(hw, lst_itr->lkups);
642 ice_free(hw, lst_itr);
645 struct ice_fltr_mgmt_list_entry *lst_itr, *tmp_entry;
647 ice_destroy_lock(&recps[i].filt_rule_lock);
648 LIST_FOR_EACH_ENTRY_SAFE(lst_itr, tmp_entry,
649 &recps[i].filt_rules,
650 ice_fltr_mgmt_list_entry,
652 LIST_DEL(&lst_itr->list_entry);
653 ice_free(hw, lst_itr);
656 if (recps[i].root_buf)
657 ice_free(hw, recps[i].root_buf);
659 ice_rm_sw_replay_rule_info(hw, sw);
660 ice_free(hw, sw->recp_list);
665 * ice_cleanup_fltr_mgmt_struct - cleanup filter management list and locks
666 * @hw: pointer to the HW struct
668 static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw)
670 ice_cleanup_fltr_mgmt_single(hw, hw->switch_info);
674 * ice_get_itr_intrl_gran
675 * @hw: pointer to the HW struct
677 * Determines the ITR/INTRL granularities based on the maximum aggregate
678 * bandwidth according to the device's configuration during power-on.
680 static void ice_get_itr_intrl_gran(struct ice_hw *hw)
682 u8 max_agg_bw = (rd32(hw, GL_PWR_MODE_CTL) &
683 GL_PWR_MODE_CTL_CAR_MAX_BW_M) >>
684 GL_PWR_MODE_CTL_CAR_MAX_BW_S;
686 switch (max_agg_bw) {
687 case ICE_MAX_AGG_BW_200G:
688 case ICE_MAX_AGG_BW_100G:
689 case ICE_MAX_AGG_BW_50G:
690 hw->itr_gran = ICE_ITR_GRAN_ABOVE_25;
691 hw->intrl_gran = ICE_INTRL_GRAN_ABOVE_25;
693 case ICE_MAX_AGG_BW_25G:
694 hw->itr_gran = ICE_ITR_GRAN_MAX_25;
695 hw->intrl_gran = ICE_INTRL_GRAN_MAX_25;
701 * ice_print_rollback_msg - print FW rollback message
702 * @hw: pointer to the hardware structure
704 void ice_print_rollback_msg(struct ice_hw *hw)
706 char nvm_str[ICE_NVM_VER_LEN] = { 0 };
707 struct ice_nvm_info *nvm = &hw->nvm;
708 struct ice_orom_info *orom;
712 SNPRINTF(nvm_str, sizeof(nvm_str), "%x.%02x 0x%x %d.%d.%d",
713 nvm->major_ver, nvm->minor_ver, nvm->eetrack, orom->major,
714 orom->build, orom->patch);
716 "Firmware rollback mode detected. Current version is NVM: %s, FW: %d.%d. Device may exhibit limited functionality. Refer to the Intel(R) Ethernet Adapters and Devices User Guide for details on firmware rollback mode\n",
717 nvm_str, hw->fw_maj_ver, hw->fw_min_ver);
721 * ice_init_hw - main hardware initialization routine
722 * @hw: pointer to the hardware structure
724 enum ice_status ice_init_hw(struct ice_hw *hw)
726 struct ice_aqc_get_phy_caps_data *pcaps;
727 enum ice_status status;
731 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
733 /* Set MAC type based on DeviceID */
734 status = ice_set_mac_type(hw);
738 hw->pf_id = (u8)(rd32(hw, PF_FUNC_RID) &
739 PF_FUNC_RID_FUNCTION_NUMBER_M) >>
740 PF_FUNC_RID_FUNCTION_NUMBER_S;
742 status = ice_reset(hw, ICE_RESET_PFR);
745 ice_get_itr_intrl_gran(hw);
747 status = ice_create_all_ctrlq(hw);
749 goto err_unroll_cqinit;
751 status = ice_init_nvm(hw);
753 goto err_unroll_cqinit;
755 if (ice_get_fw_mode(hw) == ICE_FW_MODE_ROLLBACK)
756 ice_print_rollback_msg(hw);
758 status = ice_clear_pf_cfg(hw);
760 goto err_unroll_cqinit;
762 ice_clear_pxe_mode(hw);
764 status = ice_get_caps(hw);
766 goto err_unroll_cqinit;
768 hw->port_info = (struct ice_port_info *)
769 ice_malloc(hw, sizeof(*hw->port_info));
770 if (!hw->port_info) {
771 status = ICE_ERR_NO_MEMORY;
772 goto err_unroll_cqinit;
775 /* set the back pointer to HW */
776 hw->port_info->hw = hw;
778 /* Initialize port_info struct with switch configuration data */
779 status = ice_get_initial_sw_cfg(hw);
781 goto err_unroll_alloc;
784 /* Query the allocated resources for Tx scheduler */
785 status = ice_sched_query_res_alloc(hw);
787 ice_debug(hw, ICE_DBG_SCHED, "Failed to get scheduler allocated resources\n");
788 goto err_unroll_alloc;
790 ice_sched_get_psm_clk_freq(hw);
792 /* Initialize port_info struct with scheduler data */
793 status = ice_sched_init_port(hw->port_info);
795 goto err_unroll_sched;
796 pcaps = (struct ice_aqc_get_phy_caps_data *)
797 ice_malloc(hw, sizeof(*pcaps));
799 status = ICE_ERR_NO_MEMORY;
800 goto err_unroll_sched;
803 /* Initialize port_info struct with PHY capabilities */
804 status = ice_aq_get_phy_caps(hw->port_info, false,
805 ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL);
808 ice_debug(hw, ICE_DBG_PHY, "%s: Get PHY capabilities failed, continuing anyway\n",
811 /* Initialize port_info struct with link information */
812 status = ice_aq_get_link_info(hw->port_info, false, NULL, NULL);
814 goto err_unroll_sched;
815 /* need a valid SW entry point to build a Tx tree */
816 if (!hw->sw_entry_point_layer) {
817 ice_debug(hw, ICE_DBG_SCHED, "invalid sw entry point\n");
818 status = ICE_ERR_CFG;
819 goto err_unroll_sched;
821 INIT_LIST_HEAD(&hw->agg_list);
822 /* Initialize max burst size */
823 if (!hw->max_burst_size)
824 ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
825 status = ice_init_fltr_mgmt_struct(hw);
827 goto err_unroll_sched;
829 /* Get MAC information */
830 /* A single port can report up to two (LAN and WoL) addresses */
831 mac_buf = ice_calloc(hw, 2,
832 sizeof(struct ice_aqc_manage_mac_read_resp));
833 mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp);
836 status = ICE_ERR_NO_MEMORY;
837 goto err_unroll_fltr_mgmt_struct;
840 status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
841 ice_free(hw, mac_buf);
844 goto err_unroll_fltr_mgmt_struct;
845 /* enable jumbo frame support at MAC level */
846 status = ice_aq_set_mac_cfg(hw, ICE_AQ_SET_MAC_FRAME_SIZE_MAX, NULL);
848 goto err_unroll_fltr_mgmt_struct;
849 status = ice_init_hw_tbls(hw);
851 goto err_unroll_fltr_mgmt_struct;
852 ice_init_lock(&hw->tnl_lock);
855 err_unroll_fltr_mgmt_struct:
856 ice_cleanup_fltr_mgmt_struct(hw);
858 ice_sched_cleanup_all(hw);
860 ice_free(hw, hw->port_info);
861 hw->port_info = NULL;
863 ice_destroy_all_ctrlq(hw);
868 * ice_deinit_hw - unroll initialization operations done by ice_init_hw
869 * @hw: pointer to the hardware structure
871 * This should be called only during nominal operation, not after a failed
872 * ice_init_hw(), since ice_init_hw() unrolls any applicable initialization
873 * itself if it fails for any reason.
875 void ice_deinit_hw(struct ice_hw *hw)
877 ice_cleanup_fltr_mgmt_struct(hw);
879 ice_sched_cleanup_all(hw);
880 ice_sched_clear_agg(hw);
882 ice_free_hw_tbls(hw);
883 ice_destroy_lock(&hw->tnl_lock);
886 ice_free(hw, hw->port_info);
887 hw->port_info = NULL;
890 ice_destroy_all_ctrlq(hw);
892 /* Clear VSI contexts if not already cleared */
893 ice_clear_all_vsi_ctx(hw);
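/* Illustrative usage sketch: the expected pairing of ice_init_hw() and
 * ice_deinit_hw() in a probe/remove style flow. ice_init_hw() unrolls its own
 * partial initialization on failure, so ice_deinit_hw() is only called after a
 * successful init. The function names ice_example_probe/ice_example_remove are
 * hypothetical.
 */
static enum ice_status ice_example_probe(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_init_hw(hw);
	if (status)
		return status;	/* nothing to unroll here */

	/* ... device specific setup ... */

	return ICE_SUCCESS;
}

static void ice_example_remove(struct ice_hw *hw)
{
	ice_deinit_hw(hw);
}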
897 * ice_check_reset - Check to see if a global reset is complete
898 * @hw: pointer to the hardware structure
900 enum ice_status ice_check_reset(struct ice_hw *hw)
902 u32 cnt, reg = 0, grst_timeout, uld_mask;
904 /* Poll for Device Active state in case a recent CORER, GLOBR,
905 * or EMPR has occurred. The grst delay value is in 100ms units.
906 * Add 1sec for outstanding AQ commands that can take a long time.
908 grst_timeout = ((rd32(hw, GLGEN_RSTCTL) & GLGEN_RSTCTL_GRSTDEL_M) >>
909 GLGEN_RSTCTL_GRSTDEL_S) + 10;
911 for (cnt = 0; cnt < grst_timeout; cnt++) {
912 ice_msec_delay(100, true);
913 reg = rd32(hw, GLGEN_RSTAT);
914 if (!(reg & GLGEN_RSTAT_DEVSTATE_M))
918 if (cnt == grst_timeout) {
919 ice_debug(hw, ICE_DBG_INIT, "Global reset polling failed to complete.\n");
920 return ICE_ERR_RESET_FAILED;
923 #define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
924 GLNVM_ULD_PCIER_DONE_1_M |\
925 GLNVM_ULD_CORER_DONE_M |\
926 GLNVM_ULD_GLOBR_DONE_M |\
927 GLNVM_ULD_POR_DONE_M |\
928 GLNVM_ULD_POR_DONE_1_M |\
929 GLNVM_ULD_PCIER_DONE_2_M)
931 uld_mask = ICE_RESET_DONE_MASK;
933 /* Device is Active; check Global Reset processes are done */
934 for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
935 reg = rd32(hw, GLNVM_ULD) & uld_mask;
936 if (reg == uld_mask) {
937 ice_debug(hw, ICE_DBG_INIT, "Global reset processes done. %d\n", cnt);
940 ice_msec_delay(10, true);
943 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
944 ice_debug(hw, ICE_DBG_INIT, "Wait for Reset Done timed out. GLNVM_ULD = 0x%x\n",
946 return ICE_ERR_RESET_FAILED;
953 * ice_pf_reset - Reset the PF
954 * @hw: pointer to the hardware structure
956 * If a global reset has been triggered, this function checks
957 * for its completion and then issues the PF reset
959 static enum ice_status ice_pf_reset(struct ice_hw *hw)
963 /* If at function entry a global reset was already in progress, i.e.
964 * state is not 'device active' or any of the reset done bits are not
965 * set in GLNVM_ULD, there is no need for a PF Reset; poll until the
966 * global reset is done.
968 if ((rd32(hw, GLGEN_RSTAT) & GLGEN_RSTAT_DEVSTATE_M) ||
969 (rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK) ^ ICE_RESET_DONE_MASK) {
970 /* poll on global reset currently in progress until done */
971 if (ice_check_reset(hw))
972 return ICE_ERR_RESET_FAILED;
978 reg = rd32(hw, PFGEN_CTRL);
980 wr32(hw, PFGEN_CTRL, (reg | PFGEN_CTRL_PFSWR_M));
982 /* Wait for the PFR to complete. The wait time is the global config lock
983 * timeout plus the PFR timeout which will account for a possible reset
984 * that is occurring during a download package operation.
986 for (cnt = 0; cnt < ICE_GLOBAL_CFG_LOCK_TIMEOUT +
987 ICE_PF_RESET_WAIT_COUNT; cnt++) {
988 reg = rd32(hw, PFGEN_CTRL);
989 if (!(reg & PFGEN_CTRL_PFSWR_M))
992 ice_msec_delay(1, true);
995 if (cnt == ICE_PF_RESET_WAIT_COUNT) {
996 ice_debug(hw, ICE_DBG_INIT, "PF reset polling failed to complete.\n");
997 return ICE_ERR_RESET_FAILED;
1004 * ice_reset - Perform different types of reset
1005 * @hw: pointer to the hardware structure
1006 * @req: reset request
1008 * This function triggers a reset as specified by the req parameter.
1011 * If anything other than a PF reset is triggered, PXE mode is restored.
1012 * This has to be cleared using ice_clear_pxe_mode again, once the AQ
1013 * interface has been restored in the rebuild flow.
1015 enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
1021 return ice_pf_reset(hw);
1022 case ICE_RESET_CORER:
1023 ice_debug(hw, ICE_DBG_INIT, "CoreR requested\n");
1024 val = GLGEN_RTRIG_CORER_M;
1026 case ICE_RESET_GLOBR:
1027 ice_debug(hw, ICE_DBG_INIT, "GlobalR requested\n");
1028 val = GLGEN_RTRIG_GLOBR_M;
1031 return ICE_ERR_PARAM;
1034 val |= rd32(hw, GLGEN_RTRIG);
1035 wr32(hw, GLGEN_RTRIG, val);
1038 /* wait for the FW to be ready */
1039 return ice_check_reset(hw);
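/* Illustrative usage sketch: requesting a PF reset, the same request issued
 * early in ice_init_hw(). Any reset type other than ICE_RESET_PFR also
 * restores PXE mode, which must then be cleared again with
 * ice_clear_pxe_mode() once the AQ interface is back up. The helper name is
 * hypothetical.
 */
static enum ice_status ice_example_pf_reset(struct ice_hw *hw)
{
	return ice_reset(hw, ICE_RESET_PFR);
}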
1043 * ice_copy_rxq_ctx_to_hw
1044 * @hw: pointer to the hardware structure
1045 * @ice_rxq_ctx: pointer to the rxq context
1046 * @rxq_index: the index of the Rx queue
1048 * Copies rxq context from dense structure to HW register space
1050 static enum ice_status
1051 ice_copy_rxq_ctx_to_hw(struct ice_hw *hw, u8 *ice_rxq_ctx, u32 rxq_index)
1056 return ICE_ERR_BAD_PTR;
1058 if (rxq_index > QRX_CTRL_MAX_INDEX)
1059 return ICE_ERR_PARAM;
1061 /* Copy each dword separately to HW */
1062 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++) {
1063 wr32(hw, QRX_CONTEXT(i, rxq_index),
1064 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1066 ice_debug(hw, ICE_DBG_QCTX, "qrxdata[%d]: %08X\n", i,
1067 *((u32 *)(ice_rxq_ctx + (i * sizeof(u32)))));
1073 /* LAN Rx Queue Context */
1074 static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
1075 /* Field Width LSB */
1076 ICE_CTX_STORE(ice_rlan_ctx, head, 13, 0),
1077 ICE_CTX_STORE(ice_rlan_ctx, cpuid, 8, 13),
1078 ICE_CTX_STORE(ice_rlan_ctx, base, 57, 32),
1079 ICE_CTX_STORE(ice_rlan_ctx, qlen, 13, 89),
1080 ICE_CTX_STORE(ice_rlan_ctx, dbuf, 7, 102),
1081 ICE_CTX_STORE(ice_rlan_ctx, hbuf, 5, 109),
1082 ICE_CTX_STORE(ice_rlan_ctx, dtype, 2, 114),
1083 ICE_CTX_STORE(ice_rlan_ctx, dsize, 1, 116),
1084 ICE_CTX_STORE(ice_rlan_ctx, crcstrip, 1, 117),
1085 ICE_CTX_STORE(ice_rlan_ctx, l2tsel, 1, 119),
1086 ICE_CTX_STORE(ice_rlan_ctx, hsplit_0, 4, 120),
1087 ICE_CTX_STORE(ice_rlan_ctx, hsplit_1, 2, 124),
1088 ICE_CTX_STORE(ice_rlan_ctx, showiv, 1, 127),
1089 ICE_CTX_STORE(ice_rlan_ctx, rxmax, 14, 174),
1090 ICE_CTX_STORE(ice_rlan_ctx, tphrdesc_ena, 1, 193),
1091 ICE_CTX_STORE(ice_rlan_ctx, tphwdesc_ena, 1, 194),
1092 ICE_CTX_STORE(ice_rlan_ctx, tphdata_ena, 1, 195),
1093 ICE_CTX_STORE(ice_rlan_ctx, tphhead_ena, 1, 196),
1094 ICE_CTX_STORE(ice_rlan_ctx, lrxqthresh, 3, 198),
1095 ICE_CTX_STORE(ice_rlan_ctx, prefena, 1, 201),
1101 * @hw: pointer to the hardware structure
1102 * @rlan_ctx: pointer to the rxq context
1103 * @rxq_index: the index of the Rx queue
1105 * Converts rxq context from sparse to dense structure, writes it to HW
1106 * register space, and enables the hardware to prefetch descriptors
1107 * instead of fetching them only on demand
1110 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
1113 u8 ctx_buf[ICE_RXQ_CTX_SZ] = { 0 };
1116 return ICE_ERR_BAD_PTR;
1118 rlan_ctx->prefena = 1;
1120 ice_set_ctx(hw, (u8 *)rlan_ctx, ctx_buf, ice_rlan_ctx_info);
1121 return ice_copy_rxq_ctx_to_hw(hw, ctx_buf, rxq_index);
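/* Illustrative usage sketch: filling a minimal Rx queue context and writing it
 * for queue 0. The shift counts and field values below are assumptions and
 * placeholders (base and dbuf are assumed to be in 128 byte units), not
 * recommended settings. The helper name is hypothetical.
 */
static enum ice_status
ice_example_setup_rxq_ctx(struct ice_hw *hw, u64 ring_base, u16 ring_len)
{
	struct ice_rlan_ctx rlan_ctx = { 0 };

	rlan_ctx.base = ring_base >> 7;	/* assumed 128 byte granularity */
	rlan_ctx.qlen = ring_len;
	rlan_ctx.dbuf = 2048 >> 7;	/* assumed 128 byte buffer units */
	rlan_ctx.dsize = 1;		/* assumed 32 byte descriptor format */
	rlan_ctx.crcstrip = 1;
	rlan_ctx.rxmax = 1518;		/* max frame size for this queue */

	/* prefena is forced to 1 inside ice_write_rxq_ctx() */
	return ice_write_rxq_ctx(hw, &rlan_ctx, 0);
}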
1126 * @hw: pointer to the hardware structure
1127 * @rxq_index: the index of the Rx queue to clear
1129 * Clears rxq context in HW register space
1131 enum ice_status ice_clear_rxq_ctx(struct ice_hw *hw, u32 rxq_index)
1135 if (rxq_index > QRX_CTRL_MAX_INDEX)
1136 return ICE_ERR_PARAM;
1138 /* Clear each dword register separately */
1139 for (i = 0; i < ICE_RXQ_CTX_SIZE_DWORDS; i++)
1140 wr32(hw, QRX_CONTEXT(i, rxq_index), 0);
1145 /* LAN Tx Queue Context */
1146 const struct ice_ctx_ele ice_tlan_ctx_info[] = {
1147 /* Field Width LSB */
1148 ICE_CTX_STORE(ice_tlan_ctx, base, 57, 0),
1149 ICE_CTX_STORE(ice_tlan_ctx, port_num, 3, 57),
1150 ICE_CTX_STORE(ice_tlan_ctx, cgd_num, 5, 60),
1151 ICE_CTX_STORE(ice_tlan_ctx, pf_num, 3, 65),
1152 ICE_CTX_STORE(ice_tlan_ctx, vmvf_num, 10, 68),
1153 ICE_CTX_STORE(ice_tlan_ctx, vmvf_type, 2, 78),
1154 ICE_CTX_STORE(ice_tlan_ctx, src_vsi, 10, 80),
1155 ICE_CTX_STORE(ice_tlan_ctx, tsyn_ena, 1, 90),
1156 ICE_CTX_STORE(ice_tlan_ctx, internal_usage_flag, 1, 91),
1157 ICE_CTX_STORE(ice_tlan_ctx, alt_vlan, 1, 92),
1158 ICE_CTX_STORE(ice_tlan_ctx, cpuid, 8, 93),
1159 ICE_CTX_STORE(ice_tlan_ctx, wb_mode, 1, 101),
1160 ICE_CTX_STORE(ice_tlan_ctx, tphrd_desc, 1, 102),
1161 ICE_CTX_STORE(ice_tlan_ctx, tphrd, 1, 103),
1162 ICE_CTX_STORE(ice_tlan_ctx, tphwr_desc, 1, 104),
1163 ICE_CTX_STORE(ice_tlan_ctx, cmpq_id, 9, 105),
1164 ICE_CTX_STORE(ice_tlan_ctx, qnum_in_func, 14, 114),
1165 ICE_CTX_STORE(ice_tlan_ctx, itr_notification_mode, 1, 128),
1166 ICE_CTX_STORE(ice_tlan_ctx, adjust_prof_id, 6, 129),
1167 ICE_CTX_STORE(ice_tlan_ctx, qlen, 13, 135),
1168 ICE_CTX_STORE(ice_tlan_ctx, quanta_prof_idx, 4, 148),
1169 ICE_CTX_STORE(ice_tlan_ctx, tso_ena, 1, 152),
1170 ICE_CTX_STORE(ice_tlan_ctx, tso_qnum, 11, 153),
1171 ICE_CTX_STORE(ice_tlan_ctx, legacy_int, 1, 164),
1172 ICE_CTX_STORE(ice_tlan_ctx, drop_ena, 1, 165),
1173 ICE_CTX_STORE(ice_tlan_ctx, cache_prof_idx, 2, 166),
1174 ICE_CTX_STORE(ice_tlan_ctx, pkt_shaper_prof_idx, 3, 168),
1175 ICE_CTX_STORE(ice_tlan_ctx, int_q_state, 122, 171),
1180 * ice_copy_tx_cmpltnq_ctx_to_hw
1181 * @hw: pointer to the hardware structure
1182 * @ice_tx_cmpltnq_ctx: pointer to the Tx completion queue context
1183 * @tx_cmpltnq_index: the index of the completion queue
1185 * Copies Tx completion queue context from dense structure to HW register space
1187 static enum ice_status
1188 ice_copy_tx_cmpltnq_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_cmpltnq_ctx,
1189 u32 tx_cmpltnq_index)
1193 if (!ice_tx_cmpltnq_ctx)
1194 return ICE_ERR_BAD_PTR;
1196 if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
1197 return ICE_ERR_PARAM;
1199 /* Copy each dword separately to HW */
1200 for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++) {
1201 wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index),
1202 *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
1204 ice_debug(hw, ICE_DBG_QCTX, "cmpltnqdata[%d]: %08X\n", i,
1205 *((u32 *)(ice_tx_cmpltnq_ctx + (i * sizeof(u32)))));
1211 /* LAN Tx Completion Queue Context */
1212 static const struct ice_ctx_ele ice_tx_cmpltnq_ctx_info[] = {
1213 /* Field Width LSB */
1214 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, base, 57, 0),
1215 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, q_len, 18, 64),
1216 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, generation, 1, 96),
1217 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, wrt_ptr, 22, 97),
1218 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, pf_num, 3, 128),
1219 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_num, 10, 131),
1220 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, vmvf_type, 2, 141),
1221 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, tph_desc_wr, 1, 160),
1222 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cpuid, 8, 161),
1223 ICE_CTX_STORE(ice_tx_cmpltnq_ctx, cmpltn_cache, 512, 192),
1228 * ice_write_tx_cmpltnq_ctx
1229 * @hw: pointer to the hardware structure
1230 * @tx_cmpltnq_ctx: pointer to the completion queue context
1231 * @tx_cmpltnq_index: the index of the completion queue
1233 * Converts completion queue context from sparse to dense structure and then
1234 * writes it to HW register space
1237 ice_write_tx_cmpltnq_ctx(struct ice_hw *hw,
1238 struct ice_tx_cmpltnq_ctx *tx_cmpltnq_ctx,
1239 u32 tx_cmpltnq_index)
1241 u8 ctx_buf[ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
1243 ice_set_ctx(hw, (u8 *)tx_cmpltnq_ctx, ctx_buf, ice_tx_cmpltnq_ctx_info);
1244 return ice_copy_tx_cmpltnq_ctx_to_hw(hw, ctx_buf, tx_cmpltnq_index);
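/* Illustrative usage sketch: programming a Tx completion queue context through
 * the same sparse-to-dense conversion path used for the Rx queue context
 * above. Field values are placeholders and the helper name is hypothetical.
 */
static enum ice_status
ice_example_setup_cmpltnq(struct ice_hw *hw, u64 cq_base, u32 cq_index)
{
	struct ice_tx_cmpltnq_ctx cq_ctx = { 0 };

	cq_ctx.base = cq_base >> 7;	/* assumed 128 byte granularity */
	cq_ctx.q_len = 128;		/* placeholder queue length */
	cq_ctx.generation = 1;

	return ice_write_tx_cmpltnq_ctx(hw, &cq_ctx, cq_index);
}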
1248 * ice_clear_tx_cmpltnq_ctx
1249 * @hw: pointer to the hardware structure
1250 * @tx_cmpltnq_index: the index of the completion queue to clear
1252 * Clears Tx completion queue context in HW register space
1255 ice_clear_tx_cmpltnq_ctx(struct ice_hw *hw, u32 tx_cmpltnq_index)
1259 if (tx_cmpltnq_index > GLTCLAN_CQ_CNTX0_MAX_INDEX)
1260 return ICE_ERR_PARAM;
1262 /* Clear each dword register separately */
1263 for (i = 0; i < ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS; i++)
1264 wr32(hw, GLTCLAN_CQ_CNTX(i, tx_cmpltnq_index), 0);
1270 * ice_copy_tx_drbell_q_ctx_to_hw
1271 * @hw: pointer to the hardware structure
1272 * @ice_tx_drbell_q_ctx: pointer to the doorbell queue context
1273 * @tx_drbell_q_index: the index of the doorbell queue
1275 * Copies doorbell queue context from dense structure to HW register space
1277 static enum ice_status
1278 ice_copy_tx_drbell_q_ctx_to_hw(struct ice_hw *hw, u8 *ice_tx_drbell_q_ctx,
1279 u32 tx_drbell_q_index)
1283 if (!ice_tx_drbell_q_ctx)
1284 return ICE_ERR_BAD_PTR;
1286 if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1287 return ICE_ERR_PARAM;
1289 /* Copy each dword separately to HW */
1290 for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++) {
1291 wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index),
1292 *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1294 ice_debug(hw, ICE_DBG_QCTX, "tx_drbell_qdata[%d]: %08X\n", i,
1295 *((u32 *)(ice_tx_drbell_q_ctx + (i * sizeof(u32)))));
1301 /* LAN Tx Doorbell Queue Context info */
1302 static const struct ice_ctx_ele ice_tx_drbell_q_ctx_info[] = {
1303 /* Field Width LSB */
1304 ICE_CTX_STORE(ice_tx_drbell_q_ctx, base, 57, 0),
1305 ICE_CTX_STORE(ice_tx_drbell_q_ctx, ring_len, 13, 64),
1306 ICE_CTX_STORE(ice_tx_drbell_q_ctx, pf_num, 3, 80),
1307 ICE_CTX_STORE(ice_tx_drbell_q_ctx, vf_num, 8, 84),
1308 ICE_CTX_STORE(ice_tx_drbell_q_ctx, vmvf_type, 2, 94),
1309 ICE_CTX_STORE(ice_tx_drbell_q_ctx, cpuid, 8, 96),
1310 ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_rd, 1, 104),
1311 ICE_CTX_STORE(ice_tx_drbell_q_ctx, tph_desc_wr, 1, 108),
1312 ICE_CTX_STORE(ice_tx_drbell_q_ctx, db_q_en, 1, 112),
1313 ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_head, 13, 128),
1314 ICE_CTX_STORE(ice_tx_drbell_q_ctx, rd_tail, 13, 144),
1319 * ice_write_tx_drbell_q_ctx
1320 * @hw: pointer to the hardware structure
1321 * @tx_drbell_q_ctx: pointer to the doorbell queue context
1322 * @tx_drbell_q_index: the index of the doorbell queue
1324 * Converts doorbell queue context from sparse to dense structure and then
1325 * writes it to HW register space
1328 ice_write_tx_drbell_q_ctx(struct ice_hw *hw,
1329 struct ice_tx_drbell_q_ctx *tx_drbell_q_ctx,
1330 u32 tx_drbell_q_index)
1332 u8 ctx_buf[ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS * sizeof(u32)] = { 0 };
1334 ice_set_ctx(hw, (u8 *)tx_drbell_q_ctx, ctx_buf,
1335 ice_tx_drbell_q_ctx_info);
1336 return ice_copy_tx_drbell_q_ctx_to_hw(hw, ctx_buf, tx_drbell_q_index);
1340 * ice_clear_tx_drbell_q_ctx
1341 * @hw: pointer to the hardware structure
1342 * @tx_drbell_q_index: the index of the doorbell queue to clear
1344 * Clears doorbell queue context in HW register space
1347 ice_clear_tx_drbell_q_ctx(struct ice_hw *hw, u32 tx_drbell_q_index)
1351 if (tx_drbell_q_index > QTX_COMM_DBLQ_DBELL_MAX_INDEX)
1352 return ICE_ERR_PARAM;
1354 /* Clear each dword register separately */
1355 for (i = 0; i < ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS; i++)
1356 wr32(hw, QTX_COMM_DBLQ_CNTX(i, tx_drbell_q_index), 0);
1361 /* FW Admin Queue command wrappers */
1364 * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
1365 * @hw: pointer to the HW struct
1366 * @desc: descriptor describing the command
1367 * @buf: buffer to use for indirect commands (NULL for direct commands)
1368 * @buf_size: size of buffer for indirect commands (0 for direct commands)
1369 * @cd: pointer to command details structure
1371 * Helper function to send FW Admin Queue commands to the FW Admin Queue.
1374 ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
1375 u16 buf_size, struct ice_sq_cd *cd)
1377 return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
1382 * @hw: pointer to the HW struct
1383 * @cd: pointer to command details structure or NULL
1385 * Get the firmware version (0x0001) from the admin queue commands
1387 enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
1389 struct ice_aqc_get_ver *resp;
1390 struct ice_aq_desc desc;
1391 enum ice_status status;
1393 resp = &desc.params.get_ver;
1395 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_ver);
1397 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1400 hw->fw_branch = resp->fw_branch;
1401 hw->fw_maj_ver = resp->fw_major;
1402 hw->fw_min_ver = resp->fw_minor;
1403 hw->fw_patch = resp->fw_patch;
1404 hw->fw_build = LE32_TO_CPU(resp->fw_build);
1405 hw->api_branch = resp->api_branch;
1406 hw->api_maj_ver = resp->api_major;
1407 hw->api_min_ver = resp->api_minor;
1408 hw->api_patch = resp->api_patch;
1415 * ice_aq_send_driver_ver
1416 * @hw: pointer to the HW struct
1417 * @dv: driver's major, minor version
1418 * @cd: pointer to command details structure or NULL
1420 * Send the driver version (0x0002) to the firmware
1423 ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
1424 struct ice_sq_cd *cd)
1426 struct ice_aqc_driver_ver *cmd;
1427 struct ice_aq_desc desc;
1430 cmd = &desc.params.driver_ver;
1433 return ICE_ERR_PARAM;
1435 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
1437 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1438 cmd->major_ver = dv->major_ver;
1439 cmd->minor_ver = dv->minor_ver;
1440 cmd->build_ver = dv->build_ver;
1441 cmd->subbuild_ver = dv->subbuild_ver;
1444 while (len < sizeof(dv->driver_string) &&
1445 IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
1448 return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
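/* Illustrative usage sketch: reporting a driver version to firmware. The
 * version numbers and description string are placeholders; the fields match
 * those consumed by ice_aq_send_driver_ver() above. The helper name is
 * hypothetical.
 */
static enum ice_status ice_example_report_driver_ver(struct ice_hw *hw)
{
	struct ice_driver_ver dv = { 0 };

	dv.major_ver = 1;
	dv.minor_ver = 0;
	dv.build_ver = 0;
	dv.subbuild_ver = 0;
	ice_memcpy(dv.driver_string, "example", sizeof("example"),
		   ICE_NONDMA_TO_NONDMA);

	return ice_aq_send_driver_ver(hw, &dv, NULL);
}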
1453 * @hw: pointer to the HW struct
1454 * @unloading: is the driver unloading itself
1456 * Tell the Firmware that we're shutting down the AdminQ and whether
1457 * or not the driver is unloading as well (0x0003).
1459 enum ice_status ice_aq_q_shutdown(struct ice_hw *hw, bool unloading)
1461 struct ice_aqc_q_shutdown *cmd;
1462 struct ice_aq_desc desc;
1464 cmd = &desc.params.q_shutdown;
1466 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_q_shutdown);
1469 cmd->driver_unloading = ICE_AQC_DRIVER_UNLOADING;
1471 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
1476 * @hw: pointer to the HW struct
1478 * @access: access type
1479 * @sdp_number: resource number
1480 * @timeout: the maximum time in ms that the driver may hold the resource
1481 * @cd: pointer to command details structure or NULL
1483 * Requests common resource using the admin queue commands (0x0008).
1484 * When attempting to acquire the Global Config Lock, the driver can
1485 * learn of three states:
1486 * 1) ICE_SUCCESS - acquired lock, and can perform download package
1487 * 2) ICE_ERR_AQ_ERROR - did not get lock, driver should fail to load
1488 * 3) ICE_ERR_AQ_NO_WORK - did not get lock, but another driver has
1489 * successfully downloaded the package; the driver does
1490 * not have to download the package and can continue
1493 * Note that if the caller is in an acquire-lock, perform-action, release-lock
1494 * phase of operation, it is possible that the FW may detect a timeout and issue
1495 * a CORER. In this case, the driver will receive a CORER interrupt and will
1496 * have to determine its cause. The calling thread that is handling this flow
1497 * will likely get an error propagated back to it indicating the Download
1498 * Package, Update Package or the Release Resource AQ commands timed out.
1500 static enum ice_status
1501 ice_aq_req_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1502 enum ice_aq_res_access_type access, u8 sdp_number, u32 *timeout,
1503 struct ice_sq_cd *cd)
1505 struct ice_aqc_req_res *cmd_resp;
1506 struct ice_aq_desc desc;
1507 enum ice_status status;
1509 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1511 cmd_resp = &desc.params.res_owner;
1513 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_req_res);
1515 cmd_resp->res_id = CPU_TO_LE16(res);
1516 cmd_resp->access_type = CPU_TO_LE16(access);
1517 cmd_resp->res_number = CPU_TO_LE32(sdp_number);
1518 cmd_resp->timeout = CPU_TO_LE32(*timeout);
1521 status = ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1523 /* The completion's Timeout field specifies the maximum time in ms that
1524 * the driver may hold the resource.
1527 /* Global config lock response utilizes an additional status field.
1529 * If the Global config lock resource is held by some other driver, the
1530 * command completes with ICE_AQ_RES_GLBL_IN_PROG in the status field
1531 * and the timeout field indicates the maximum time the current owner
1532 * of the resource has to free it.
1534 if (res == ICE_GLOBAL_CFG_LOCK_RES_ID) {
1535 if (LE16_TO_CPU(cmd_resp->status) == ICE_AQ_RES_GLBL_SUCCESS) {
1536 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1538 } else if (LE16_TO_CPU(cmd_resp->status) ==
1539 ICE_AQ_RES_GLBL_IN_PROG) {
1540 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1541 return ICE_ERR_AQ_ERROR;
1542 } else if (LE16_TO_CPU(cmd_resp->status) ==
1543 ICE_AQ_RES_GLBL_DONE) {
1544 return ICE_ERR_AQ_NO_WORK;
1547 /* invalid FW response, force a timeout immediately */
1549 return ICE_ERR_AQ_ERROR;
1552 /* If the resource is held by some other driver, the command completes
1553 * with a busy return value and the timeout field indicates the maximum
1554 * time the current owner of the resource has to free it.
1556 if (!status || hw->adminq.sq_last_status == ICE_AQ_RC_EBUSY)
1557 *timeout = LE32_TO_CPU(cmd_resp->timeout);
1563 * ice_aq_release_res
1564 * @hw: pointer to the HW struct
1566 * @sdp_number: resource number
1567 * @cd: pointer to command details structure or NULL
1569 * release common resource using the admin queue commands (0x0009)
1571 static enum ice_status
1572 ice_aq_release_res(struct ice_hw *hw, enum ice_aq_res_ids res, u8 sdp_number,
1573 struct ice_sq_cd *cd)
1575 struct ice_aqc_req_res *cmd;
1576 struct ice_aq_desc desc;
1578 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1580 cmd = &desc.params.res_owner;
1582 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_release_res);
1584 cmd->res_id = CPU_TO_LE16(res);
1585 cmd->res_number = CPU_TO_LE32(sdp_number);
1587 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
1592 * @hw: pointer to the HW structure
1594 * @access: access type (read or write)
1595 * @timeout: timeout in milliseconds
1597 * This function will attempt to acquire the ownership of a resource.
1600 ice_acquire_res(struct ice_hw *hw, enum ice_aq_res_ids res,
1601 enum ice_aq_res_access_type access, u32 timeout)
1603 #define ICE_RES_POLLING_DELAY_MS 10
1604 u32 delay = ICE_RES_POLLING_DELAY_MS;
1605 u32 time_left = timeout;
1606 enum ice_status status;
1608 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1610 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1612 /* A return code of ICE_ERR_AQ_NO_WORK means that another driver has
1613 * previously acquired the resource and performed any necessary updates;
1614 * in this case the caller does not obtain the resource and has no
1615 * further work to do.
1617 if (status == ICE_ERR_AQ_NO_WORK)
1618 goto ice_acquire_res_exit;
1621 ice_debug(hw, ICE_DBG_RES, "resource %d acquire type %d failed.\n", res, access);
1623 /* If necessary, poll until the current lock owner times out */
1624 timeout = time_left;
1625 while (status && timeout && time_left) {
1626 ice_msec_delay(delay, true);
1627 timeout = (timeout > delay) ? timeout - delay : 0;
1628 status = ice_aq_req_res(hw, res, access, 0, &time_left, NULL);
1630 if (status == ICE_ERR_AQ_NO_WORK)
1631 /* lock free, but no work to do */
1638 if (status && status != ICE_ERR_AQ_NO_WORK)
1639 ice_debug(hw, ICE_DBG_RES, "resource acquire timed out.\n");
1641 ice_acquire_res_exit:
1642 if (status == ICE_ERR_AQ_NO_WORK) {
1643 if (access == ICE_RES_WRITE)
1644 ice_debug(hw, ICE_DBG_RES, "resource indicates no work to do.\n");
1646 ice_debug(hw, ICE_DBG_RES, "Warning: ICE_ERR_AQ_NO_WORK not expected\n");
1653 * @hw: pointer to the HW structure
1656 * This function will release a resource using the proper Admin Command.
1658 void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res)
1660 enum ice_status status;
1661 u32 total_delay = 0;
1663 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1665 status = ice_aq_release_res(hw, res, 0, NULL);
1667 /* there are some rare cases when trying to release the resource
1668 * results in an admin queue timeout, so handle them correctly
1670 while ((status == ICE_ERR_AQ_TIMEOUT) &&
1671 (total_delay < hw->adminq.sq_cmd_timeout)) {
1672 ice_msec_delay(1, true);
1673 status = ice_aq_release_res(hw, res, 0, NULL);
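/* Illustrative usage sketch: the acquire/perform/release pattern described for
 * the Global Config Lock above. An ICE_ERR_AQ_NO_WORK return means another
 * driver already downloaded the package, so this caller skips its own work and
 * continues. The helper name and the elided download step are hypothetical.
 */
static enum ice_status ice_example_take_global_cfg_lock(struct ice_hw *hw)
{
	enum ice_status status;

	status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, ICE_RES_WRITE,
				 ICE_GLOBAL_CFG_LOCK_TIMEOUT);
	if (status == ICE_ERR_AQ_NO_WORK)
		return ICE_SUCCESS;	/* package already downloaded elsewhere */
	if (status)
		return status;

	/* ... perform the download package operation here ... */

	ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
	return ICE_SUCCESS;
}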
1679 * ice_aq_alloc_free_res - command to allocate/free resources
1680 * @hw: pointer to the HW struct
1681 * @num_entries: number of resource entries in buffer
1682 * @buf: Indirect buffer to hold data parameters and response
1683 * @buf_size: size of buffer for indirect commands
1684 * @opc: pass in the command opcode
1685 * @cd: pointer to command details structure or NULL
1687 * Helper function to allocate/free resources using the admin queue commands
1690 ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
1691 struct ice_aqc_alloc_free_res_elem *buf, u16 buf_size,
1692 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
1694 struct ice_aqc_alloc_free_res_cmd *cmd;
1695 struct ice_aq_desc desc;
1697 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
1699 cmd = &desc.params.sw_res_ctrl;
1702 return ICE_ERR_PARAM;
1704 if (buf_size < (num_entries * sizeof(buf->elem[0])))
1705 return ICE_ERR_PARAM;
1707 ice_fill_dflt_direct_cmd_desc(&desc, opc);
1709 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
1711 cmd->num_entries = CPU_TO_LE16(num_entries);
1713 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
1717 * ice_alloc_hw_res - allocate resource
1718 * @hw: pointer to the HW struct
1719 * @type: type of resource
1720 * @num: number of resources to allocate
1721 * @btm: allocate from bottom
1722 * @res: pointer to array that will receive the resources
1725 ice_alloc_hw_res(struct ice_hw *hw, u16 type, u16 num, bool btm, u16 *res)
1727 struct ice_aqc_alloc_free_res_elem *buf;
1728 enum ice_status status;
1731 buf_len = ice_struct_size(buf, elem, num);
1732 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1734 return ICE_ERR_NO_MEMORY;
1736 /* Prepare buffer to allocate resource. */
1737 buf->num_elems = CPU_TO_LE16(num);
1738 buf->res_type = CPU_TO_LE16(type | ICE_AQC_RES_TYPE_FLAG_DEDICATED |
1739 ICE_AQC_RES_TYPE_FLAG_IGNORE_INDEX);
1741 buf->res_type |= CPU_TO_LE16(ICE_AQC_RES_TYPE_FLAG_SCAN_BOTTOM);
1743 status = ice_aq_alloc_free_res(hw, 1, buf, buf_len,
1744 ice_aqc_opc_alloc_res, NULL);
1746 goto ice_alloc_res_exit;
1748 ice_memcpy(res, buf->elem, sizeof(*buf->elem) * num,
1749 ICE_NONDMA_TO_NONDMA);
1757 * ice_free_hw_res - free allocated HW resource
1758 * @hw: pointer to the HW struct
1759 * @type: type of resource to free
1760 * @num: number of resources
1761 * @res: pointer to array that contains the resources to free
1763 enum ice_status ice_free_hw_res(struct ice_hw *hw, u16 type, u16 num, u16 *res)
1765 struct ice_aqc_alloc_free_res_elem *buf;
1766 enum ice_status status;
1769 buf_len = ice_struct_size(buf, elem, num);
1770 buf = (struct ice_aqc_alloc_free_res_elem *)ice_malloc(hw, buf_len);
1772 return ICE_ERR_NO_MEMORY;
1774 /* Prepare buffer to free resource. */
1775 buf->num_elems = CPU_TO_LE16(num);
1776 buf->res_type = CPU_TO_LE16(type);
1777 ice_memcpy(buf->elem, res, sizeof(*buf->elem) * num,
1778 ICE_NONDMA_TO_NONDMA);
1780 status = ice_aq_alloc_free_res(hw, num, buf, buf_len,
1781 ice_aqc_opc_free_res, NULL);
1783 ice_debug(hw, ICE_DBG_SW, "CQ CMD Buffer:\n");
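/* Illustrative usage sketch: allocating and immediately freeing a block of HW
 * resources with ice_alloc_hw_res()/ice_free_hw_res(). The resource type is
 * left as a parameter because the value a real caller uses depends on what is
 * being reserved. The helper name is hypothetical.
 */
static enum ice_status
ice_example_alloc_then_free(struct ice_hw *hw, u16 type, u16 num)
{
	enum ice_status status;
	u16 *res;

	res = (u16 *)ice_calloc(hw, num, sizeof(*res));
	if (!res)
		return ICE_ERR_NO_MEMORY;

	/* allocate from the top of the resource space (btm = false) */
	status = ice_alloc_hw_res(hw, type, num, false, res);
	if (!status)
		status = ice_free_hw_res(hw, type, num, res);

	ice_free(hw, res);
	return status;
}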
1790 * ice_get_num_per_func - determine number of resources per PF
1791 * @hw: pointer to the HW structure
1792 * @max: value to be evenly split between each PF
1794 * Determine the number of valid functions by going through the bitmap returned
1795 * from parsing capabilities and use this to calculate the number of resources
1796 * per PF based on the max value passed in.
1798 static u32 ice_get_num_per_func(struct ice_hw *hw, u32 max)
1802 #define ICE_CAPS_VALID_FUNCS_M 0xFF
1803 funcs = ice_hweight8(hw->dev_caps.common_cap.valid_functions &
1804 ICE_CAPS_VALID_FUNCS_M);
1813 * ice_print_led_caps - print LED capabilities
1814 * @hw: pointer to the ice_hw instance
1815 * @caps: pointer to common caps instance
1816 * @prefix: string to prefix when printing
1817 * @debug: set to indicate debug print
1820 ice_print_led_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
1821 char const *prefix, bool debug)
1826 ice_debug(hw, ICE_DBG_INIT, "%s: led_pin_num = %d\n", prefix,
1829 ice_info(hw, "%s: led_pin_num = %d\n", prefix,
1832 for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_LED; i++) {
1837 ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = %d\n",
1838 prefix, i, caps->led[i]);
1840 ice_info(hw, "%s: led[%d] = %d\n", prefix, i,
1846 * ice_print_sdp_caps - print SDP capabilities
1847 * @hw: pointer to the ice_hw instance
1848 * @caps: pointer to common caps instance
1849 * @prefix: string to prefix when printing
1850 * @debug: set to indicate debug print
1853 ice_print_sdp_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
1854 char const *prefix, bool debug)
1859 ice_debug(hw, ICE_DBG_INIT, "%s: sdp_pin_num = %d\n", prefix,
1862 ice_info(hw, "%s: sdp_pin_num = %d\n", prefix,
1865 for (i = 0; i < ICE_MAX_SUPPORTED_GPIO_SDP; i++) {
1870 ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = %d\n",
1871 prefix, i, caps->sdp[i]);
1873 ice_info(hw, "%s: sdp[%d] = %d\n", prefix,
1879 * ice_parse_common_caps - parse common device/function capabilities
1880 * @hw: pointer to the HW struct
1881 * @caps: pointer to common capabilities structure
1882 * @elem: the capability element to parse
1883 * @prefix: message prefix for tracing capabilities
1885 * Given a capability element, extract relevant details into the common
1886 * capability structure.
1888 * Returns: true if the capability matches one of the common capability ids, false otherwise.
1892 ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
1893 struct ice_aqc_list_caps_elem *elem, const char *prefix)
1895 u32 logical_id = LE32_TO_CPU(elem->logical_id);
1896 u32 phys_id = LE32_TO_CPU(elem->phys_id);
1897 u32 number = LE32_TO_CPU(elem->number);
1898 u16 cap = LE16_TO_CPU(elem->cap);
1902 case ICE_AQC_CAPS_SWITCHING_MODE:
1903 caps->switching_mode = number;
1904 ice_debug(hw, ICE_DBG_INIT, "%s: switching_mode = %d\n", prefix,
1905 caps->switching_mode);
1907 case ICE_AQC_CAPS_MANAGEABILITY_MODE:
1908 caps->mgmt_mode = number;
1909 caps->mgmt_protocols_mctp = logical_id;
1910 ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_mode = %d\n", prefix,
1912 ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_protocols_mctp = %d\n", prefix,
1913 caps->mgmt_protocols_mctp);
1915 case ICE_AQC_CAPS_OS2BMC:
1916 caps->os2bmc = number;
1917 ice_debug(hw, ICE_DBG_INIT, "%s: os2bmc = %d\n", prefix, caps->os2bmc);
1919 case ICE_AQC_CAPS_VALID_FUNCTIONS:
1920 caps->valid_functions = number;
1921 ice_debug(hw, ICE_DBG_INIT, "%s: valid_functions (bitmap) = %d\n", prefix,
1922 caps->valid_functions);
1924 case ICE_AQC_CAPS_SRIOV:
1925 caps->sr_iov_1_1 = (number == 1);
1926 ice_debug(hw, ICE_DBG_INIT, "%s: sr_iov_1_1 = %d\n", prefix,
1929 case ICE_AQC_CAPS_802_1QBG:
1930 caps->evb_802_1_qbg = (number == 1);
1931 ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbg = %d\n", prefix, number);
1933 case ICE_AQC_CAPS_802_1BR:
1934 caps->evb_802_1_qbh = (number == 1);
1935 ice_debug(hw, ICE_DBG_INIT, "%s: evb_802_1_qbh = %d\n", prefix, number);
1937 case ICE_AQC_CAPS_DCB:
1938 caps->dcb = (number == 1);
1939 caps->active_tc_bitmap = logical_id;
1940 caps->maxtc = phys_id;
1941 ice_debug(hw, ICE_DBG_INIT, "%s: dcb = %d\n", prefix, caps->dcb);
1942 ice_debug(hw, ICE_DBG_INIT, "%s: active_tc_bitmap = %d\n", prefix,
1943 caps->active_tc_bitmap);
1944 ice_debug(hw, ICE_DBG_INIT, "%s: maxtc = %d\n", prefix, caps->maxtc);
1946 case ICE_AQC_CAPS_ISCSI:
1947 caps->iscsi = (number == 1);
1948 ice_debug(hw, ICE_DBG_INIT, "%s: iscsi = %d\n", prefix, caps->iscsi);
1950 case ICE_AQC_CAPS_RSS:
1951 caps->rss_table_size = number;
1952 caps->rss_table_entry_width = logical_id;
1953 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_size = %d\n", prefix,
1954 caps->rss_table_size);
1955 ice_debug(hw, ICE_DBG_INIT, "%s: rss_table_entry_width = %d\n", prefix,
1956 caps->rss_table_entry_width);
1958 case ICE_AQC_CAPS_RXQS:
1959 caps->num_rxq = number;
1960 caps->rxq_first_id = phys_id;
1961 ice_debug(hw, ICE_DBG_INIT, "%s: num_rxq = %d\n", prefix,
1963 ice_debug(hw, ICE_DBG_INIT, "%s: rxq_first_id = %d\n", prefix,
1964 caps->rxq_first_id);
1966 case ICE_AQC_CAPS_TXQS:
1967 caps->num_txq = number;
1968 caps->txq_first_id = phys_id;
1969 ice_debug(hw, ICE_DBG_INIT, "%s: num_txq = %d\n", prefix,
1971 ice_debug(hw, ICE_DBG_INIT, "%s: txq_first_id = %d\n", prefix,
1972 caps->txq_first_id);
1974 case ICE_AQC_CAPS_MSIX:
1975 caps->num_msix_vectors = number;
1976 caps->msix_vector_first_id = phys_id;
1977 ice_debug(hw, ICE_DBG_INIT, "%s: num_msix_vectors = %d\n", prefix,
1978 caps->num_msix_vectors);
1979 ice_debug(hw, ICE_DBG_INIT, "%s: msix_vector_first_id = %d\n", prefix,
1980 caps->msix_vector_first_id);
1982 case ICE_AQC_CAPS_NVM_VER:
1984 case ICE_AQC_CAPS_NVM_MGMT:
1985 caps->nvm_unified_update =
1986 (number & ICE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
1988 ice_debug(hw, ICE_DBG_INIT, "%s: nvm_unified_update = %d\n", prefix,
1989 caps->nvm_unified_update);
1991 case ICE_AQC_CAPS_CEM:
1992 caps->mgmt_cem = (number == 1);
1993 ice_debug(hw, ICE_DBG_INIT, "%s: mgmt_cem = %d\n", prefix,
1996 case ICE_AQC_CAPS_LED:
1997 if (phys_id < ICE_MAX_SUPPORTED_GPIO_LED) {
1998 caps->led[phys_id] = true;
1999 caps->led_pin_num++;
2000 ice_debug(hw, ICE_DBG_INIT, "%s: led[%d] = 1\n", prefix, phys_id);
2003 case ICE_AQC_CAPS_SDP:
2004 if (phys_id < ICE_MAX_SUPPORTED_GPIO_SDP) {
2005 caps->sdp[phys_id] = true;
2006 caps->sdp_pin_num++;
2007 ice_debug(hw, ICE_DBG_INIT, "%s: sdp[%d] = 1\n", prefix, phys_id);
2010 case ICE_AQC_CAPS_WR_CSR_PROT:
2011 caps->wr_csr_prot = number;
2012 caps->wr_csr_prot |= (u64)logical_id << 32;
2013 ice_debug(hw, ICE_DBG_INIT, "%s: wr_csr_prot = 0x%llX\n", prefix,
2014 (unsigned long long)caps->wr_csr_prot);
2016 case ICE_AQC_CAPS_WOL_PROXY:
2017 caps->num_wol_proxy_fltr = number;
2018 caps->wol_proxy_vsi_seid = logical_id;
2019 caps->apm_wol_support = !!(phys_id & ICE_WOL_SUPPORT_M);
2020 caps->acpi_prog_mthd = !!(phys_id &
2021 ICE_ACPI_PROG_MTHD_M);
2022 caps->proxy_support = !!(phys_id & ICE_PROXY_SUPPORT_M);
2023 ice_debug(hw, ICE_DBG_INIT, "%s: num_wol_proxy_fltr = %d\n", prefix,
2024 caps->num_wol_proxy_fltr);
2025 ice_debug(hw, ICE_DBG_INIT, "%s: wol_proxy_vsi_seid = %d\n", prefix,
2026 caps->wol_proxy_vsi_seid);
2028 case ICE_AQC_CAPS_MAX_MTU:
2029 caps->max_mtu = number;
2030 ice_debug(hw, ICE_DBG_INIT, "%s: max_mtu = %d\n",
2031 prefix, caps->max_mtu);
2034 /* Not one of the recognized common capabilities */
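/* Illustrative decode (made-up values): each ice_aqc_list_caps_elem carries
 * a 16-bit capability ID plus three 32-bit payload words (number,
 * logical_id, phys_id) whose meaning depends on that ID. For example, a
 * hypothetical ICE_AQC_CAPS_WR_CSR_PROT element with number = 0x12345678
 * and logical_id = 0x9ABC would be folded, exactly as the case above does,
 * into:
 *
 *	caps->wr_csr_prot = 0x12345678 | ((u64)0x9ABC << 32);
 *	// caps->wr_csr_prot == 0x00009ABC12345678
 */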
2042 * ice_recalc_port_limited_caps - Recalculate port limited capabilities
2043 * @hw: pointer to the HW structure
2044 * @caps: pointer to capabilities structure to fix
2046 * Re-calculate the capabilities that are dependent on the number of physical
2047 * ports; i.e. some features are not supported or function differently on
2048 * devices with more than 4 ports.
2051 ice_recalc_port_limited_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps)
2053 /* This assumes device capabilities are always scanned before function
2054 * capabilities during the initialization flow.
2056 if (hw->dev_caps.num_funcs > 4) {
2057 /* Max 4 TCs per port */
2059 ice_debug(hw, ICE_DBG_INIT, "reducing maxtc to %d (based on #ports)\n",
2065 * ice_parse_vf_func_caps - Parse ICE_AQC_CAPS_VF function caps
2066 * @hw: pointer to the HW struct
2067 * @func_p: pointer to function capabilities structure
2068 * @cap: pointer to the capability element to parse
2070 * Extract function capabilities for ICE_AQC_CAPS_VF.
2073 ice_parse_vf_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2074 struct ice_aqc_list_caps_elem *cap)
2076 u32 number = LE32_TO_CPU(cap->number);
2077 u32 logical_id = LE32_TO_CPU(cap->logical_id);
2079 func_p->num_allocd_vfs = number;
2080 func_p->vf_base_id = logical_id;
2081 ice_debug(hw, ICE_DBG_INIT, "func caps: num_allocd_vfs = %d\n",
2082 func_p->num_allocd_vfs);
2083 ice_debug(hw, ICE_DBG_INIT, "func caps: vf_base_id = %d\n",
2084 func_p->vf_base_id);
2088 * ice_parse_vsi_func_caps - Parse ICE_AQC_CAPS_VSI function caps
2089 * @hw: pointer to the HW struct
2090 * @func_p: pointer to function capabilities structure
2091 * @cap: pointer to the capability element to parse
2093 * Extract function capabilities for ICE_AQC_CAPS_VSI.
2096 ice_parse_vsi_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2097 struct ice_aqc_list_caps_elem *cap)
2099 func_p->guar_num_vsi = ice_get_num_per_func(hw, ICE_MAX_VSI);
2100 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi (fw) = %d\n",
2101 LE32_TO_CPU(cap->number));
2102 ice_debug(hw, ICE_DBG_INIT, "func caps: guar_num_vsi = %d\n",
2103 func_p->guar_num_vsi);
2107 * ice_parse_func_caps - Parse function capabilities
2108 * @hw: pointer to the HW struct
2109 * @func_p: pointer to function capabilities structure
2110 * @buf: buffer containing the function capability records
2111 * @cap_count: the number of capabilities
2113 * Helper function to parse function (0x000A) capabilities list. For
2114 * capabilities shared between device and function, this relies on
2115 * ice_parse_common_caps.
2117 * Loop through the list of provided capabilities and extract the relevant
2118 * data into the function capabilities structure.
2121 ice_parse_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p,
2122 void *buf, u32 cap_count)
2124 struct ice_aqc_list_caps_elem *cap_resp;
2127 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2129 ice_memset(func_p, 0, sizeof(*func_p), ICE_NONDMA_MEM);
2131 for (i = 0; i < cap_count; i++) {
2132 u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2135 found = ice_parse_common_caps(hw, &func_p->common_cap,
2136 &cap_resp[i], "func caps");
2139 case ICE_AQC_CAPS_VF:
2140 ice_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
2142 case ICE_AQC_CAPS_VSI:
2143 ice_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
2146 /* Don't list common capabilities as unknown */
2148 ice_debug(hw, ICE_DBG_INIT, "func caps: unknown capability[%d]: 0x%x\n",
2154 ice_print_led_caps(hw, &func_p->common_cap, "func caps", true);
2155 ice_print_sdp_caps(hw, &func_p->common_cap, "func caps", true);
2157 ice_recalc_port_limited_caps(hw, &func_p->common_cap);
2161 * ice_parse_valid_functions_cap - Parse ICE_AQC_CAPS_VALID_FUNCTIONS caps
2162 * @hw: pointer to the HW struct
2163 * @dev_p: pointer to device capabilities structure
2164 * @cap: capability element to parse
2166 * Parse ICE_AQC_CAPS_VALID_FUNCTIONS for device capabilities.
2169 ice_parse_valid_functions_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2170 struct ice_aqc_list_caps_elem *cap)
2172 u32 number = LE32_TO_CPU(cap->number);
2174 dev_p->num_funcs = ice_hweight32(number);
2175 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_funcs = %d\n",
2180 * ice_parse_vf_dev_caps - Parse ICE_AQC_CAPS_VF device caps
2181 * @hw: pointer to the HW struct
2182 * @dev_p: pointer to device capabilities structure
2183 * @cap: capability element to parse
2185 * Parse ICE_AQC_CAPS_VF for device capabilities.
2188 ice_parse_vf_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2189 struct ice_aqc_list_caps_elem *cap)
2191 u32 number = LE32_TO_CPU(cap->number);
2193 dev_p->num_vfs_exposed = number;
2194 ice_debug(hw, ICE_DBG_INIT, "dev_caps: num_vfs_exposed = %d\n",
2195 dev_p->num_vfs_exposed);
2199 * ice_parse_vsi_dev_caps - Parse ICE_AQC_CAPS_VSI device caps
2200 * @hw: pointer to the HW struct
2201 * @dev_p: pointer to device capabilities structure
2202 * @cap: capability element to parse
2204 * Parse ICE_AQC_CAPS_VSI for device capabilities.
2207 ice_parse_vsi_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2208 struct ice_aqc_list_caps_elem *cap)
2210 u32 number = LE32_TO_CPU(cap->number);
2212 dev_p->num_vsi_allocd_to_host = number;
2213 ice_debug(hw, ICE_DBG_INIT, "dev caps: num_vsi_allocd_to_host = %d\n",
2214 dev_p->num_vsi_allocd_to_host);
2218 * ice_parse_dev_caps - Parse device capabilities
2219 * @hw: pointer to the HW struct
2220 * @dev_p: pointer to device capabilities structure
2221 * @buf: buffer containing the device capability records
2222 * @cap_count: the number of capabilities
2224 * Helper function to parse device (0x000B) capabilities list. For
2225 * capabilities shared between device and function, this relies on
2226 * ice_parse_common_caps.
2228 * Loop through the list of provided capabilities and extract the relevant
2229 * data into the device capabilities structure.
2232 ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p,
2233 void *buf, u32 cap_count)
2235 struct ice_aqc_list_caps_elem *cap_resp;
2238 cap_resp = (struct ice_aqc_list_caps_elem *)buf;
2240 ice_memset(dev_p, 0, sizeof(*dev_p), ICE_NONDMA_MEM);
2242 for (i = 0; i < cap_count; i++) {
2243 u16 cap = LE16_TO_CPU(cap_resp[i].cap);
2246 found = ice_parse_common_caps(hw, &dev_p->common_cap,
2247 &cap_resp[i], "dev caps");
2250 case ICE_AQC_CAPS_VALID_FUNCTIONS:
2251 ice_parse_valid_functions_cap(hw, dev_p, &cap_resp[i]);
2253 case ICE_AQC_CAPS_VF:
2254 ice_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
2256 case ICE_AQC_CAPS_VSI:
2257 ice_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
2260 /* Don't list common capabilities as unknown */
2262 ice_debug(hw, ICE_DBG_INIT, "dev caps: unknown capability[%d]: 0x%x\n",
2268 ice_print_led_caps(hw, &dev_p->common_cap, "dev caps", true);
2269 ice_print_sdp_caps(hw, &dev_p->common_cap, "dev caps", true);
2271 ice_recalc_port_limited_caps(hw, &dev_p->common_cap);
2275 * ice_aq_list_caps - query function/device capabilities
2276 * @hw: pointer to the HW struct
2277 * @buf: a buffer to hold the capabilities
2278 * @buf_size: size of the buffer
2279 * @cap_count: if not NULL, set to the number of capabilities reported
2280 * @opc: capabilities type to discover, device or function
2281 * @cd: pointer to command details structure or NULL
2283 * Get the function (0x000A) or device (0x000B) capabilities description from
2284 * firmware and store it in the buffer.
2286 * If the cap_count pointer is not NULL, then it is set to the number of
2287 * capabilities firmware will report. Note that if the buffer size is too
2288 * small, it is possible the command will return ICE_AQ_ERR_ENOMEM. The
2289 * cap_count will still be updated in this case. It is recommended that the
2290 * buffer size be set to ICE_AQ_MAX_BUF_LEN (the largest possible buffer that
2291 * firmware could return) to avoid this.
2293 static enum ice_status
2294 ice_aq_list_caps(struct ice_hw *hw, void *buf, u16 buf_size, u32 *cap_count,
2295 enum ice_adminq_opc opc, struct ice_sq_cd *cd)
2297 struct ice_aqc_list_caps *cmd;
2298 struct ice_aq_desc desc;
2299 enum ice_status status;
2301 cmd = &desc.params.get_cap;
2303 if (opc != ice_aqc_opc_list_func_caps &&
2304 opc != ice_aqc_opc_list_dev_caps)
2305 return ICE_ERR_PARAM;
2307 ice_fill_dflt_direct_cmd_desc(&desc, opc);
2308 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
2311 *cap_count = LE32_TO_CPU(cmd->count);
2317 * ice_discover_dev_caps - Read and extract device capabilities
2318 * @hw: pointer to the hardware structure
2319 * @dev_caps: pointer to device capabilities structure
2321 * Read the device capabilities and extract them into the dev_caps structure for later use.
2324 static enum ice_status
2325 ice_discover_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_caps)
2327 enum ice_status status;
2331 cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2333 return ICE_ERR_NO_MEMORY;
2335 /* Although the driver doesn't know the number of capabilities the
2336 * device will return, we can simply send a 4KB buffer, the maximum
2337 * possible size that firmware can return.
2339 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2341 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2342 ice_aqc_opc_list_dev_caps, NULL);
2344 ice_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
2351 * ice_discover_func_caps - Read and extract function capabilities
2352 * @hw: pointer to the hardware structure
2353 * @func_caps: pointer to function capabilities structure
2355 * Read the function capabilities and extract them into the func_caps structure for later use.
2358 static enum ice_status
2359 ice_discover_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_caps)
2361 enum ice_status status;
2365 cbuf = ice_malloc(hw, ICE_AQ_MAX_BUF_LEN);
2367 return ICE_ERR_NO_MEMORY;
2369 /* Although the driver doesn't know the number of capabilities the
2370 * device will return, we can simply send a 4KB buffer, the maximum
2371 * possible size that firmware can return.
2373 cap_count = ICE_AQ_MAX_BUF_LEN / sizeof(struct ice_aqc_list_caps_elem);
2375 status = ice_aq_list_caps(hw, cbuf, ICE_AQ_MAX_BUF_LEN, &cap_count,
2376 ice_aqc_opc_list_func_caps, NULL);
2378 ice_parse_func_caps(hw, func_caps, cbuf, cap_count);
2385 * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
2386 * @hw: pointer to the hardware structure
2388 void ice_set_safe_mode_caps(struct ice_hw *hw)
2390 struct ice_hw_func_caps *func_caps = &hw->func_caps;
2391 struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
2392 u32 valid_func, rxq_first_id, txq_first_id;
2393 u32 msix_vector_first_id, max_mtu;
2396 /* cache some func_caps values that should be restored after memset */
2397 valid_func = func_caps->common_cap.valid_functions;
2398 txq_first_id = func_caps->common_cap.txq_first_id;
2399 rxq_first_id = func_caps->common_cap.rxq_first_id;
2400 msix_vector_first_id = func_caps->common_cap.msix_vector_first_id;
2401 max_mtu = func_caps->common_cap.max_mtu;
2403 /* unset func capabilities */
2404 memset(func_caps, 0, sizeof(*func_caps));
2406 /* restore cached values */
2407 func_caps->common_cap.valid_functions = valid_func;
2408 func_caps->common_cap.txq_first_id = txq_first_id;
2409 func_caps->common_cap.rxq_first_id = rxq_first_id;
2410 func_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
2411 func_caps->common_cap.max_mtu = max_mtu;
2413 /* one Tx and one Rx queue in safe mode */
2414 func_caps->common_cap.num_rxq = 1;
2415 func_caps->common_cap.num_txq = 1;
2417 /* two MSIX vectors, one for traffic and one for misc causes */
2418 func_caps->common_cap.num_msix_vectors = 2;
2419 func_caps->guar_num_vsi = 1;
2421 /* cache some dev_caps values that should be restored after memset */
2422 valid_func = dev_caps->common_cap.valid_functions;
2423 txq_first_id = dev_caps->common_cap.txq_first_id;
2424 rxq_first_id = dev_caps->common_cap.rxq_first_id;
2425 msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id;
2426 max_mtu = dev_caps->common_cap.max_mtu;
2427 num_funcs = dev_caps->num_funcs;
2429 /* unset dev capabilities */
2430 memset(dev_caps, 0, sizeof(*dev_caps));
2432 /* restore cached values */
2433 dev_caps->common_cap.valid_functions = valid_func;
2434 dev_caps->common_cap.txq_first_id = txq_first_id;
2435 dev_caps->common_cap.rxq_first_id = rxq_first_id;
2436 dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
2437 dev_caps->common_cap.max_mtu = max_mtu;
2438 dev_caps->num_funcs = num_funcs;
2440 /* one Tx and one Rx queue per function in safe mode */
2441 dev_caps->common_cap.num_rxq = num_funcs;
2442 dev_caps->common_cap.num_txq = num_funcs;
2444 /* two MSIX vectors per function */
2445 dev_caps->common_cap.num_msix_vectors = 2 * num_funcs;
2449 * ice_get_caps - get info about the HW
2450 * @hw: pointer to the hardware structure
2452 enum ice_status ice_get_caps(struct ice_hw *hw)
2454 enum ice_status status;
2456 status = ice_discover_dev_caps(hw, &hw->dev_caps);
2460 return ice_discover_func_caps(hw, &hw->func_caps);
2464 * ice_aq_manage_mac_write - manage MAC address write command
2465 * @hw: pointer to the HW struct
2466 * @mac_addr: MAC address to be written as LAA/LAA+WoL/Port address
2467 * @flags: flags to control write behavior
2468 * @cd: pointer to command details structure or NULL
2470 * This function is used to write MAC address to the NVM (0x0108).
2473 ice_aq_manage_mac_write(struct ice_hw *hw, const u8 *mac_addr, u8 flags,
2474 struct ice_sq_cd *cd)
2476 struct ice_aqc_manage_mac_write *cmd;
2477 struct ice_aq_desc desc;
2479 cmd = &desc.params.mac_write;
2480 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_manage_mac_write);
2483 ice_memcpy(cmd->mac_addr, mac_addr, ETH_ALEN, ICE_NONDMA_TO_DMA);
2485 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2489 * ice_aq_clear_pxe_mode
2490 * @hw: pointer to the HW struct
2492 * Tell the firmware that the driver is taking over from PXE (0x0110).
2494 static enum ice_status ice_aq_clear_pxe_mode(struct ice_hw *hw)
2496 struct ice_aq_desc desc;
2498 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_pxe_mode);
2499 desc.params.clear_pxe.rx_cnt = ICE_AQC_CLEAR_PXE_RX_CNT;
2501 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
2505 * ice_clear_pxe_mode - clear pxe operations mode
2506 * @hw: pointer to the HW struct
2508 * Make sure all PXE mode settings are cleared, including things
2509 * like descriptor fetch/write-back mode.
2511 void ice_clear_pxe_mode(struct ice_hw *hw)
2513 if (ice_check_sq_alive(hw, &hw->adminq))
2514 ice_aq_clear_pxe_mode(hw);
2518 * ice_aq_set_port_params - set physical port parameters.
2519 * @pi: pointer to the port info struct
2520 * @bad_frame_vsi: defines the VSI to which bad frames are forwarded
2521 * @save_bad_pac: if set, packets with errors are forwarded to the bad frames VSI
2522 * @pad_short_pac: if set, transmit packets smaller than 60 bytes are padded
2523 * @double_vlan: if set, double VLAN is enabled
2524 * @cd: pointer to command details structure or NULL
2526 * Set Physical port parameters (0x0203)
2529 ice_aq_set_port_params(struct ice_port_info *pi, u16 bad_frame_vsi,
2530 bool save_bad_pac, bool pad_short_pac, bool double_vlan,
2531 struct ice_sq_cd *cd)
2534 struct ice_aqc_set_port_params *cmd;
2535 struct ice_hw *hw = pi->hw;
2536 struct ice_aq_desc desc;
2539 cmd = &desc.params.set_port_params;
2541 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_params);
2542 cmd->bad_frame_vsi = CPU_TO_LE16(bad_frame_vsi);
2544 cmd_flags |= ICE_AQC_SET_P_PARAMS_SAVE_BAD_PACKETS;
2546 cmd_flags |= ICE_AQC_SET_P_PARAMS_PAD_SHORT_PACKETS;
2548 cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA;
2549 cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
2551 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
2555 * ice_get_link_speed_based_on_phy_type - returns link speed
2556 * @phy_type_low: lower part of phy_type
2557 * @phy_type_high: higher part of phy_type
2559 * This helper function will convert an entry in PHY type structure
2560 * [phy_type_low, phy_type_high] to its corresponding link speed.
2561 * Note: In the structure of [phy_type_low, phy_type_high], there should
2562 * be one bit set, as this function will convert one PHY type to its speed.
2564 * If no bit is set, ICE_LINK_SPEED_UNKNOWN will be returned.
2565 * If more than one bit is set, ICE_LINK_SPEED_UNKNOWN will be returned.
2568 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high)
2570 u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2571 u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2573 switch (phy_type_low) {
2574 case ICE_PHY_TYPE_LOW_100BASE_TX:
2575 case ICE_PHY_TYPE_LOW_100M_SGMII:
2576 speed_phy_type_low = ICE_AQ_LINK_SPEED_100MB;
2578 case ICE_PHY_TYPE_LOW_1000BASE_T:
2579 case ICE_PHY_TYPE_LOW_1000BASE_SX:
2580 case ICE_PHY_TYPE_LOW_1000BASE_LX:
2581 case ICE_PHY_TYPE_LOW_1000BASE_KX:
2582 case ICE_PHY_TYPE_LOW_1G_SGMII:
2583 speed_phy_type_low = ICE_AQ_LINK_SPEED_1000MB;
2585 case ICE_PHY_TYPE_LOW_2500BASE_T:
2586 case ICE_PHY_TYPE_LOW_2500BASE_X:
2587 case ICE_PHY_TYPE_LOW_2500BASE_KX:
2588 speed_phy_type_low = ICE_AQ_LINK_SPEED_2500MB;
2590 case ICE_PHY_TYPE_LOW_5GBASE_T:
2591 case ICE_PHY_TYPE_LOW_5GBASE_KR:
2592 speed_phy_type_low = ICE_AQ_LINK_SPEED_5GB;
2594 case ICE_PHY_TYPE_LOW_10GBASE_T:
2595 case ICE_PHY_TYPE_LOW_10G_SFI_DA:
2596 case ICE_PHY_TYPE_LOW_10GBASE_SR:
2597 case ICE_PHY_TYPE_LOW_10GBASE_LR:
2598 case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
2599 case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
2600 case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
2601 speed_phy_type_low = ICE_AQ_LINK_SPEED_10GB;
2603 case ICE_PHY_TYPE_LOW_25GBASE_T:
2604 case ICE_PHY_TYPE_LOW_25GBASE_CR:
2605 case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
2606 case ICE_PHY_TYPE_LOW_25GBASE_CR1:
2607 case ICE_PHY_TYPE_LOW_25GBASE_SR:
2608 case ICE_PHY_TYPE_LOW_25GBASE_LR:
2609 case ICE_PHY_TYPE_LOW_25GBASE_KR:
2610 case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
2611 case ICE_PHY_TYPE_LOW_25GBASE_KR1:
2612 case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
2613 case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
2614 speed_phy_type_low = ICE_AQ_LINK_SPEED_25GB;
2616 case ICE_PHY_TYPE_LOW_40GBASE_CR4:
2617 case ICE_PHY_TYPE_LOW_40GBASE_SR4:
2618 case ICE_PHY_TYPE_LOW_40GBASE_LR4:
2619 case ICE_PHY_TYPE_LOW_40GBASE_KR4:
2620 case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
2621 case ICE_PHY_TYPE_LOW_40G_XLAUI:
2622 speed_phy_type_low = ICE_AQ_LINK_SPEED_40GB;
2624 case ICE_PHY_TYPE_LOW_50GBASE_CR2:
2625 case ICE_PHY_TYPE_LOW_50GBASE_SR2:
2626 case ICE_PHY_TYPE_LOW_50GBASE_LR2:
2627 case ICE_PHY_TYPE_LOW_50GBASE_KR2:
2628 case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
2629 case ICE_PHY_TYPE_LOW_50G_LAUI2:
2630 case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
2631 case ICE_PHY_TYPE_LOW_50G_AUI2:
2632 case ICE_PHY_TYPE_LOW_50GBASE_CP:
2633 case ICE_PHY_TYPE_LOW_50GBASE_SR:
2634 case ICE_PHY_TYPE_LOW_50GBASE_FR:
2635 case ICE_PHY_TYPE_LOW_50GBASE_LR:
2636 case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
2637 case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
2638 case ICE_PHY_TYPE_LOW_50G_AUI1:
2639 speed_phy_type_low = ICE_AQ_LINK_SPEED_50GB;
2641 case ICE_PHY_TYPE_LOW_100GBASE_CR4:
2642 case ICE_PHY_TYPE_LOW_100GBASE_SR4:
2643 case ICE_PHY_TYPE_LOW_100GBASE_LR4:
2644 case ICE_PHY_TYPE_LOW_100GBASE_KR4:
2645 case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
2646 case ICE_PHY_TYPE_LOW_100G_CAUI4:
2647 case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
2648 case ICE_PHY_TYPE_LOW_100G_AUI4:
2649 case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
2650 case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
2651 case ICE_PHY_TYPE_LOW_100GBASE_CP2:
2652 case ICE_PHY_TYPE_LOW_100GBASE_SR2:
2653 case ICE_PHY_TYPE_LOW_100GBASE_DR:
2654 speed_phy_type_low = ICE_AQ_LINK_SPEED_100GB;
2657 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
2661 switch (phy_type_high) {
2662 case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
2663 case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
2664 case ICE_PHY_TYPE_HIGH_100G_CAUI2:
2665 case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
2666 case ICE_PHY_TYPE_HIGH_100G_AUI2:
2667 speed_phy_type_high = ICE_AQ_LINK_SPEED_100GB;
2670 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN;
2674 if (speed_phy_type_low == ICE_AQ_LINK_SPEED_UNKNOWN &&
2675 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2676 return ICE_AQ_LINK_SPEED_UNKNOWN;
2677 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2678 speed_phy_type_high != ICE_AQ_LINK_SPEED_UNKNOWN)
2679 return ICE_AQ_LINK_SPEED_UNKNOWN;
2680 else if (speed_phy_type_low != ICE_AQ_LINK_SPEED_UNKNOWN &&
2681 speed_phy_type_high == ICE_AQ_LINK_SPEED_UNKNOWN)
2682 return speed_phy_type_low;
2684 return speed_phy_type_high;
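/* Illustrative call (assuming the BIT_ULL() encoding of the
 * ICE_PHY_TYPE_LOW_* macros): with exactly one bit set, the helper returns
 * that PHY type's speed.
 *
 *	u16 speed;
 *
 *	speed = ice_get_link_speed_based_on_phy_type(ICE_PHY_TYPE_LOW_25GBASE_CR,
 *						      0);
 *	// speed == ICE_AQ_LINK_SPEED_25GB
 *
 * Passing no set bits, or set bits in both words, returns
 * ICE_AQ_LINK_SPEED_UNKNOWN as described above.
 */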
2688 * ice_update_phy_type
2689 * @phy_type_low: pointer to the lower part of phy_type
2690 * @phy_type_high: pointer to the higher part of phy_type
2691 * @link_speeds_bitmap: targeted link speeds bitmap
2693 * Note: For the link_speeds_bitmap structure, see
2694 * [ice_aqc_get_link_status->link_speed]. The caller can pass in a
2695 * link_speeds_bitmap that includes multiple speeds.
2697 * Each entry in this [phy_type_low, phy_type_high] structure will
2698 * represent a certain link speed. This helper function will turn on bits
2699 * in [phy_type_low, phy_type_high] structure based on the value of
2700 * link_speeds_bitmap input parameter.
2703 ice_update_phy_type(u64 *phy_type_low, u64 *phy_type_high,
2704 u16 link_speeds_bitmap)
2711 /* We first check with low part of phy_type */
2712 for (index = 0; index <= ICE_PHY_TYPE_LOW_MAX_INDEX; index++) {
2713 pt_low = BIT_ULL(index);
2714 speed = ice_get_link_speed_based_on_phy_type(pt_low, 0);
2716 if (link_speeds_bitmap & speed)
2717 *phy_type_low |= BIT_ULL(index);
2720 /* We then check with high part of phy_type */
2721 for (index = 0; index <= ICE_PHY_TYPE_HIGH_MAX_INDEX; index++) {
2722 pt_high = BIT_ULL(index);
2723 speed = ice_get_link_speed_based_on_phy_type(0, pt_high);
2725 if (link_speeds_bitmap & speed)
2726 *phy_type_high |= BIT_ULL(index);
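/* Minimal usage sketch for ice_update_phy_type (illustrative only):
 * starting from cleared masks, requesting 10G and 25G turns on every PHY
 * type bit whose speed matches either value.
 *
 *	u64 phy_low = 0, phy_high = 0;
 *
 *	ice_update_phy_type(&phy_low, &phy_high,
 *			    ICE_AQ_LINK_SPEED_10GB | ICE_AQ_LINK_SPEED_25GB);
 *	// phy_low now has the 10G and 25G PHY type bits set; phy_high stays
 *	// zero because only 100G types live in the high word.
 */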
2731 * ice_aq_set_phy_cfg
2732 * @hw: pointer to the HW struct
2733 * @pi: port info structure of the interested logical port
2734 * @cfg: structure with PHY configuration data to be set
2735 * @cd: pointer to command details structure or NULL
2737 * Set the various PHY configuration parameters supported on the Port.
2738 * One or more of the Set PHY config parameters may be ignored in an MFP
2739 * mode as the PF may not have the privilege to set some of the PHY Config
2740 * parameters. This status will be indicated by the command response (0x0601).
2743 ice_aq_set_phy_cfg(struct ice_hw *hw, struct ice_port_info *pi,
2744 struct ice_aqc_set_phy_cfg_data *cfg, struct ice_sq_cd *cd)
2746 struct ice_aq_desc desc;
2747 enum ice_status status;
2750 return ICE_ERR_PARAM;
2752 /* Ensure that only valid bits of cfg->caps can be turned on. */
2753 if (cfg->caps & ~ICE_AQ_PHY_ENA_VALID_MASK) {
2754 ice_debug(hw, ICE_DBG_PHY, "Invalid bit is set in ice_aqc_set_phy_cfg_data->caps : 0x%x\n",
2757 cfg->caps &= ICE_AQ_PHY_ENA_VALID_MASK;
2760 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_phy_cfg);
2761 desc.params.set_phy.lport_num = pi->lport;
2762 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
2764 ice_debug(hw, ICE_DBG_LINK, "set phy cfg\n");
2765 ice_debug(hw, ICE_DBG_LINK, " phy_type_low = 0x%llx\n",
2766 (unsigned long long)LE64_TO_CPU(cfg->phy_type_low));
2767 ice_debug(hw, ICE_DBG_LINK, " phy_type_high = 0x%llx\n",
2768 (unsigned long long)LE64_TO_CPU(cfg->phy_type_high));
2769 ice_debug(hw, ICE_DBG_LINK, " caps = 0x%x\n", cfg->caps);
2770 ice_debug(hw, ICE_DBG_LINK, " low_power_ctrl_an = 0x%x\n",
2771 cfg->low_power_ctrl_an);
2772 ice_debug(hw, ICE_DBG_LINK, " eee_cap = 0x%x\n", cfg->eee_cap);
2773 ice_debug(hw, ICE_DBG_LINK, " eeer_value = 0x%x\n", cfg->eeer_value);
2774 ice_debug(hw, ICE_DBG_LINK, " link_fec_opt = 0x%x\n",
2777 status = ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
2779 if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
2780 status = ICE_SUCCESS;
2783 pi->phy.curr_user_phy_cfg = *cfg;
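/* Usage sketch: the usual pattern around ice_aq_set_phy_cfg is
 * read-modify-write (error handling omitted for brevity; a real caller
 * should heap-allocate pcaps as ice_set_fc below does): query the current
 * abilities, convert them to a configuration, adjust the fields of
 * interest, then apply.
 *
 *	struct ice_aqc_get_phy_caps_data pcaps = { 0 };
 *	struct ice_aqc_set_phy_cfg_data cfg = { 0 };
 *
 *	ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, &pcaps, NULL);
 *	ice_copy_phy_caps_to_cfg(pi, &pcaps, &cfg);
 *	cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
 *	ice_aq_set_phy_cfg(pi->hw, pi, &cfg, NULL);
 */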
2789 * ice_update_link_info - update status of the HW network link
2790 * @pi: port info structure of the interested logical port
2792 enum ice_status ice_update_link_info(struct ice_port_info *pi)
2794 struct ice_link_status *li;
2795 enum ice_status status;
2798 return ICE_ERR_PARAM;
2800 li = &pi->phy.link_info;
2802 status = ice_aq_get_link_info(pi, true, NULL, NULL);
2806 if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
2807 struct ice_aqc_get_phy_caps_data *pcaps;
2811 pcaps = (struct ice_aqc_get_phy_caps_data *)
2812 ice_malloc(hw, sizeof(*pcaps));
2814 return ICE_ERR_NO_MEMORY;
2816 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
2819 ice_free(hw, pcaps);
2826 * ice_cache_phy_user_req
2827 * @pi: port information structure
2828 * @cache_data: PHY logging data
2829 * @cache_mode: PHY logging mode
2831 * Log the user request on (FC, FEC, SPEED) for later use.
2834 ice_cache_phy_user_req(struct ice_port_info *pi,
2835 struct ice_phy_cache_mode_data cache_data,
2836 enum ice_phy_cache_mode cache_mode)
2841 switch (cache_mode) {
2843 pi->phy.curr_user_fc_req = cache_data.data.curr_user_fc_req;
2845 case ICE_SPEED_MODE:
2846 pi->phy.curr_user_speed_req =
2847 cache_data.data.curr_user_speed_req;
2850 pi->phy.curr_user_fec_req = cache_data.data.curr_user_fec_req;
2858 * ice_caps_to_fc_mode
2859 * @caps: PHY capabilities
2861 * Convert PHY FC capabilities to ice FC mode
2863 enum ice_fc_mode ice_caps_to_fc_mode(u8 caps)
2865 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE &&
2866 caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2869 if (caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE)
2870 return ICE_FC_TX_PAUSE;
2872 if (caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE)
2873 return ICE_FC_RX_PAUSE;
2879 * ice_caps_to_fec_mode
2880 * @caps: PHY capabilities
2881 * @fec_options: Link FEC options
2883 * Convert PHY FEC capabilities to ice FEC mode
2885 enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options)
2887 if (caps & ICE_AQC_PHY_EN_AUTO_FEC)
2888 return ICE_FEC_AUTO;
2890 if (fec_options & (ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
2891 ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
2892 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN |
2893 ICE_AQC_PHY_FEC_25G_KR_REQ))
2894 return ICE_FEC_BASER;
2896 if (fec_options & (ICE_AQC_PHY_FEC_25G_RS_528_REQ |
2897 ICE_AQC_PHY_FEC_25G_RS_544_REQ |
2898 ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN))
2901 return ICE_FEC_NONE;
2905 * ice_cfg_phy_fc - Configure PHY FC data based on FC mode
2906 * @pi: port information structure
2907 * @cfg: PHY configuration data to set FC mode
2908 * @req_mode: FC mode to configure
2910 static enum ice_status
2911 ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
2912 enum ice_fc_mode req_mode)
2914 struct ice_phy_cache_mode_data cache_data;
2915 u8 pause_mask = 0x0;
2918 return ICE_ERR_BAD_PTR;
2923 struct ice_aqc_get_phy_caps_data *pcaps;
2924 enum ice_status status;
2926 pcaps = (struct ice_aqc_get_phy_caps_data *)
2927 ice_malloc(pi->hw, sizeof(*pcaps));
2929 return ICE_ERR_NO_MEMORY;
2931 /* Query the value of FC that both the NIC and the attached media can do. */
2934 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
2937 ice_free(pi->hw, pcaps);
2941 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2942 pause_mask |= pcaps->caps & ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2944 ice_free(pi->hw, pcaps);
2948 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2949 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2951 case ICE_FC_RX_PAUSE:
2952 pause_mask |= ICE_AQC_PHY_EN_RX_LINK_PAUSE;
2954 case ICE_FC_TX_PAUSE:
2955 pause_mask |= ICE_AQC_PHY_EN_TX_LINK_PAUSE;
2961 /* clear the old pause settings */
2962 cfg->caps &= ~(ICE_AQC_PHY_EN_TX_LINK_PAUSE |
2963 ICE_AQC_PHY_EN_RX_LINK_PAUSE);
2965 /* set the new capabilities */
2966 cfg->caps |= pause_mask;
2968 /* Cache user FC request */
2969 cache_data.data.curr_user_fc_req = req_mode;
2970 ice_cache_phy_user_req(pi, cache_data, ICE_FC_MODE);
2976 * ice_set_fc
2977 * @pi: port information structure
2978 * @aq_failures: pointer to status code, specific to ice_set_fc routine
2979 * @ena_auto_link_update: enable automatic link update
2981 * Set the requested flow control mode.
2984 ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
2986 struct ice_aqc_set_phy_cfg_data cfg = { 0 };
2987 struct ice_aqc_get_phy_caps_data *pcaps;
2988 enum ice_status status;
2991 if (!pi || !aq_failures)
2992 return ICE_ERR_BAD_PTR;
2997 pcaps = (struct ice_aqc_get_phy_caps_data *)
2998 ice_malloc(hw, sizeof(*pcaps));
3000 return ICE_ERR_NO_MEMORY;
3002 /* Get the current PHY config */
3003 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
3006 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3010 ice_copy_phy_caps_to_cfg(pi, pcaps, &cfg);
3012 /* Configure the set PHY data */
3013 status = ice_cfg_phy_fc(pi, &cfg, pi->fc.req_mode);
3015 if (status != ICE_ERR_BAD_PTR)
3016 *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
3021 /* If the capabilities have changed, then set the new config */
3022 if (cfg.caps != pcaps->caps) {
3023 int retry_count, retry_max = 10;
3025 /* Auto restart link so settings take effect */
3026 if (ena_auto_link_update)
3027 cfg.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3029 status = ice_aq_set_phy_cfg(hw, pi, &cfg, NULL);
3031 *aq_failures = ICE_SET_FC_AQ_FAIL_SET;
3035 /* Update the link info
3036 * It sometimes takes a really long time for link to
3037 * come back from the atomic reset. Thus, we wait a little bit.
3040 for (retry_count = 0; retry_count < retry_max; retry_count++) {
3041 status = ice_update_link_info(pi);
3043 if (status == ICE_SUCCESS)
3046 ice_msec_delay(100, true);
3050 *aq_failures = ICE_SET_FC_AQ_FAIL_UPDATE;
3054 ice_free(hw, pcaps);
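/* Minimal caller sketch for ice_set_fc (illustrative only), assuming
 * ICE_FC_FULL is the symmetric-pause member of enum ice_fc_mode and that
 * the caller wants the link restarted automatically:
 *
 *	u8 aq_fail = 0;
 *
 *	pi->fc.req_mode = ICE_FC_FULL;
 *	status = ice_set_fc(pi, &aq_fail, true);
 *	if (status)
 *		ice_debug(pi->hw, ICE_DBG_LINK, "set_fc step %u failed\n",
 *			  aq_fail);
 *
 * On failure, aq_fail reports which step failed: ICE_SET_FC_AQ_FAIL_GET,
 * ICE_SET_FC_AQ_FAIL_SET or ICE_SET_FC_AQ_FAIL_UPDATE.
 */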
3059 * ice_phy_caps_equals_cfg
3060 * @phy_caps: PHY capabilities
3061 * @phy_cfg: PHY configuration
3063 * Helper function to determine if PHY capabilities match the PHY configuration.
3067 ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *phy_caps,
3068 struct ice_aqc_set_phy_cfg_data *phy_cfg)
3070 u8 caps_mask, cfg_mask;
3072 if (!phy_caps || !phy_cfg)
3075 /* These bits are not common between capabilities and configuration.
3076 * Do not use them to determine equality.
3078 caps_mask = ICE_AQC_PHY_CAPS_MASK & ~(ICE_AQC_PHY_AN_MODE |
3079 ICE_AQC_PHY_EN_MOD_QUAL);
3080 cfg_mask = ICE_AQ_PHY_ENA_VALID_MASK & ~ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
3082 if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
3083 phy_caps->phy_type_high != phy_cfg->phy_type_high ||
3084 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
3085 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
3086 phy_caps->eee_cap != phy_cfg->eee_cap ||
3087 phy_caps->eeer_value != phy_cfg->eeer_value ||
3088 phy_caps->link_fec_options != phy_cfg->link_fec_opt)
3095 * ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
3096 * @pi: port information structure
3097 * @caps: PHY ability structure to copy data from
3098 * @cfg: PHY configuration structure to copy data to
3100 * Helper function to copy AQC PHY get ability data to the PHY set configuration data structure.
3104 ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
3105 struct ice_aqc_get_phy_caps_data *caps,
3106 struct ice_aqc_set_phy_cfg_data *cfg)
3108 if (!pi || !caps || !cfg)
3111 ice_memset(cfg, 0, sizeof(*cfg), ICE_NONDMA_MEM);
3112 cfg->phy_type_low = caps->phy_type_low;
3113 cfg->phy_type_high = caps->phy_type_high;
3114 cfg->caps = caps->caps;
3115 cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
3116 cfg->eee_cap = caps->eee_cap;
3117 cfg->eeer_value = caps->eeer_value;
3118 cfg->link_fec_opt = caps->link_fec_options;
3119 cfg->module_compliance_enforcement =
3120 caps->module_compliance_enforcement;
3122 if (ice_fw_supports_link_override(pi->hw)) {
3123 struct ice_link_default_override_tlv tlv;
3125 if (ice_get_link_default_override(&tlv, pi))
3128 if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE)
3129 cfg->module_compliance_enforcement |=
3130 ICE_LINK_OVERRIDE_STRICT_MODE;
3135 * ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
3136 * @pi: port information structure
3137 * @cfg: PHY configuration data to set FEC mode
3138 * @fec: FEC mode to configure
3141 ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
3142 enum ice_fec_mode fec)
3144 struct ice_aqc_get_phy_caps_data *pcaps;
3145 enum ice_status status = ICE_SUCCESS;
3149 return ICE_ERR_BAD_PTR;
3153 pcaps = (struct ice_aqc_get_phy_caps_data *)
3154 ice_malloc(hw, sizeof(*pcaps));
3156 return ICE_ERR_NO_MEMORY;
3158 status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps,
3163 cfg->caps |= (pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
3164 cfg->link_fec_opt = pcaps->link_fec_options;
3168 /* Clear the RS bits, AND in the BASE-R ability
3169 * bits, and OR in the BASE-R request bits.
3171 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
3172 ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
3173 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
3174 ICE_AQC_PHY_FEC_25G_KR_REQ;
3177 /* Clear the BASE-R bits, AND in the RS ability
3178 * bits, and OR in the RS request bits.
3180 cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
3181 cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
3182 ICE_AQC_PHY_FEC_25G_RS_544_REQ;
3185 /* Clear all FEC option bits. */
3186 cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
3189 /* AND auto FEC bit, and all caps bits. */
3190 cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
3191 cfg->link_fec_opt |= pcaps->link_fec_options;
3194 status = ICE_ERR_PARAM;
3198 if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) {
3199 struct ice_link_default_override_tlv tlv;
3201 if (ice_get_link_default_override(&tlv, pi))
3204 if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
3205 (tlv.options & ICE_LINK_OVERRIDE_EN))
3206 cfg->link_fec_opt = tlv.fec_options;
3210 ice_free(hw, pcaps);
3216 * ice_get_link_status - get status of the HW network link
3217 * @pi: port information structure
3218 * @link_up: pointer to bool (true/false = linkup/linkdown)
3220 * Variable link_up is true if link is up, false if link is down.
3221 * The variable link_up is invalid if status is non-zero. As a
3222 * result of this call, link status reporting becomes enabled.
3224 enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
3226 struct ice_phy_info *phy_info;
3227 enum ice_status status = ICE_SUCCESS;
3229 if (!pi || !link_up)
3230 return ICE_ERR_PARAM;
3232 phy_info = &pi->phy;
3234 if (phy_info->get_link_info) {
3235 status = ice_update_link_info(pi);
3238 ice_debug(pi->hw, ICE_DBG_LINK, "get link status error, status = %d\n",
3242 *link_up = phy_info->link_info.link_info & ICE_AQ_LINK_UP;
3248 * ice_aq_set_link_restart_an
3249 * @pi: pointer to the port information structure
3250 * @ena_link: if true: enable link, if false: disable link
3251 * @cd: pointer to command details structure or NULL
3253 * Sets up the link and restarts the Auto-Negotiation over the link.
3256 ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
3257 struct ice_sq_cd *cd)
3259 struct ice_aqc_restart_an *cmd;
3260 struct ice_aq_desc desc;
3262 cmd = &desc.params.restart_an;
3264 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_restart_an);
3266 cmd->cmd_flags = ICE_AQC_RESTART_AN_LINK_RESTART;
3267 cmd->lport_num = pi->lport;
3269 cmd->cmd_flags |= ICE_AQC_RESTART_AN_LINK_ENABLE;
3271 cmd->cmd_flags &= ~ICE_AQC_RESTART_AN_LINK_ENABLE;
3273 return ice_aq_send_cmd(pi->hw, &desc, NULL, 0, cd);
3277 * ice_aq_set_event_mask
3278 * @hw: pointer to the HW struct
3279 * @port_num: port number of the physical function
3280 * @mask: event mask to be set
3281 * @cd: pointer to command details structure or NULL
3283 * Set event mask (0x0613)
3286 ice_aq_set_event_mask(struct ice_hw *hw, u8 port_num, u16 mask,
3287 struct ice_sq_cd *cd)
3289 struct ice_aqc_set_event_mask *cmd;
3290 struct ice_aq_desc desc;
3292 cmd = &desc.params.set_event_mask;
3294 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_event_mask);
3296 cmd->lport_num = port_num;
3298 cmd->event_mask = CPU_TO_LE16(mask);
3299 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3303 * ice_aq_set_mac_loopback
3304 * @hw: pointer to the HW struct
3305 * @ena_lpbk: Enable or Disable loopback
3306 * @cd: pointer to command details structure or NULL
3308 * Enable/disable loopback on a given port
3311 ice_aq_set_mac_loopback(struct ice_hw *hw, bool ena_lpbk, struct ice_sq_cd *cd)
3313 struct ice_aqc_set_mac_lb *cmd;
3314 struct ice_aq_desc desc;
3316 cmd = &desc.params.set_mac_lb;
3318 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_mac_lb);
3320 cmd->lb_mode = ICE_AQ_MAC_LB_EN;
3322 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3326 * ice_aq_set_port_id_led
3327 * @pi: pointer to the port information
3328 * @is_orig_mode: is this LED set to original mode (by the net-list)
3329 * @cd: pointer to command details structure or NULL
3331 * Set LED value for the given port (0x06e9)
3334 ice_aq_set_port_id_led(struct ice_port_info *pi, bool is_orig_mode,
3335 struct ice_sq_cd *cd)
3337 struct ice_aqc_set_port_id_led *cmd;
3338 struct ice_hw *hw = pi->hw;
3339 struct ice_aq_desc desc;
3341 cmd = &desc.params.set_port_id_led;
3343 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_port_id_led);
3346 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_ORIG;
3348 cmd->ident_mode = ICE_AQC_PORT_IDENT_LED_BLINK;
3350 return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
3354 * ice_aq_sff_eeprom
3355 * @hw: pointer to the HW struct
3356 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
3357 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
3358 * @mem_addr: I2C offset. lower 8 bits for the address, upper 8 bits are zero padding.
3359 * @page: QSFP page
3360 * @set_page: set or ignore the page
3361 * @data: pointer to data buffer to be read/written to the I2C device.
3362 * @length: 1-16 for read, 1 for write.
3363 * @write: 0 for read, 1 for write.
3364 * @cd: pointer to command details structure or NULL
3366 * Read/Write SFF EEPROM (0x06EE)
3369 ice_aq_sff_eeprom(struct ice_hw *hw, u16 lport, u8 bus_addr,
3370 u16 mem_addr, u8 page, u8 set_page, u8 *data, u8 length,
3371 bool write, struct ice_sq_cd *cd)
3373 struct ice_aqc_sff_eeprom *cmd;
3374 struct ice_aq_desc desc;
3375 enum ice_status status;
3377 if (!data || (mem_addr & 0xff00))
3378 return ICE_ERR_PARAM;
3380 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_sff_eeprom);
3381 cmd = &desc.params.read_write_sff_param;
3382 desc.flags = CPU_TO_LE16(ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF);
3383 cmd->lport_num = (u8)(lport & 0xff);
3384 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
3385 cmd->i2c_bus_addr = CPU_TO_LE16(((bus_addr >> 1) &
3386 ICE_AQC_SFF_I2CBUS_7BIT_M) |
3388 ICE_AQC_SFF_SET_EEPROM_PAGE_S) &
3389 ICE_AQC_SFF_SET_EEPROM_PAGE_M));
3390 cmd->i2c_mem_addr = CPU_TO_LE16(mem_addr & 0xff);
3391 cmd->eeprom_page = CPU_TO_LE16((u16)page << ICE_AQC_SFF_EEPROM_PAGE_S);
3393 cmd->i2c_bus_addr |= CPU_TO_LE16(ICE_AQC_SFF_IS_WRITE);
3395 status = ice_aq_send_cmd(hw, &desc, data, length, cd);
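/* Illustrative read through ice_aq_sff_eeprom, assuming a module at the
 * default 0xA0 I2C address on the default logical port (lport valid bit
 * clear): fetch the single identifier byte at offset 0 of page 0.
 *
 *	u8 id = 0;
 *
 *	status = ice_aq_sff_eeprom(hw, 0, 0xA0, 0x00, 0, 0, &id, 1,
 *				   false, NULL);
 *
 * A write uses the same call with write set to true and length = 1, per the
 * parameter descriptions above.
 */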
3400 * __ice_aq_get_set_rss_lut
3401 * @hw: pointer to the hardware structure
3402 * @vsi_id: VSI FW index
3403 * @lut_type: LUT table type
3404 * @lut: pointer to the LUT buffer provided by the caller
3405 * @lut_size: size of the LUT buffer
3406 * @glob_lut_idx: global LUT index
3407 * @set: set true to set the table, false to get the table
3409 * Internal function to get (0x0B05) or set (0x0B03) RSS look up table
3411 static enum ice_status
3412 __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
3413 u16 lut_size, u8 glob_lut_idx, bool set)
3415 struct ice_aqc_get_set_rss_lut *cmd_resp;
3416 struct ice_aq_desc desc;
3417 enum ice_status status;
3420 cmd_resp = &desc.params.get_set_rss_lut;
3423 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_lut);
3424 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3426 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_lut);
3429 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
3430 ICE_AQC_GSET_RSS_LUT_VSI_ID_S) &
3431 ICE_AQC_GSET_RSS_LUT_VSI_ID_M) |
3432 ICE_AQC_GSET_RSS_LUT_VSI_VALID);
3435 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_VSI:
3436 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF:
3437 case ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL:
3438 flags |= ((lut_type << ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_S) &
3439 ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_M);
3442 status = ICE_ERR_PARAM;
3443 goto ice_aq_get_set_rss_lut_exit;
3446 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_GLOBAL) {
3447 flags |= ((glob_lut_idx << ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_S) &
3448 ICE_AQC_GSET_RSS_LUT_GLOBAL_IDX_M);
3451 goto ice_aq_get_set_rss_lut_send;
3452 } else if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3454 goto ice_aq_get_set_rss_lut_send;
3456 goto ice_aq_get_set_rss_lut_send;
3459 /* LUT size is only valid for Global and PF table types */
3461 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
3462 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
3463 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3464 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3466 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
3467 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
3468 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3469 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3471 case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
3472 if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
3473 flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
3474 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
3475 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
3480 status = ICE_ERR_PARAM;
3481 goto ice_aq_get_set_rss_lut_exit;
3484 ice_aq_get_set_rss_lut_send:
3485 cmd_resp->flags = CPU_TO_LE16(flags);
3486 status = ice_aq_send_cmd(hw, &desc, lut, lut_size, NULL);
3488 ice_aq_get_set_rss_lut_exit:
3493 * ice_aq_get_rss_lut
3494 * @hw: pointer to the hardware structure
3495 * @vsi_handle: software VSI handle
3496 * @lut_type: LUT table type
3497 * @lut: pointer to the LUT buffer provided by the caller
3498 * @lut_size: size of the LUT buffer
3500 * get the RSS lookup table, PF or VSI type
3503 ice_aq_get_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
3504 u8 *lut, u16 lut_size)
3506 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3507 return ICE_ERR_PARAM;
3509 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3510 lut_type, lut, lut_size, 0, false);
3514 * ice_aq_set_rss_lut
3515 * @hw: pointer to the hardware structure
3516 * @vsi_handle: software VSI handle
3517 * @lut_type: LUT table type
3518 * @lut: pointer to the LUT buffer provided by the caller
3519 * @lut_size: size of the LUT buffer
3521 * set the RSS lookup table, PF or VSI type
3524 ice_aq_set_rss_lut(struct ice_hw *hw, u16 vsi_handle, u8 lut_type,
3525 u8 *lut, u16 lut_size)
3527 if (!ice_is_vsi_valid(hw, vsi_handle) || !lut)
3528 return ICE_ERR_PARAM;
3530 return __ice_aq_get_set_rss_lut(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3531 lut_type, lut, lut_size, 0, true);
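/* Usage sketch for the LUT wrappers above (illustrative only): program a
 * 512-entry PF table that spreads flows across a hypothetical num_rxq
 * queues.
 *
 *	u8 lut[ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512];
 *	u16 i;
 *
 *	for (i = 0; i < sizeof(lut); i++)
 *		lut[i] = i % num_rxq;
 *	status = ice_aq_set_rss_lut(hw, vsi_handle,
 *				    ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF,
 *				    lut, sizeof(lut));
 *
 * ice_aq_get_rss_lut() takes the same arguments and fills the buffer
 * instead of consuming it.
 */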
3535 * __ice_aq_get_set_rss_key
3536 * @hw: pointer to the HW struct
3537 * @vsi_id: VSI FW index
3538 * @key: pointer to key info struct
3539 * @set: set true to set the key, false to get the key
3541 * get (0x0B04) or set (0x0B02) the RSS key per VSI
3544 ice_status __ice_aq_get_set_rss_key(struct ice_hw *hw, u16 vsi_id,
3545 struct ice_aqc_get_set_rss_keys *key,
3548 struct ice_aqc_get_set_rss_key *cmd_resp;
3549 u16 key_size = sizeof(*key);
3550 struct ice_aq_desc desc;
3552 cmd_resp = &desc.params.get_set_rss_key;
3555 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_set_rss_key);
3556 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3558 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_rss_key);
3561 cmd_resp->vsi_id = CPU_TO_LE16(((vsi_id <<
3562 ICE_AQC_GSET_RSS_KEY_VSI_ID_S) &
3563 ICE_AQC_GSET_RSS_KEY_VSI_ID_M) |
3564 ICE_AQC_GSET_RSS_KEY_VSI_VALID);
3566 return ice_aq_send_cmd(hw, &desc, key, key_size, NULL);
3570 * ice_aq_get_rss_key
3571 * @hw: pointer to the HW struct
3572 * @vsi_handle: software VSI handle
3573 * @key: pointer to key info struct
3575 * get the RSS key per VSI
3578 ice_aq_get_rss_key(struct ice_hw *hw, u16 vsi_handle,
3579 struct ice_aqc_get_set_rss_keys *key)
3581 if (!ice_is_vsi_valid(hw, vsi_handle) || !key)
3582 return ICE_ERR_PARAM;
3584 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3589 * ice_aq_set_rss_key
3590 * @hw: pointer to the HW struct
3591 * @vsi_handle: software VSI handle
3592 * @keys: pointer to key info struct
3594 * set the RSS key per VSI
3597 ice_aq_set_rss_key(struct ice_hw *hw, u16 vsi_handle,
3598 struct ice_aqc_get_set_rss_keys *keys)
3600 if (!ice_is_vsi_valid(hw, vsi_handle) || !keys)
3601 return ICE_ERR_PARAM;
3603 return __ice_aq_get_set_rss_key(hw, ice_get_hw_vsi_num(hw, vsi_handle),
3608 * ice_aq_add_lan_txq
3609 * @hw: pointer to the hardware structure
3610 * @num_qgrps: Number of added queue groups
3611 * @qg_list: list of queue groups to be added
3612 * @buf_size: size of buffer for indirect command
3613 * @cd: pointer to command details structure or NULL
3615 * Add Tx LAN queue (0x0C30)
3618 * Prior to calling add Tx LAN queue:
3619 * Initialize the following as part of the Tx queue context:
3620 * Completion queue ID if the queue uses Completion queue, Quanta profile,
3621 * Cache profile and Packet shaper profile.
3623 * After add Tx LAN queue AQ command is completed:
3624 * Interrupts should be associated with specific queues,
3625 * Association of Tx queue to Doorbell queue is not part of the Add LAN Tx queue AQ command.
3629 ice_aq_add_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3630 struct ice_aqc_add_tx_qgrp *qg_list, u16 buf_size,
3631 struct ice_sq_cd *cd)
3633 struct ice_aqc_add_tx_qgrp *list;
3634 struct ice_aqc_add_txqs *cmd;
3635 struct ice_aq_desc desc;
3636 u16 i, sum_size = 0;
3638 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3640 cmd = &desc.params.add_txqs;
3642 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_add_txqs);
3645 return ICE_ERR_PARAM;
3647 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3648 return ICE_ERR_PARAM;
3650 for (i = 0, list = qg_list; i < num_qgrps; i++) {
3651 sum_size += ice_struct_size(list, txqs, list->num_txqs);
3652 list = (struct ice_aqc_add_tx_qgrp *)(list->txqs +
3656 if (buf_size != sum_size)
3657 return ICE_ERR_PARAM;
3659 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3661 cmd->num_qgrps = num_qgrps;
3663 return ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
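/* Sizing note (illustrative): buf_size passed to ice_aq_add_lan_txq must
 * equal the sum of ice_struct_size(list, txqs, list->num_txqs) over every
 * group in qg_list, as checked above. For a single group carrying one
 * queue, a hypothetical caller would therefore size its buffer as:
 *
 *	u16 buf_size = ice_struct_size(qg_buf, txqs, 1);
 *
 * where qg_buf points to the caller's struct ice_aqc_add_tx_qgrp.
 */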
3667 * ice_aq_dis_lan_txq
3668 * @hw: pointer to the hardware structure
3669 * @num_qgrps: number of groups in the list
3670 * @qg_list: the list of groups to disable
3671 * @buf_size: the total size of the qg_list buffer in bytes
3672 * @rst_src: if called due to reset, specifies the reset source
3673 * @vmvf_num: the relative VM or VF number that is undergoing the reset
3674 * @cd: pointer to command details structure or NULL
3676 * Disable LAN Tx queue (0x0C31)
3678 static enum ice_status
3679 ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
3680 struct ice_aqc_dis_txq_item *qg_list, u16 buf_size,
3681 enum ice_disq_rst_src rst_src, u16 vmvf_num,
3682 struct ice_sq_cd *cd)
3684 struct ice_aqc_dis_txq_item *item;
3685 struct ice_aqc_dis_txqs *cmd;
3686 struct ice_aq_desc desc;
3687 enum ice_status status;
3690 ice_debug(hw, ICE_DBG_TRACE, "%s\n", __func__);
3691 cmd = &desc.params.dis_txqs;
3692 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dis_txqs);
3694 /* qg_list can be NULL only in VM/VF reset flow */
3695 if (!qg_list && !rst_src)
3696 return ICE_ERR_PARAM;
3698 if (num_qgrps > ICE_LAN_TXQ_MAX_QGRPS)
3699 return ICE_ERR_PARAM;
3701 cmd->num_entries = num_qgrps;
3703 cmd->vmvf_and_timeout = CPU_TO_LE16((5 << ICE_AQC_Q_DIS_TIMEOUT_S) &
3704 ICE_AQC_Q_DIS_TIMEOUT_M);
3708 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VM_RESET;
3709 cmd->vmvf_and_timeout |=
3710 CPU_TO_LE16(vmvf_num & ICE_AQC_Q_DIS_VMVF_NUM_M);
3713 cmd->cmd_type = ICE_AQC_Q_DIS_CMD_VF_RESET;
3714 /* In this case, FW expects vmvf_num to be absolute VF ID */
3715 cmd->vmvf_and_timeout |=
3716 CPU_TO_LE16((vmvf_num + hw->func_caps.vf_base_id) &
3717 ICE_AQC_Q_DIS_VMVF_NUM_M);
3724 /* flush pipe on timeout */
3725 cmd->cmd_type |= ICE_AQC_Q_DIS_CMD_FLUSH_PIPE;
3726 /* If no queue group info, we are in a reset flow. Issue the AQ */
3730 /* set RD bit to indicate that command buffer is provided by the driver
3731 * and it needs to be read by the firmware
3733 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3735 for (i = 0, item = qg_list; i < num_qgrps; i++) {
3736 u16 item_size = ice_struct_size(item, q_id, item->num_qs);
3738 /* If the num of queues is even, add 2 bytes of padding */
3739 if ((item->num_qs % 2) == 0)
3744 item = (struct ice_aqc_dis_txq_item *)((u8 *)item + item_size);
3748 return ICE_ERR_PARAM;
3751 status = ice_aq_send_cmd(hw, &desc, qg_list, buf_size, cd);
3754 ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
3755 vmvf_num, hw->adminq.sq_last_status);
3757 ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
3758 LE16_TO_CPU(qg_list[0].q_id[0]),
3759 hw->adminq.sq_last_status);
3765 * ice_aq_move_recfg_lan_txq
3766 * @hw: pointer to the hardware structure
3767 * @num_qs: number of queues to move/reconfigure
3768 * @is_move: true if this operation involves node movement
3769 * @is_tc_change: true if this operation involves a TC change
3770 * @subseq_call: true if this operation is a subsequent call
3771 * @flush_pipe: on timeout, true to flush pipe, false to return EAGAIN
3772 * @timeout: timeout in units of 100 usec (valid values 0-50)
3773 * @blocked_cgds: out param, bitmap of CGDs that timed out if returning EAGAIN
3774 * @buf: struct containing src/dest TEID and per-queue info
3775 * @buf_size: size of buffer for indirect command
3776 * @txqs_moved: out param, number of queues successfully moved
3777 * @cd: pointer to command details structure or NULL
3779 * Move / Reconfigure Tx LAN queues (0x0C32)
3782 ice_aq_move_recfg_lan_txq(struct ice_hw *hw, u8 num_qs, bool is_move,
3783 bool is_tc_change, bool subseq_call, bool flush_pipe,
3784 u8 timeout, u32 *blocked_cgds,
3785 struct ice_aqc_move_txqs_data *buf, u16 buf_size,
3786 u8 *txqs_moved, struct ice_sq_cd *cd)
3788 struct ice_aqc_move_txqs *cmd;
3789 struct ice_aq_desc desc;
3790 enum ice_status status;
3792 cmd = &desc.params.move_txqs;
3793 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_move_recfg_txqs);
3795 #define ICE_LAN_TXQ_MOVE_TIMEOUT_MAX 50
3796 if (timeout > ICE_LAN_TXQ_MOVE_TIMEOUT_MAX)
3797 return ICE_ERR_PARAM;
3799 if (is_tc_change && !flush_pipe && !blocked_cgds)
3800 return ICE_ERR_PARAM;
3802 if (!is_move && !is_tc_change)
3803 return ICE_ERR_PARAM;
3805 desc.flags |= CPU_TO_LE16(ICE_AQ_FLAG_RD);
3808 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_MOVE;
3811 cmd->cmd_type |= ICE_AQC_Q_CMD_TYPE_TC_CHANGE;
3814 cmd->cmd_type |= ICE_AQC_Q_CMD_SUBSEQ_CALL;
3817 cmd->cmd_type |= ICE_AQC_Q_CMD_FLUSH_PIPE;
3819 cmd->num_qs = num_qs;
3820 cmd->timeout = ((timeout << ICE_AQC_Q_CMD_TIMEOUT_S) &
3821 ICE_AQC_Q_CMD_TIMEOUT_M);
3823 status = ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
3825 if (!status && txqs_moved)
3826 *txqs_moved = cmd->num_qs;
3828 if (hw->adminq.sq_last_status == ICE_AQ_RC_EAGAIN &&
3829 is_tc_change && !flush_pipe)
3830 *blocked_cgds = LE32_TO_CPU(cmd->blocked_cgds);
3835 /* End of FW Admin Queue command wrappers */
3838 * ice_write_byte - write a byte to a packed context structure
3839 * @src_ctx: the context structure to read from
3840 * @dest_ctx: the context to be written to
3841 * @ce_info: a description of the struct to be filled
3844 ice_write_byte(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3846 u8 src_byte, dest_byte, mask;
3850 /* copy from the next struct field */
3851 from = src_ctx + ce_info->offset;
3853 /* prepare the bits and mask */
3854 shift_width = ce_info->lsb % 8;
3855 mask = (u8)(BIT(ce_info->width) - 1);
3860 /* shift to correct alignment */
3861 mask <<= shift_width;
3862 src_byte <<= shift_width;
3864 /* get the current bits from the target bit string */
3865 dest = dest_ctx + (ce_info->lsb / 8);
3867 ice_memcpy(&dest_byte, dest, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
3869 dest_byte &= ~mask; /* get the bits not changing */
3870 dest_byte |= src_byte; /* add in the new bits */
3872 /* put it all back */
3873 ice_memcpy(dest, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
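/* Worked example for the byte packing above (made-up numbers): for a field
 * with width = 3, lsb = 12 and a source value of 5, shift_width = 12 % 8 =
 * 4, mask = (BIT(3) - 1) << 4 = 0x70 and the masked source byte becomes
 * 5 << 4 = 0x50. The destination byte at offset 12 / 8 = 1 is then updated
 * as (dest & ~0x70) | 0x50, leaving every bit outside the field untouched.
 * ice_write_word/dword/qword below apply the same scheme to wider fields.
 */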
3877 * ice_write_word - write a word to a packed context structure
3878 * @src_ctx: the context structure to read from
3879 * @dest_ctx: the context to be written to
3880 * @ce_info: a description of the struct to be filled
3883 ice_write_word(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3890 /* copy from the next struct field */
3891 from = src_ctx + ce_info->offset;
3893 /* prepare the bits and mask */
3894 shift_width = ce_info->lsb % 8;
3895 mask = BIT(ce_info->width) - 1;
3897 /* don't swizzle the bits until after the mask because the mask bits
3898 * will be in a different bit position on big endian machines
3900 src_word = *(u16 *)from;
3903 /* shift to correct alignment */
3904 mask <<= shift_width;
3905 src_word <<= shift_width;
3907 /* get the current bits from the target bit string */
3908 dest = dest_ctx + (ce_info->lsb / 8);
3910 ice_memcpy(&dest_word, dest, sizeof(dest_word), ICE_DMA_TO_NONDMA);
3912 dest_word &= ~(CPU_TO_LE16(mask)); /* get the bits not changing */
3913 dest_word |= CPU_TO_LE16(src_word); /* add in the new bits */
3915 /* put it all back */
3916 ice_memcpy(dest, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
3920 * ice_write_dword - write a dword to a packed context structure
3921 * @src_ctx: the context structure to read from
3922 * @dest_ctx: the context to be written to
3923 * @ce_info: a description of the struct to be filled
3926 ice_write_dword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3928 u32 src_dword, mask;
3933 /* copy from the next struct field */
3934 from = src_ctx + ce_info->offset;
3936 /* prepare the bits and mask */
3937 shift_width = ce_info->lsb % 8;
3939 /* if the field width is exactly 32 on an x86 machine, then the shift
3940 * operation will not work because the SHL instruction's count is masked
3941 * to 5 bits, so the shift would do nothing (see the note after this function)
3943 if (ce_info->width < 32)
3944 mask = BIT(ce_info->width) - 1;
3948 /* don't swizzle the bits until after the mask because the mask bits
3949 * will be in a different bit position on big endian machines
3951 src_dword = *(u32 *)from;
3954 /* shift to correct alignment */
3955 mask <<= shift_width;
3956 src_dword <<= shift_width;
3958 /* get the current bits from the target bit string */
3959 dest = dest_ctx + (ce_info->lsb / 8);
3961 ice_memcpy(&dest_dword, dest, sizeof(dest_dword), ICE_DMA_TO_NONDMA);
3963 dest_dword &= ~(CPU_TO_LE32(mask)); /* get the bits not changing */
3964 dest_dword |= CPU_TO_LE32(src_dword); /* add in the new bits */
3966 /* put it all back */
3967 ice_memcpy(dest, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
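/* Note on the width == 32 special case above: a 32-bit shift count is taken
 * modulo 32 on x86, so BIT(32) - 1 would evaluate as (1 << 0) - 1 == 0
 * rather than 0xFFFFFFFF. A full-width field therefore needs an all-ones
 * mask built without shifting (e.g. (u32)~0); the same reasoning applies to
 * the 64-bit helper below with BIT_ULL(64).
 */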
3971 * ice_write_qword - write a qword to a packed context structure
3972 * @src_ctx: the context structure to read from
3973 * @dest_ctx: the context to be written to
3974 * @ce_info: a description of the struct to be filled
3977 ice_write_qword(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
3979 u64 src_qword, mask;
3984 /* copy from the next struct field */
3985 from = src_ctx + ce_info->offset;
3987 /* prepare the bits and mask */
3988 shift_width = ce_info->lsb % 8;
3990 /* if the field width is exactly 64 on an x86 machine, then the shift
3991 * operation will not work because the SHL instruction's count is masked
3992 * to 6 bits, so the shift would do nothing
3994 if (ce_info->width < 64)
3995 mask = BIT_ULL(ce_info->width) - 1;
3999 /* don't swizzle the bits until after the mask because the mask bits
4000 * will be in a different bit position on big endian machines
4002 src_qword = *(u64 *)from;
4005 /* shift to correct alignment */
4006 mask <<= shift_width;
4007 src_qword <<= shift_width;
4009 /* get the current bits from the target bit string */
4010 dest = dest_ctx + (ce_info->lsb / 8);
4012 ice_memcpy(&dest_qword, dest, sizeof(dest_qword), ICE_DMA_TO_NONDMA);
4014 dest_qword &= ~(CPU_TO_LE64(mask)); /* get the bits not changing */
4015 dest_qword |= CPU_TO_LE64(src_qword); /* add in the new bits */
4017 /* put it all back */
4018 ice_memcpy(dest, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
4022 * ice_set_ctx - set context bits in packed structure
4023 * @hw: pointer to the hardware structure
4024 * @src_ctx: pointer to a generic non-packed context structure
4025 * @dest_ctx: pointer to memory for the packed structure
4026 * @ce_info: a description of the structure to be transformed
4029 ice_set_ctx(struct ice_hw *hw, u8 *src_ctx, u8 *dest_ctx,
4030 const struct ice_ctx_ele *ce_info)
4034 for (f = 0; ce_info[f].width; f++) {
4035 /* We have to deal with each element of the FW response
4036 * using the correct size so that we are correct regardless
4037 * of the endianness of the machine.
4039 if (ce_info[f].width > (ce_info[f].size_of * BITS_PER_BYTE)) {
4040 ice_debug(hw, ICE_DBG_QCTX, "Field %d width of %d bits larger than size of %d byte(s) ... skipping write\n",
4041 f, ce_info[f].width, ce_info[f].size_of);
4044 switch (ce_info[f].size_of) {
4046 ice_write_byte(src_ctx, dest_ctx, &ce_info[f]);
4049 ice_write_word(src_ctx, dest_ctx, &ce_info[f]);
4052 ice_write_dword(src_ctx, dest_ctx, &ce_info[f]);
4055 ice_write_qword(src_ctx, dest_ctx, &ce_info[f]);
4058 return ICE_ERR_INVAL_SIZE;
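/* Usage sketch for ice_set_ctx(). The structure, field layout and helper
 * below are hypothetical and only show how a ce_info table drives the
 * packing; real tables (e.g. for the Tx/Rx queue contexts) are defined
 * elsewhere in the driver. Kept under #if 0 so it stays illustrative only.
 */
#if 0
struct example_sw_ctx {
	u16 head;	/* packed into bits 12:0 of the destination */
	u8 ena;		/* packed into bit 13 of the destination */
};

static const struct ice_ctx_ele example_ctx_info[] = {
	{ .offset = offsetof(struct example_sw_ctx, head),
	  .size_of = sizeof(u16), .width = 13, .lsb = 0 },
	{ .offset = offsetof(struct example_sw_ctx, ena),
	  .size_of = sizeof(u8), .width = 1, .lsb = 13 },
	{ 0 },	/* width == 0 terminates the walk in ice_set_ctx() */
};

static enum ice_status
example_pack_ctx(struct ice_hw *hw, struct example_sw_ctx *ctx, u8 *buf)
{
	/* packs ctx->head and ctx->ena into the first two bytes of buf */
	return ice_set_ctx(hw, (u8 *)ctx, buf, example_ctx_info);
}
#endif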
4066 * ice_read_byte - read context byte into struct
4067 * @src_ctx: the context structure to read from
4068 * @dest_ctx: the context to be written to
4069 * @ce_info: a description of the struct to be filled
4072 ice_read_byte(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4078 /* prepare the bits and mask */
4079 shift_width = ce_info->lsb % 8;
4080 mask = (u8)(BIT(ce_info->width) - 1);
4082 /* shift to correct alignment */
4083 mask <<= shift_width;
4085 /* get the current bits from the src bit string */
4086 src = src_ctx + (ce_info->lsb / 8);
4088 ice_memcpy(&dest_byte, src, sizeof(dest_byte), ICE_DMA_TO_NONDMA);
4090 dest_byte &= ~(mask);
4092 dest_byte >>= shift_width;
4094 /* get the address from the struct field */
4095 target = dest_ctx + ce_info->offset;
4097 /* put it back in the struct */
4098 ice_memcpy(target, &dest_byte, sizeof(dest_byte), ICE_NONDMA_TO_DMA);
4102 * ice_read_word - read context word into struct
4103 * @src_ctx: the context structure to read from
4104 * @dest_ctx: the context to be written to
4105 * @ce_info: a description of the struct to be filled
4108 ice_read_word(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4110 u16 dest_word, mask;
4115 /* prepare the bits and mask */
4116 shift_width = ce_info->lsb % 8;
4117 mask = BIT(ce_info->width) - 1;
4119 /* shift to correct alignment */
4120 mask <<= shift_width;
4122 /* get the current bits from the src bit string */
4123 src = src_ctx + (ce_info->lsb / 8);
4125 ice_memcpy(&src_word, src, sizeof(src_word), ICE_DMA_TO_NONDMA);
4127 /* the data in the memory is stored as little endian, so mask it correctly
4130 src_word &= ~(CPU_TO_LE16(mask));
4132 /* get the data back into host order before shifting */
4133 dest_word = LE16_TO_CPU(src_word);
4135 dest_word >>= shift_width;
4137 /* get the address from the struct field */
4138 target = dest_ctx + ce_info->offset;
4140 /* put it back in the struct */
4141 ice_memcpy(target, &dest_word, sizeof(dest_word), ICE_NONDMA_TO_DMA);
4145 * ice_read_dword - read context dword into struct
4146 * @src_ctx: the context structure to read from
4147 * @dest_ctx: the context to be written to
4148 * @ce_info: a description of the struct to be filled
4151 ice_read_dword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4153 u32 dest_dword, mask;
4158 /* prepare the bits and mask */
4159 shift_width = ce_info->lsb % 8;
4161 /* if the field width is exactly 32 on an x86 machine, then the shift
4162 * operation will not work because the SHL instruction's count is masked
4163 * to 5 bits, so the shift would do nothing
4165 if (ce_info->width < 32)
4166 mask = BIT(ce_info->width) - 1;
4170 /* shift to correct alignment */
4171 mask <<= shift_width;
4173 /* get the current bits from the src bit string */
4174 src = src_ctx + (ce_info->lsb / 8);
4176 ice_memcpy(&src_dword, src, sizeof(src_dword), ICE_DMA_TO_NONDMA);
4178 /* the data in the memory is stored as little endian, so mask it correctly
4181 src_dword &= ~(CPU_TO_LE32(mask));
4183 /* get the data back into host order before shifting */
4184 dest_dword = LE32_TO_CPU(src_dword);
4186 dest_dword >>= shift_width;
4188 /* get the address from the struct field */
4189 target = dest_ctx + ce_info->offset;
4191 /* put it back in the struct */
4192 ice_memcpy(target, &dest_dword, sizeof(dest_dword), ICE_NONDMA_TO_DMA);
4196 * ice_read_qword - read context qword into struct
4197 * @src_ctx: the context structure to read from
4198 * @dest_ctx: the context to be written to
4199 * @ce_info: a description of the struct to be filled
4202 ice_read_qword(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4204 u64 dest_qword, mask;
4209 /* prepare the bits and mask */
4210 shift_width = ce_info->lsb % 8;
4212 /* if the field width is exactly 64 on an x86 machine, then the shift
4213 * operation will not work because the SHL instruction's count is masked
4214 * to 6 bits, so the shift would do nothing
4216 if (ce_info->width < 64)
4217 mask = BIT_ULL(ce_info->width) - 1;
4221 /* shift to correct alignment */
4222 mask <<= shift_width;
4224 /* get the current bits from the src bit string */
4225 src = src_ctx + (ce_info->lsb / 8);
4227 ice_memcpy(&src_qword, src, sizeof(src_qword), ICE_DMA_TO_NONDMA);
4229 /* the data in the memory is stored as little endian, so mask it correctly
4232 src_qword &= ~(CPU_TO_LE64(mask));
4234 /* get the data back into host order before shifting */
4235 dest_qword = LE64_TO_CPU(src_qword);
4237 dest_qword >>= shift_width;
4239 /* get the address from the struct field */
4240 target = dest_ctx + ce_info->offset;
4242 /* put it back in the struct */
4243 ice_memcpy(target, &dest_qword, sizeof(dest_qword), ICE_NONDMA_TO_DMA);
4247 * ice_get_ctx - extract context bits from a packed structure
4248 * @src_ctx: pointer to a generic packed context structure
4249 * @dest_ctx: pointer to a generic non-packed context structure
4250 * @ce_info: a description of the structure to be read from
4253 ice_get_ctx(u8 *src_ctx, u8 *dest_ctx, struct ice_ctx_ele *ce_info)
4257 for (f = 0; ce_info[f].width; f++) {
4258 switch (ce_info[f].size_of) {
4260 ice_read_byte(src_ctx, dest_ctx, &ce_info[f]);
4263 ice_read_word(src_ctx, dest_ctx, &ce_info[f]);
4266 ice_read_dword(src_ctx, dest_ctx, &ce_info[f]);
4269 ice_read_qword(src_ctx, dest_ctx, &ce_info[f]);
4272 /* nothing to do, just keep going */
4281 * ice_get_lan_q_ctx - get the LAN queue context for the given VSI and TC
4282 * @hw: pointer to the HW struct
4283 * @vsi_handle: software VSI handle
4285 * @q_handle: software queue handle
4288 ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
4290 struct ice_vsi_ctx *vsi;
4291 struct ice_q_ctx *q_ctx;
4293 vsi = ice_get_vsi_ctx(hw, vsi_handle);
4296 if (q_handle >= vsi->num_lan_q_entries[tc])
4298 if (!vsi->lan_q_ctx[tc])
4300 q_ctx = vsi->lan_q_ctx[tc];
4301 return &q_ctx[q_handle];
4306 * @pi: port information structure
4307 * @vsi_handle: software VSI handle
4309 * @q_handle: software queue handle
4310 * @num_qgrps: Number of added queue groups
4311 * @buf: list of queue groups to be added
4312 * @buf_size: size of buffer for indirect command
4313 * @cd: pointer to command details structure or NULL
4315 * This function adds one LAN queue
4318 ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
4319 u8 num_qgrps, struct ice_aqc_add_tx_qgrp *buf, u16 buf_size,
4320 struct ice_sq_cd *cd)
4322 struct ice_aqc_txsched_elem_data node = { 0 };
4323 struct ice_sched_node *parent;
4324 struct ice_q_ctx *q_ctx;
4325 enum ice_status status;
4328 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4331 if (num_qgrps > 1 || buf->num_txqs > 1)
4332 return ICE_ERR_MAX_LIMIT;
4336 if (!ice_is_vsi_valid(hw, vsi_handle))
4337 return ICE_ERR_PARAM;
4339 ice_acquire_lock(&pi->sched_lock);
4341 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handle);
4343 ice_debug(hw, ICE_DBG_SCHED, "Enaq: invalid queue handle %d\n",
4345 status = ICE_ERR_PARAM;
4349 /* find a parent node */
4350 parent = ice_sched_get_free_qparent(pi, vsi_handle, tc,
4351 ICE_SCHED_NODE_OWNER_LAN);
4353 status = ICE_ERR_PARAM;
4357 buf->parent_teid = parent->info.node_teid;
4358 node.parent_teid = parent->info.node_teid;
4359 /* Mark the values in the "generic" section as valid. The default
4360 * value in the "generic" section is zero. This means that:
4361 * - Scheduling mode is Bytes Per Second (BPS), indicated by Bit 0.
4362 * - 0 priority among siblings, indicated by Bit 1-3.
4363 * - WFQ, indicated by Bit 4.
4364 * - 0 Adjustment value is used in PSM credit update flow, indicated by
4366 * - Bit 7 is reserved.
4367 * Without setting the generic section as valid in valid_sections, the
4368 * Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
4370 buf->txqs[0].info.valid_sections =
4371 ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
4372 ICE_AQC_ELEM_VALID_EIR;
4373 buf->txqs[0].info.generic = 0;
4374 buf->txqs[0].info.cir_bw.bw_profile_idx =
4375 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
4376 buf->txqs[0].info.cir_bw.bw_alloc =
4377 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
4378 buf->txqs[0].info.eir_bw.bw_profile_idx =
4379 CPU_TO_LE16(ICE_SCHED_DFLT_RL_PROF_ID);
4380 buf->txqs[0].info.eir_bw.bw_alloc =
4381 CPU_TO_LE16(ICE_SCHED_DFLT_BW_WT);
4383 /* add the LAN queue */
4384 status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
4385 if (status != ICE_SUCCESS) {
4386 ice_debug(hw, ICE_DBG_SCHED, "enable queue %d failed %d\n",
4387 LE16_TO_CPU(buf->txqs[0].txq_id),
4388 hw->adminq.sq_last_status);
4392 node.node_teid = buf->txqs[0].q_teid;
4393 node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
4394 q_ctx->q_handle = q_handle;
4395 q_ctx->q_teid = LE32_TO_CPU(node.node_teid);
4397 /* add a leaf node into scheduler tree queue layer */
4398 status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
4400 status = ice_sched_replay_q_bw(pi, q_ctx);
4403 ice_release_lock(&pi->sched_lock);
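/* Summary of the scheduler element defaults programmed above (restating the
 * comment in the function, not introducing new configuration):
 *
 *	valid_sections = GENERIC | CIR | EIR	-> all three sections valid
 *	generic        = 0			-> BPS scheduling, priority 0,
 *						   WFQ, 0 PSM credit adjustment
 *	cir/eir bw_profile_idx = ICE_SCHED_DFLT_RL_PROF_ID (default profile)
 *	cir/eir bw_alloc       = ICE_SCHED_DFLT_BW_WT (default weight)
 */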
4409 * @pi: port information structure
4410 * @vsi_handle: software VSI handle
4412 * @num_queues: number of queues
4413 * @q_handles: pointer to software queue handle array
4414 * @q_ids: pointer to the q_id array
4415 * @q_teids: pointer to queue node teids
4416 * @rst_src: if called due to reset, specifies the reset source
4417 * @vmvf_num: the relative VM or VF number that is undergoing the reset
4418 * @cd: pointer to command details structure or NULL
4420 * This function removes queues and their corresponding nodes in SW DB
4423 ice_dis_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u8 num_queues,
4424 u16 *q_handles, u16 *q_ids, u32 *q_teids,
4425 enum ice_disq_rst_src rst_src, u16 vmvf_num,
4426 struct ice_sq_cd *cd)
4428 enum ice_status status = ICE_ERR_DOES_NOT_EXIST;
4429 struct ice_aqc_dis_txq_item *qg_list;
4430 struct ice_q_ctx *q_ctx;
4434 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4440 /* if the queue is already disabled but the disable queue command
4441 * still has to be sent to complete the VF reset, then call
4442 * ice_aq_dis_lan_txq without any queue information
4445 return ice_aq_dis_lan_txq(hw, 0, NULL, 0, rst_src,
4450 buf_size = ice_struct_size(qg_list, q_id, 1);
4451 qg_list = (struct ice_aqc_dis_txq_item *)ice_malloc(hw, buf_size);
4453 return ICE_ERR_NO_MEMORY;
4455 ice_acquire_lock(&pi->sched_lock);
4457 for (i = 0; i < num_queues; i++) {
4458 struct ice_sched_node *node;
4460 node = ice_sched_find_node_by_teid(pi->root, q_teids[i]);
4463 q_ctx = ice_get_lan_q_ctx(hw, vsi_handle, tc, q_handles[i]);
4465 ice_debug(hw, ICE_DBG_SCHED, "invalid queue handle%d\n",
4469 if (q_ctx->q_handle != q_handles[i]) {
4470 ice_debug(hw, ICE_DBG_SCHED, "Err:handles %d %d\n",
4471 q_ctx->q_handle, q_handles[i]);
4474 qg_list->parent_teid = node->info.parent_teid;
4475 qg_list->num_qs = 1;
4476 qg_list->q_id[0] = CPU_TO_LE16(q_ids[i]);
4477 status = ice_aq_dis_lan_txq(hw, 1, qg_list, buf_size, rst_src,
4480 if (status != ICE_SUCCESS)
4482 ice_free_sched_node(pi, node);
4483 q_ctx->q_handle = ICE_INVAL_Q_HANDLE;
4485 ice_release_lock(&pi->sched_lock);
4486 ice_free(hw, qg_list);
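/* Buffer sizing note for ice_dis_vsi_txq(): ice_struct_size(qg_list, q_id, 1)
 * allocates struct ice_aqc_dis_txq_item with room for exactly one q_id entry,
 * so the loop above disables one queue per ice_aq_dis_lan_txq() call while
 * reusing the same single-entry buffer.
 */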
4491 * ice_cfg_vsi_qs - configure the new/existing VSI queues
4492 * @pi: port information structure
4493 * @vsi_handle: software VSI handle
4494 * @tc_bitmap: TC bitmap
4495 * @maxqs: max queues array per TC
4496 * @owner: LAN or RDMA
4498 * This function adds/updates the VSI queues per TC.
4500 static enum ice_status
4501 ice_cfg_vsi_qs(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
4502 u16 *maxqs, u8 owner)
4504 enum ice_status status = ICE_SUCCESS;
4507 if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY)
4510 if (!ice_is_vsi_valid(pi->hw, vsi_handle))
4511 return ICE_ERR_PARAM;
4513 ice_acquire_lock(&pi->sched_lock);
4515 ice_for_each_traffic_class(i) {
4516 /* configuration is possible only if TC node is present */
4517 if (!ice_sched_get_tc_node(pi, i))
4520 status = ice_sched_cfg_vsi(pi, vsi_handle, i, maxqs[i], owner,
4521 ice_is_tc_ena(tc_bitmap, i));
4526 ice_release_lock(&pi->sched_lock);
4531 * ice_cfg_vsi_lan - configure VSI LAN queues
4532 * @pi: port information structure
4533 * @vsi_handle: software VSI handle
4534 * @tc_bitmap: TC bitmap
4535 * @max_lanqs: max LAN queues array per TC
4537 * This function adds/updates the VSI LAN queues per TC.
4540 ice_cfg_vsi_lan(struct ice_port_info *pi, u16 vsi_handle, u16 tc_bitmap,
4543 return ice_cfg_vsi_qs(pi, vsi_handle, tc_bitmap, max_lanqs,
4544 ICE_SCHED_NODE_OWNER_LAN);
4548 * ice_is_main_vsi - checks whether the VSI is main VSI
4549 * @hw: pointer to the HW struct
4550 * @vsi_handle: VSI handle
4552 * Checks whether the VSI is the main VSI (the first PF VSI created on this port).
4555 static bool ice_is_main_vsi(struct ice_hw *hw, u16 vsi_handle)
4557 return vsi_handle == ICE_MAIN_VSI_HANDLE && hw->vsi_ctx[vsi_handle];
4561 * ice_replay_pre_init - replay pre initialization
4562 * @hw: pointer to the HW struct
4563 * @sw: pointer to switch info struct for which function initializes filters
4565 * Initializes required config data for VSI, FD, ACL, and RSS before replay.
4567 static enum ice_status
4568 ice_replay_pre_init(struct ice_hw *hw, struct ice_switch_info *sw)
4570 enum ice_status status;
4573 /* Delete old entries from replay filter list head if there is any */
4574 ice_rm_sw_replay_rule_info(hw, sw);
4575 /* At the start of replay, move entries into the replay_rules list; this
4576 * allows rule entries to be added back to the filt_rules list, which is
4577 * the operational list.
4579 for (i = 0; i < ICE_MAX_NUM_RECIPES; i++)
4580 LIST_REPLACE_INIT(&sw->recp_list[i].filt_rules,
4581 &sw->recp_list[i].filt_replay_rules);
4582 ice_sched_replay_agg_vsi_preinit(hw);
4584 status = ice_sched_replay_root_node_bw(hw->port_info);
4588 return ice_sched_replay_tc_node_bw(hw->port_info);
4592 * ice_replay_vsi - replay VSI configuration
4593 * @hw: pointer to the HW struct
4594 * @vsi_handle: driver VSI handle
4596 * Restore all VSI configuration after reset. It is required to call this
4597 * function with main VSI first.
4599 enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle)
4601 struct ice_switch_info *sw = hw->switch_info;
4602 struct ice_port_info *pi = hw->port_info;
4603 enum ice_status status;
4605 if (!ice_is_vsi_valid(hw, vsi_handle))
4606 return ICE_ERR_PARAM;
4608 /* Replay pre-initialization if there is any */
4609 if (ice_is_main_vsi(hw, vsi_handle)) {
4610 status = ice_replay_pre_init(hw, sw);
4614 /* Replay per VSI all RSS configurations */
4615 status = ice_replay_rss_cfg(hw, vsi_handle);
4618 /* Replay per VSI all filters */
4619 status = ice_replay_vsi_all_fltr(hw, pi, vsi_handle);
4621 status = ice_replay_vsi_agg(hw, vsi_handle);
4626 * ice_replay_post - post replay configuration cleanup
4627 * @hw: pointer to the HW struct
4629 * Post replay cleanup.
4631 void ice_replay_post(struct ice_hw *hw)
4633 /* Delete old entries from replay filter list head */
4634 ice_rm_all_sw_replay_rule_info(hw);
4635 ice_sched_replay_agg(hw);
4639 * ice_stat_update40 - read 40 bit stat from the chip and update stat values
4640 * @hw: ptr to the hardware info
4641 * @reg: offset of 64 bit HW register to read from
4642 * @prev_stat_loaded: bool to specify if previous stats are loaded
4643 * @prev_stat: ptr to previous loaded stat value
4644 * @cur_stat: ptr to current stat value
4647 ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4648 u64 *prev_stat, u64 *cur_stat)
4650 u64 new_data = rd64(hw, reg) & (BIT_ULL(40) - 1);
4652 /* device stats are not reset at PFR; they likely will not be zeroed
4653 * when the driver starts. Thus, save the value from the first read
4654 * without adding to the statistic value so that we report stats which
4655 * count up from zero.
4657 if (!prev_stat_loaded) {
4658 *prev_stat = new_data;
4662 /* Calculate the difference between the new and old values, and then
4663 * add it to the software stat value.
4665 if (new_data >= *prev_stat)
4666 *cur_stat += new_data - *prev_stat;
4667 else
4668 /* to manage the potential roll-over */
4669 *cur_stat += (new_data + BIT_ULL(40)) - *prev_stat;
4671 /* Update the previously stored value to prepare for next read */
4672 *prev_stat = new_data;
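/* Worked example of the 40-bit roll-over handling above (illustrative
 * numbers): with *prev_stat = 0xFFFFFFFFF0 and a new raw reading of 0x10,
 * new_data < *prev_stat, so the counter wrapped and the update becomes
 *
 *	*cur_stat += (0x10 + BIT_ULL(40)) - 0xFFFFFFFFF0 = 0x20
 *
 * i.e. 32 events occurred across the wrap. ice_stat_update32() below applies
 * the same scheme with BIT_ULL(32).
 */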
4676 * ice_stat_update32 - read 32 bit stat from the chip and update stat values
4677 * @hw: ptr to the hardware info
4678 * @reg: offset of HW register to read from
4679 * @prev_stat_loaded: bool to specify if previous stats are loaded
4680 * @prev_stat: ptr to previous loaded stat value
4681 * @cur_stat: ptr to current stat value
4684 ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
4685 u64 *prev_stat, u64 *cur_stat)
4689 new_data = rd32(hw, reg);
4691 /* device stats are not reset at PFR; they likely will not be zeroed
4692 * when the driver starts. Thus, save the value from the first read
4693 * without adding to the statistic value so that we report stats which
4694 * count up from zero.
4696 if (!prev_stat_loaded) {
4697 *prev_stat = new_data;
4701 /* Calculate the difference between the new and old values, and then
4702 * add it to the software stat value.
4704 if (new_data >= *prev_stat)
4705 *cur_stat += new_data - *prev_stat;
4706 else
4707 /* to manage the potential roll-over */
4708 *cur_stat += (new_data + BIT_ULL(32)) - *prev_stat;
4710 /* Update the previously stored value to prepare for next read */
4711 *prev_stat = new_data;
4715 * ice_stat_update_repc - read GLV_REPC stats from chip and update stat values
4716 * @hw: ptr to the hardware info
4717 * @vsi_handle: VSI handle
4718 * @prev_stat_loaded: bool to specify if the previous stat values are loaded
4719 * @cur_stats: ptr to current stats structure
4721 * The GLV_REPC statistic register actually tracks two 16-bit statistics, and
4722 * thus cannot be read using the normal ice_stat_update32 function.
4724 * Read the GLV_REPC register associated with the given VSI, and update the
4725 * rx_no_desc and rx_error values in the ice_eth_stats structure.
4727 * Because the statistics in GLV_REPC stick at 0xFFFF, the register must be
4728 * cleared each time it's read.
4730 * Note that the GLV_RDPC register also counts the causes that would trigger
4731 * GLV_REPC. However, it does not give the finer grained detail about why the
4732 * packets are being dropped. The GLV_REPC values can be used to distinguish
4733 * whether Rx packets are dropped due to errors or due to no available descriptors.
4737 ice_stat_update_repc(struct ice_hw *hw, u16 vsi_handle, bool prev_stat_loaded,
4738 struct ice_eth_stats *cur_stats)
4740 u16 vsi_num, no_desc, error_cnt;
4743 if (!ice_is_vsi_valid(hw, vsi_handle))
4746 vsi_num = ice_get_hw_vsi_num(hw, vsi_handle);
4748 /* If we haven't loaded stats yet, just clear the current value */
4749 if (!prev_stat_loaded) {
4750 wr32(hw, GLV_REPC(vsi_num), 0);
4754 repc = rd32(hw, GLV_REPC(vsi_num));
4755 no_desc = (repc & GLV_REPC_NO_DESC_CNT_M) >> GLV_REPC_NO_DESC_CNT_S;
4756 error_cnt = (repc & GLV_REPC_ERROR_CNT_M) >> GLV_REPC_ERROR_CNT_S;
4758 /* Clear the count by writing to the stats register */
4759 wr32(hw, GLV_REPC(vsi_num), 0);
4761 cur_stats->rx_no_desc += no_desc;
4762 cur_stats->rx_errors += error_cnt;
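/* Example decode of a GLV_REPC reading (illustrative, assuming the no-desc
 * count occupies the low 16 bits and the error count the high 16 bits): a
 * raw value of 0x0005000A yields no_desc = 10 and error_cnt = 5. The write
 * of 0 afterwards is needed because both 16-bit counters stick at 0xFFFF
 * instead of wrapping.
 */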
4766 * ice_aq_alternate_write
4767 * @hw: pointer to the hardware structure
4768 * @reg_addr0: address of first dword to be written
4769 * @reg_val0: value to be written under 'reg_addr0'
4770 * @reg_addr1: address of second dword to be written
4771 * @reg_val1: value to be written under 'reg_addr1'
4773 * Write one or two dwords to alternate structure. Fields are indicated
4774 * by 'reg_addr0' and 'reg_addr1' register numbers.
4777 ice_aq_alternate_write(struct ice_hw *hw, u32 reg_addr0, u32 reg_val0,
4778 u32 reg_addr1, u32 reg_val1)
4780 struct ice_aqc_read_write_alt_direct *cmd;
4781 struct ice_aq_desc desc;
4782 enum ice_status status;
4784 cmd = &desc.params.read_write_alt_direct;
4786 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_write_alt_direct);
4787 cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
4788 cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
4789 cmd->dword0_value = CPU_TO_LE32(reg_val0);
4790 cmd->dword1_value = CPU_TO_LE32(reg_val1);
4792 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
4798 * ice_aq_alternate_read
4799 * @hw: pointer to the hardware structure
4800 * @reg_addr0: address of first dword to be read
4801 * @reg_val0: pointer for data read from 'reg_addr0'
4802 * @reg_addr1: address of second dword to be read
4803 * @reg_val1: pointer for data read from 'reg_addr1'
4805 * Read one or two dwords from the alternate structure. Fields are indicated
4806 * by 'reg_addr0' and 'reg_addr1' register numbers. If the 'reg_val1' pointer
4807 * is not passed, only the register at 'reg_addr0' is read.
4810 ice_aq_alternate_read(struct ice_hw *hw, u32 reg_addr0, u32 *reg_val0,
4811 u32 reg_addr1, u32 *reg_val1)
4813 struct ice_aqc_read_write_alt_direct *cmd;
4814 struct ice_aq_desc desc;
4815 enum ice_status status;
4817 cmd = &desc.params.read_write_alt_direct;
4820 return ICE_ERR_PARAM;
4822 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_read_alt_direct);
4823 cmd->dword0_addr = CPU_TO_LE32(reg_addr0);
4824 cmd->dword1_addr = CPU_TO_LE32(reg_addr1);
4826 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
4828 if (status == ICE_SUCCESS) {
4829 *reg_val0 = LE32_TO_CPU(cmd->dword0_value);
4832 *reg_val1 = LE32_TO_CPU(cmd->dword1_value);
4839 * ice_aq_alternate_write_done
4840 * @hw: pointer to the HW structure.
4841 * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
4842 * @reset_needed: indicates the SW should trigger GLOBAL reset
4844 * Indicates to the FW that alternate structures have been changed.
4847 ice_aq_alternate_write_done(struct ice_hw *hw, u8 bios_mode, bool *reset_needed)
4849 struct ice_aqc_done_alt_write *cmd;
4850 struct ice_aq_desc desc;
4851 enum ice_status status;
4853 cmd = &desc.params.done_alt_write;
4856 return ICE_ERR_PARAM;
4858 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_done_alt_write);
4859 cmd->flags = bios_mode;
4861 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
4863 *reset_needed = (LE16_TO_CPU(cmd->flags) &
4864 ICE_AQC_RESP_RESET_NEEDED) != 0;
4870 * ice_aq_alternate_clear
4871 * @hw: pointer to the HW structure.
4873 * Clear the alternate structures of the port from which the function is called.
4876 enum ice_status ice_aq_alternate_clear(struct ice_hw *hw)
4878 struct ice_aq_desc desc;
4879 enum ice_status status;
4881 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_clear_port_alt_write);
4883 status = ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
4889 * ice_sched_query_elem - query element information from HW
4890 * @hw: pointer to the HW struct
4891 * @node_teid: node TEID to be queried
4892 * @buf: buffer to element information
4894 * This function queries HW element information
4897 ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
4898 struct ice_aqc_txsched_elem_data *buf)
4900 u16 buf_size, num_elem_ret = 0;
4901 enum ice_status status;
4903 buf_size = sizeof(*buf);
4904 ice_memset(buf, 0, buf_size, ICE_NONDMA_MEM);
4905 buf->node_teid = CPU_TO_LE32(node_teid);
4906 status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
4908 if (status != ICE_SUCCESS || num_elem_ret != 1)
4909 ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
4914 * ice_get_fw_mode - returns FW mode
4915 * @hw: pointer to the HW struct
4917 enum ice_fw_modes ice_get_fw_mode(struct ice_hw *hw)
4919 #define ICE_FW_MODE_DBG_M BIT(0)
4920 #define ICE_FW_MODE_REC_M BIT(1)
4921 #define ICE_FW_MODE_ROLLBACK_M BIT(2)
4924 /* check the current FW mode */
4925 fw_mode = rd32(hw, GL_MNG_FWSM) & GL_MNG_FWSM_FW_MODES_M;
4927 if (fw_mode & ICE_FW_MODE_DBG_M)
4928 return ICE_FW_MODE_DBG;
4929 else if (fw_mode & ICE_FW_MODE_REC_M)
4930 return ICE_FW_MODE_REC;
4931 else if (fw_mode & ICE_FW_MODE_ROLLBACK_M)
4932 return ICE_FW_MODE_ROLLBACK;
4934 return ICE_FW_MODE_NORMAL;
4938 * ice_get_cur_lldp_persist_status
4939 * @hw: pointer to the HW struct
4940 * @lldp_status: return value of LLDP persistent status
4942 * Get the current LLDP persistent status
4945 ice_get_cur_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
4947 struct ice_port_info *pi = hw->port_info;
4948 enum ice_status ret;
4953 return ICE_ERR_BAD_PTR;
4955 ret = ice_acquire_nvm(hw, ICE_RES_READ);
4959 ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_LLDP_PRESERVED_MOD_ID,
4960 ICE_AQC_NVM_CUR_LLDP_PERSIST_RD_OFFSET,
4961 ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data,
4964 data = LE32_TO_CPU(raw_data);
4965 mask = ICE_AQC_NVM_LLDP_STATUS_M <<
4966 (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
4968 *lldp_status = data >>
4969 (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
4972 ice_release_nvm(hw);
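/* Example of the per-port extraction above (illustrative, assuming the usual
 * 4-bit-per-port layout, i.e. ICE_AQC_NVM_LLDP_STATUS_M_LEN == 4): for
 * lport == 2 the status field sits at bits 11:8 of the word read from NVM,
 * so it is isolated with (ICE_AQC_NVM_LLDP_STATUS_M << 8) and shifted right
 * by 8 before being returned in *lldp_status.
 */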
4978 * ice_get_dflt_lldp_persist_status
4979 * @hw: pointer to the HW struct
4980 * @lldp_status: return value of LLDP persistent status
4982 * Get the default LLDP persistent status
4985 ice_get_dflt_lldp_persist_status(struct ice_hw *hw, u32 *lldp_status)
4987 struct ice_port_info *pi = hw->port_info;
4988 u32 data, mask, loc_data, loc_data_tmp;
4989 enum ice_status ret;
4990 __le16 loc_raw_data;
4994 return ICE_ERR_BAD_PTR;
4996 ret = ice_acquire_nvm(hw, ICE_RES_READ);
5000 /* Read the offset of EMP_SR_PTR */
5001 ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT,
5002 ICE_AQC_NVM_EMP_SR_PTR_OFFSET,
5003 ICE_AQC_NVM_EMP_SR_PTR_RD_LEN,
5004 &loc_raw_data, false, true, NULL);
5008 loc_data = LE16_TO_CPU(loc_raw_data);
5009 if (loc_data & ICE_AQC_NVM_EMP_SR_PTR_TYPE_M) {
5010 loc_data &= ICE_AQC_NVM_EMP_SR_PTR_M;
5011 loc_data *= ICE_AQC_NVM_SECTOR_UNIT;
5013 loc_data *= ICE_AQC_NVM_WORD_UNIT;
5016 /* Read the offset of LLDP configuration pointer */
5017 loc_data += ICE_AQC_NVM_LLDP_CFG_PTR_OFFSET;
5018 ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
5019 ICE_AQC_NVM_LLDP_CFG_PTR_RD_LEN, &loc_raw_data,
5024 loc_data_tmp = LE16_TO_CPU(loc_raw_data);
5025 loc_data_tmp *= ICE_AQC_NVM_WORD_UNIT;
5026 loc_data += loc_data_tmp;
5028 /* We need to skip LLDP configuration section length (2 bytes) */
5029 loc_data += ICE_AQC_NVM_LLDP_CFG_HEADER_LEN;
5031 /* Read the LLDP Default Configure */
5032 ret = ice_aq_read_nvm(hw, ICE_AQC_NVM_START_POINT, loc_data,
5033 ICE_AQC_NVM_LLDP_STATUS_RD_LEN, &raw_data, false,
5036 data = LE32_TO_CPU(raw_data);
5037 mask = ICE_AQC_NVM_LLDP_STATUS_M <<
5038 (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
5040 *lldp_status = data >>
5041 (ICE_AQC_NVM_LLDP_STATUS_M_LEN * pi->lport);
5045 ice_release_nvm(hw);
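/* Illustrative walk of the NVM addressing above (the exact bit positions are
 * hypothetical): if the EMP SR pointer word has its type bit set, the
 * remaining pointer bits are scaled by ICE_AQC_NVM_SECTOR_UNIT, otherwise by
 * ICE_AQC_NVM_WORD_UNIT. The LLDP configuration pointer read at
 * loc_data + ICE_AQC_NVM_LLDP_CFG_PTR_OFFSET is then scaled by the word
 * unit, and the 2-byte section length header is skipped before the default
 * status word itself is read.
 */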
5051 * ice_fw_supports_link_override
5052 * @hw: pointer to the hardware structure
5054 * Checks if the firmware supports link override
5056 bool ice_fw_supports_link_override(struct ice_hw *hw)
5058 if (hw->api_maj_ver == ICE_FW_API_LINK_OVERRIDE_MAJ) {
5059 if (hw->api_min_ver > ICE_FW_API_LINK_OVERRIDE_MIN)
5061 if (hw->api_min_ver == ICE_FW_API_LINK_OVERRIDE_MIN &&
5062 hw->api_patch >= ICE_FW_API_LINK_OVERRIDE_PATCH)
5064 } else if (hw->api_maj_ver > ICE_FW_API_LINK_OVERRIDE_MAJ) {
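/* Version-gate example (illustrative): with the minimum set to
 * (ICE_FW_API_LINK_OVERRIDE_MAJ, _MIN, _PATCH), an API with a newer major, a
 * newer minor on the same major, or the same major/minor and a patch level
 * at or above _PATCH reports support; anything older does not.
 * ice_fw_supports_lldp_fltr_ctrl() below applies the same pattern.
 */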
5072 * ice_get_link_default_override
5073 * @ldo: pointer to the link default override struct
5074 * @pi: pointer to the port info struct
5076 * Gets the link default override for a port
5079 ice_get_link_default_override(struct ice_link_default_override_tlv *ldo,
5080 struct ice_port_info *pi)
5082 u16 i, tlv, tlv_len, tlv_start, buf, offset;
5083 struct ice_hw *hw = pi->hw;
5084 enum ice_status status;
5086 status = ice_get_pfa_module_tlv(hw, &tlv, &tlv_len,
5087 ICE_SR_LINK_DEFAULT_OVERRIDE_PTR);
5089 ice_debug(hw, ICE_DBG_INIT, "Failed to read link override TLV.\n");
5093 /* Each port has its own config; calculate for our port */
5094 tlv_start = tlv + pi->lport * ICE_SR_PFA_LINK_OVERRIDE_WORDS +
5095 ICE_SR_PFA_LINK_OVERRIDE_OFFSET;
5097 /* link options first */
5098 status = ice_read_sr_word(hw, tlv_start, &buf);
5100 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
5103 ldo->options = buf & ICE_LINK_OVERRIDE_OPT_M;
5104 ldo->phy_config = (buf & ICE_LINK_OVERRIDE_PHY_CFG_M) >>
5105 ICE_LINK_OVERRIDE_PHY_CFG_S;
5107 /* link PHY config */
5108 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_FEC_OFFSET;
5109 status = ice_read_sr_word(hw, offset, &buf);
5111 ice_debug(hw, ICE_DBG_INIT, "Failed to read override phy config.\n");
5114 ldo->fec_options = buf & ICE_LINK_OVERRIDE_FEC_OPT_M;
5117 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET;
5118 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
5119 status = ice_read_sr_word(hw, (offset + i), &buf);
5121 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
5124 /* shift 16 bits at a time to fill 64 bits */
5125 ldo->phy_type_low |= ((u64)buf << (i * 16));
5128 /* PHY types high */
5129 offset = tlv_start + ICE_SR_PFA_LINK_OVERRIDE_PHY_OFFSET +
5130 ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS;
5131 for (i = 0; i < ICE_SR_PFA_LINK_OVERRIDE_PHY_WORDS; i++) {
5132 status = ice_read_sr_word(hw, (offset + i), &buf);
5134 ice_debug(hw, ICE_DBG_INIT, "Failed to read override link options.\n");
5137 /* shift 16 bits at a time to fill 64 bits */
5138 ldo->phy_type_high |= ((u64)buf << (i * 16));
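/* Assembly example for the word loops above (illustrative words): if the
 * four PHY type words read from the shadow RAM are 0x1111, 0x2222, 0x3333
 * and 0x4444 (lowest offset first), the resulting 64-bit value is
 *
 *	0x1111 | (0x2222 << 16) | (0x3333 << 32) | (0x4444 << 48)
 *		= 0x4444333322221111
 *
 * i.e. each successive word fills the next higher 16 bits of phy_type_low or
 * phy_type_high.
 */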
5145 * ice_is_phy_caps_an_enabled - check if PHY capabilities autoneg is enabled
5146 * @caps: get PHY capability data
5148 bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
5150 if (caps->caps & ICE_AQC_PHY_AN_MODE ||
5151 caps->low_power_ctrl_an & (ICE_AQC_PHY_AN_EN_CLAUSE28 |
5152 ICE_AQC_PHY_AN_EN_CLAUSE73 |
5153 ICE_AQC_PHY_AN_EN_CLAUSE37))
5160 * ice_aq_set_lldp_mib - Set the LLDP MIB
5161 * @hw: pointer to the HW struct
5162 * @mib_type: Local, Remote or both Local and Remote MIBs
5163 * @buf: pointer to the caller-supplied buffer holding the MIB block to set
5164 * @buf_size: size of the buffer (in bytes)
5165 * @cd: pointer to command details structure or NULL
5167 * Set the LLDP MIB. (0x0A08)
5170 ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
5171 struct ice_sq_cd *cd)
5173 struct ice_aqc_lldp_set_local_mib *cmd;
5174 struct ice_aq_desc desc;
5176 cmd = &desc.params.lldp_set_mib;
5178 if (buf_size == 0 || !buf)
5179 return ICE_ERR_PARAM;
5181 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
5183 desc.flags |= CPU_TO_LE16((u16)ICE_AQ_FLAG_RD);
5184 desc.datalen = CPU_TO_LE16(buf_size);
5186 cmd->type = mib_type;
5187 cmd->length = CPU_TO_LE16(buf_size);
5189 return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
5193 * ice_fw_supports_lldp_fltr_ctrl - check if the FW API version supports lldp_fltr_ctrl
5194 * @hw: pointer to HW struct
5196 bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw)
5198 if (hw->mac_type != ICE_MAC_E810)
5201 if (hw->api_maj_ver == ICE_FW_API_LLDP_FLTR_MAJ) {
5202 if (hw->api_min_ver > ICE_FW_API_LLDP_FLTR_MIN)
5204 if (hw->api_min_ver == ICE_FW_API_LLDP_FLTR_MIN &&
5205 hw->api_patch >= ICE_FW_API_LLDP_FLTR_PATCH)
5207 } else if (hw->api_maj_ver > ICE_FW_API_LLDP_FLTR_MAJ) {
5214 * ice_lldp_fltr_add_remove - add or remove a LLDP Rx switch filter
5215 * @hw: pointer to HW struct
5216 * @vsi_num: absolute HW index for VSI
5217 * @add: boolean for if adding or removing a filter
5220 ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
5222 struct ice_aqc_lldp_filter_ctrl *cmd;
5223 struct ice_aq_desc desc;
5225 cmd = &desc.params.lldp_filter_ctrl;
5227 ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_filter_ctrl);
5230 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_ADD;
5232 cmd->cmd_flags = ICE_AQC_LLDP_FILTER_ACTION_DELETE;
5234 cmd->vsi_num = CPU_TO_LE16(vsi_num);
5236 return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);