1 /******************************************************************************
3 Copyright (c) 2013-2018, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
39 #include "ixl_pf_iov.h"
44 #include "ixl_iw_int.h"
47 static u8 ixl_convert_sysctl_aq_link_speed(u8, bool);
48 static void ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
49 static const char * ixl_link_speed_string(enum i40e_aq_link_speed);
50 static u_int ixl_add_maddr(void *, struct sockaddr_dl *, u_int);
51 static u_int ixl_match_maddr(void *, struct sockaddr_dl *, u_int);
52 static char * ixl_switch_element_string(struct sbuf *, u8, u16);
53 static enum ixl_fw_mode ixl_get_fw_mode(struct ixl_pf *);
56 static int ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
57 static int ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
58 static int ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
59 static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
60 static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
61 static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
62 static int ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);
64 static int ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
65 static int ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS);
68 static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
69 static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
70 static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
71 static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
72 static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
73 static int ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS);
74 static int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
75 static int ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
76 static int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
77 static int ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
78 static int ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
79 static int ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
80 static int ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
81 static int ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
82 static int ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
83 static int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
84 static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
85 static int ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
86 static int ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
87 static int ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
90 static int ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
91 static int ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
92 static int ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
93 static int ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
95 static int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
96 static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
100 extern int ixl_enable_iwarp;
101 extern int ixl_limit_iwarp_msix;
104 static const char * const ixl_fc_string[6] = {
113 static char *ixl_fec_string[3] = {
115 "CL74 FC-FEC/BASE-R",
119 MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
122 ** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
/*
 * Render the firmware, AQ API, NVM, EEtrack ID and OEM version
 * information into the caller-supplied sbuf as one formatted line.
 * Caller owns the sbuf.
 */
125 ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
/* Unpack the OEM version fields from the packed hw->nvm.oem_ver word. */
127 u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
128 u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
129 u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);
132 "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
133 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
134 hw->aq.api_maj_ver, hw->aq.api_min_ver,
/* NVM version is stored as packed hi/lo fields in hw->nvm.version. */
135 (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
136 IXL_NVM_VERSION_HI_SHIFT,
137 (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
138 IXL_NVM_VERSION_LO_SHIFT,
140 oem_ver, oem_build, oem_patch);
/*
 * Print the NVM/FW version string to the device's console line via
 * device_printf(), using a temporary auto-sized sbuf.
 */
144 ixl_print_nvm_version(struct ixl_pf *pf)
146 struct i40e_hw *hw = &pf->hw;
147 device_t dev = pf->dev;
150 sbuf = sbuf_new_auto();
151 ixl_nvm_version_str(hw, sbuf);
/* NOTE(review): sbuf_finish()/sbuf_delete() appear elided in this listing
 * — confirm the sbuf is finished before sbuf_data() and freed afterwards. */
153 device_printf(dev, "%s\n", sbuf_data(sbuf));
158 * ixl_get_fw_mode - Check the state of FW
159 * @hw: device hardware structure
161 * Identify state of FW. It might be in a recovery mode
162 * which limits functionality and requires special handling
165 * @returns FW mode (normal, recovery, unexpected EMP reset)
167 static enum ixl_fw_mode
168 ixl_get_fw_mode(struct ixl_pf *pf)
170 struct i40e_hw *hw = &pf->hw;
171 enum ixl_fw_mode fw_mode = IXL_FW_MODE_NORMAL;
/* Driver-side flag short-circuits the register read. */
175 if (pf->recovery_mode)
176 return IXL_FW_MODE_RECOVERY;
/* Read the FW status word FWS1B field from the global FW status register. */
178 fwsts = rd32(hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;
180 /* Is set and has one of expected values */
181 if ((fwsts >= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK &&
182 fwsts <= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK) ||
183 fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
184 fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK)
185 fw_mode = IXL_FW_MODE_RECOVERY;
/* EMPR counter range 1..10 indicates unexpected EMP resets (UEMPR). */
187 if (fwsts > I40E_GL_FWSTS_FWS1B_EMPR_0 &&
188 fwsts <= I40E_GL_FWSTS_FWS1B_EMPR_10)
189 fw_mode = IXL_FW_MODE_UEMPR;
195 * ixl_pf_reset - Reset the PF
198 * Ensure that FW is in the right state and do the reset
201 * @returns zero on success, or an error code on failure.
204 ixl_pf_reset(struct ixl_pf *pf)
206 struct i40e_hw *hw = &pf->hw;
207 enum i40e_status_code status;
208 enum ixl_fw_mode fw_mode;
210 fw_mode = ixl_get_fw_mode(pf);
211 ixl_dbg_info(pf, "%s: before PF reset FW mode: 0x%08x\n", __func__, fw_mode);
212 if (fw_mode == IXL_FW_MODE_RECOVERY) {
/* Latch recovery state so the rest of the driver limits functionality. */
213 atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
214 /* Don't try to reset device if it's in recovery mode */
218 status = i40e_pf_reset(hw);
219 if (status == I40E_SUCCESS)
222 /* Check FW mode again in case it has changed while
223 * waiting for reset to complete */
224 fw_mode = ixl_get_fw_mode(pf);
225 ixl_dbg_info(pf, "%s: after PF reset FW mode: 0x%08x\n", __func__, fw_mode);
226 if (fw_mode == IXL_FW_MODE_RECOVERY) {
227 atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
/* Repeated EMP resets: advise the user; recovery can take minutes. */
231 if (fw_mode == IXL_FW_MODE_UEMPR)
232 device_printf(pf->dev,
233 "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n")
235 device_printf(pf->dev, "PF reset failure %s\n",
236 i40e_stat_str(hw, status));
241 * ixl_setup_hmc - Setup LAN Host Memory Cache
244 * Init and configure LAN Host Memory Cache
246 * @returns 0 on success, EIO on error
249 ixl_setup_hmc(struct ixl_pf *pf)
251 struct i40e_hw *hw = &pf->hw;
252 enum i40e_status_code status;
/* Size HMC for the TX/RX queue pairs reported by function capabilities;
 * the two trailing zeros are the FCoE context counts (unused here). */
254 status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
255 hw->func_caps.num_rx_qp, 0, 0);
257 device_printf(pf->dev, "init_lan_hmc failed: %s\n",
258 i40e_stat_str(hw, status));
/* Direct-only model: all HMC objects in function-private memory. */
262 status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
264 device_printf(pf->dev, "configure_lan_hmc failed: %s\n",
265 i40e_stat_str(hw, status));
273 * ixl_shutdown_hmc - Shutdown LAN Host Memory Cache
276 * Shutdown Host Memory Cache if configured.
280 ixl_shutdown_hmc(struct ixl_pf *pf)
282 struct i40e_hw *hw = &pf->hw;
283 enum i40e_status_code status;
285 /* HMC not configured, no need to shutdown */
286 if (hw->hmc.hmc_obj == NULL)
289 status = i40e_shutdown_lan_hmc(hw);
/* Failure is logged but not fatal — we are tearing down anyway. */
291 device_printf(pf->dev,
292 "Shutdown LAN HMC failed with code %s\n",
293 i40e_stat_str(hw, status));
296 * Write PF ITR values to queue ITR registers.
/* Convenience wrapper: program both TX and RX interrupt throttling. */
299 ixl_configure_itr(struct ixl_pf *pf)
301 ixl_configure_tx_itr(pf);
302 ixl_configure_rx_itr(pf);
305 /*********************************************************************
307 * Get the hardware capabilities
309 **********************************************************************/
312 ixl_get_hw_capabilities(struct ixl_pf *pf)
314 struct i40e_aqc_list_capabilities_element_resp *buf;
315 struct i40e_hw *hw = &pf->hw;
316 device_t dev = pf->dev;
317 enum i40e_status_code status;
318 int len, i2c_intfc_num;
/* In recovery mode the AQ capability query is skipped; disable iwarp. */
322 if (IXL_PF_IN_RECOVERY_MODE(pf)) {
323 hw->func_caps.iwarp = 0;
/* Initial guess: room for 40 capability elements. */
327 len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
329 if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
330 malloc(len, M_IXL, M_NOWAIT | M_ZERO))) {
331 device_printf(dev, "Unable to allocate cap memory\n");
335 /* This populates the hw struct */
336 status = i40e_aq_discover_capabilities(hw, buf, len,
337 &needed, i40e_aqc_opc_list_func_capabilities, NULL);
/* FW says buffer was too small ("needed" holds required size). */
339 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
341 /* retry once with a larger buffer */
345 } else if (status != I40E_SUCCESS) {
346 device_printf(dev, "capability discovery failed; status %s, error %s\n",
347 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
352 * Some devices have both MDIO and I2C; since this isn't reported
353 * by the FW, check registers to see if an I2C interface exists.
355 i2c_intfc_num = ixl_find_i2c_interface(pf);
356 if (i2c_intfc_num != -1)
359 /* Determine functions to use for driver I2C accesses */
360 switch (pf->i2c_access_method) {
361 case IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE: {
/* Prefer AQ-mediated PHY access when the FW supports it. */
362 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
363 pf->read_i2c_byte = ixl_read_i2c_byte_aq;
364 pf->write_i2c_byte = ixl_write_i2c_byte_aq;
366 pf->read_i2c_byte = ixl_read_i2c_byte_reg;
367 pf->write_i2c_byte = ixl_write_i2c_byte_reg;
371 case IXL_I2C_ACCESS_METHOD_AQ:
372 pf->read_i2c_byte = ixl_read_i2c_byte_aq;
373 pf->write_i2c_byte = ixl_write_i2c_byte_aq;
375 case IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD:
376 pf->read_i2c_byte = ixl_read_i2c_byte_reg;
377 pf->write_i2c_byte = ixl_write_i2c_byte_reg;
379 case IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS:
380 pf->read_i2c_byte = ixl_read_i2c_byte_bb;
381 pf->write_i2c_byte = ixl_write_i2c_byte_bb;
384 /* Should not happen */
385 device_printf(dev, "Error setting I2C access functions\n");
389 /* Keep link active by default */
390 atomic_set_32(&pf->state, IXL_PF_STATE_LINK_ACTIVE_ON_DOWN);
392 /* Print a subset of the capability information. */
394 "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n",
395 hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
396 hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
397 (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
398 (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
399 (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
405 /* For the set_advertise sysctl */
/*
 * Program the full supported-speed set as the advertised speeds and
 * mirror that into pf->advertised_speed (the sysctl-visible format),
 * so that a driver unload/reload restores the original advertisement.
 */
407 ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
409 device_t dev = pf->dev;
412 /* Make sure to initialize the device to the complete list of
413 * supported speeds on driver load, to ensure unloading and
414 * reloading the driver will restore this value.
416 err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
418 /* Non-fatal error */
419 device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
/* Cache the sysctl representation of the advertised speed set. */
424 pf->advertised_speed =
425 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
/*
 * Tear down HW-facing structures in reverse order of setup:
 * LAN HMC first (if configured), then the admin queue, and finally
 * release the queue manager allocation. Errors are logged, not fatal.
 */
429 ixl_teardown_hw_structs(struct ixl_pf *pf)
431 enum i40e_status_code status = 0;
432 struct i40e_hw *hw = &pf->hw;
433 device_t dev = pf->dev;
435 /* Shutdown LAN HMC */
436 if (hw->hmc.hmc_obj) {
437 status = i40e_shutdown_lan_hmc(hw);
440 "init: LAN HMC shutdown failure; status %s\n",
441 i40e_stat_str(hw, status));
446 /* Shutdown admin queue */
447 ixl_disable_intr0(hw);
448 status = i40e_shutdown_adminq(hw);
451 "init: Admin Queue shutdown failure; status %s\n",
452 i40e_stat_str(hw, status));
/* Return this PF's queue allocation to the queue manager. */
454 ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
460 ** Creates new filter with given MAC address and VLAN ID
/*
 * Allocates (M_NOWAIT) a new SW MAC/VLAN filter entry and links it at
 * the head of 'headp'. Does not touch HW. May return NULL on
 * allocation failure — callers must check.
 */
462 static struct ixl_mac_filter *
463 ixl_new_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
465 struct ixl_mac_filter *f;
467 /* create a new empty filter */
468 f = malloc(sizeof(struct ixl_mac_filter),
469 M_IXL, M_NOWAIT | M_ZERO);
471 LIST_INSERT_HEAD(headp, f, ftle);
472 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
480 * ixl_free_filters - Free all filters in given list
481 * headp - pointer to list head
483 * Frees memory used by each entry in the list.
484 * Does not remove filters from HW.
487 ixl_free_filters(struct ixl_ftl_head *headp)
489 struct ixl_mac_filter *f, *nf;
/* Walk with a saved 'next' pointer since each entry is freed. */
491 f = LIST_FIRST(headp);
493 nf = LIST_NEXT(f, ftle);
/*
 * if_foreach_llmaddr() callback: stage a multicast MAC filter for
 * addition unless an identical filter already exists on the VSI.
 * New entries are collected on ama->to_add for a later batched HW add.
 */
502 ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
504 struct ixl_add_maddr_arg *ama = arg;
505 struct ixl_vsi *vsi = ama->vsi;
506 const u8 *macaddr = (u8*)LLADDR(sdl);
507 struct ixl_mac_filter *f;
509 /* Does one already exist */
510 f = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY);
514 f = ixl_new_filter(&ama->to_add, macaddr, IXL_VLAN_ANY);
516 device_printf(vsi->dev, "WARNING: no filter available!!\n");
/* Mark as multicast so ixl_del_multi() can find it later. */
519 f->flags |= IXL_FILTER_MC;
524 /*********************************************************************
527 * Routines for multicast and vlan filter management.
529 *********************************************************************/
/*
 * Sync the interface's multicast address list into HW filters.
 * If the list exceeds MAX_MULTICAST_ADDR, fall back to multicast
 * promiscuous mode and drop the individual MC filters instead.
 */
531 ixl_add_multi(struct ixl_vsi *vsi)
533 struct ifnet *ifp = vsi->ifp;
534 struct i40e_hw *hw = vsi->hw;
536 struct ixl_add_maddr_arg cb_arg;
538 IOCTL_DEBUGOUT("ixl_add_multi: begin");
540 mcnt = if_llmaddr_count(ifp);
541 if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
542 i40e_aq_set_vsi_multicast_promiscuous(hw,
543 vsi->seid, TRUE, NULL);
544 /* delete all existing MC filters */
545 ixl_del_multi(vsi, true);
/* Collect new addresses via callback, then push them to HW in one batch. */
550 LIST_INIT(&cb_arg.to_add);
552 mcnt = if_foreach_llmaddr(ifp, ixl_add_maddr, &cb_arg);
554 ixl_add_hw_filters(vsi, &cb_arg.to_add, mcnt);
556 IOCTL_DEBUGOUT("ixl_add_multi: end");
/*
 * if_foreach_llmaddr() callback: reports whether the filter passed via
 * 'arg' matches this link-level address (used to detect stale filters).
 */
560 ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
562 struct ixl_mac_filter *f = arg;
564 if (ixl_ether_is_equal(f->macaddr, (u8 *)LLADDR(sdl)))
/*
 * Remove multicast filters from the VSI. With all==true every MC
 * filter goes; otherwise only filters no longer present in the
 * interface's link-level multicast list are removed.
 */
571 ixl_del_multi(struct ixl_vsi *vsi, bool all)
573 struct ixl_ftl_head to_del;
574 struct ifnet *ifp = vsi->ifp;
575 struct ixl_mac_filter *f, *fn;
578 IOCTL_DEBUGOUT("ixl_del_multi: begin");
581 /* Search for removed multicast addresses */
582 LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, fn) {
/* Keep non-MC filters, and (unless 'all') any MC address still in use. */
583 if ((f->flags & IXL_FILTER_MC) == 0 ||
584 (!all && (if_foreach_llmaddr(ifp, ixl_match_maddr, f) == 0)))
587 LIST_REMOVE(f, ftle);
588 LIST_INSERT_HEAD(&to_del, f, ftle);
/* Batch-delete the collected filters from HW. */
593 ixl_del_hw_filters(vsi, &to_del, mcnt);
/*
 * Log a LOG_NOTICE "link up" message describing speed, requested and
 * negotiated FEC, autoneg completion, and the flow-control state
 * decoded from the PHY link info.
 */
597 ixl_link_up_msg(struct ixl_pf *pf)
599 struct i40e_hw *hw = &pf->hw;
600 struct ifnet *ifp = pf->vsi.ifp;
601 char *req_fec_string, *neg_fec_string;
604 fec_abilities = hw->phy.link_info.req_fec_info;
605 /* If both RS and KR are requested, only show RS */
606 if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
607 req_fec_string = ixl_fec_string[0];
608 else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
609 req_fec_string = ixl_fec_string[1];
611 req_fec_string = ixl_fec_string[2];
/* Negotiated FEC: RS takes precedence over KR in the report. */
613 if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
614 neg_fec_string = ixl_fec_string[0];
615 else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
616 neg_fec_string = ixl_fec_string[1];
618 neg_fec_string = ixl_fec_string[2];
620 log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
622 ixl_link_speed_string(hw->phy.link_info.link_speed),
623 req_fec_string, neg_fec_string,
624 (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
/* Flow control: both pause bits -> full, TX only, RX only, or none. */
625 (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
626 hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
627 ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
628 ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
629 ixl_fc_string[1] : ixl_fc_string[0]);
633 * Configure admin queue/misc interrupt cause registers in hardware.
636 ixl_configure_intr0_msix(struct ixl_pf *pf)
638 struct i40e_hw *hw = &pf->hw;
641 /* First set up the adminq - vector 0 */
642 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
643 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
/* Enable the miscellaneous causes routed to vector 0. */
645 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
646 I40E_PFINT_ICR0_ENA_GRST_MASK |
647 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
648 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
649 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
650 I40E_PFINT_ICR0_ENA_VFLR_MASK |
651 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
652 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
653 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
656 * 0x7FF is the end of the queue list.
657 * This means we won't use MSI-X vector 0 for a queue interrupt
660 wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
661 /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
662 wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
/* Enable vector 0 with the SW ITR index and masked-INTENA semantics. */
664 wr32(hw, I40E_PFINT_DYN_CTL0,
665 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
666 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
668 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
/*
 * Translate the device's supported PHY type bitmap into ifmedia
 * entries, covering 100M through 40G media (copper, fiber, backplane,
 * direct-attach). Purely additive: only ifmedia_add() calls.
 */
672 ixl_add_ifmedia(struct ifmedia *media, u64 phy_types)
674 /* Display supported media types */
675 if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
676 ifmedia_add(media, IFM_ETHER | IFM_100_TX, 0, NULL);
678 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
679 ifmedia_add(media, IFM_ETHER | IFM_1000_T, 0, NULL);
680 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
681 ifmedia_add(media, IFM_ETHER | IFM_1000_SX, 0, NULL);
682 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
683 ifmedia_add(media, IFM_ETHER | IFM_1000_LX, 0, NULL);
685 if (phy_types & (I40E_CAP_PHY_TYPE_2_5GBASE_T))
686 ifmedia_add(media, IFM_ETHER | IFM_2500_T, 0, NULL);
688 if (phy_types & (I40E_CAP_PHY_TYPE_5GBASE_T))
689 ifmedia_add(media, IFM_ETHER | IFM_5000_T, 0, NULL);
/* XAUI/XFI/SFP+ DA all present as 10G twinax to ifmedia. */
691 if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
692 phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
693 phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
694 ifmedia_add(media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
696 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
697 ifmedia_add(media, IFM_ETHER | IFM_10G_SR, 0, NULL);
698 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
699 ifmedia_add(media, IFM_ETHER | IFM_10G_LR, 0, NULL);
700 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
701 ifmedia_add(media, IFM_ETHER | IFM_10G_T, 0, NULL);
/* 40G copper/AOC/XLAUI/KR4 variants are reported as 40G CR4. */
703 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
704 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
705 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
706 phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
707 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
708 ifmedia_add(media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
709 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
710 ifmedia_add(media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
711 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
712 ifmedia_add(media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
714 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
715 ifmedia_add(media, IFM_ETHER | IFM_1000_KX, 0, NULL);
717 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
718 || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
719 ifmedia_add(media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
720 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
721 ifmedia_add(media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
722 if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
723 ifmedia_add(media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
724 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
725 ifmedia_add(media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
726 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
727 ifmedia_add(media, IFM_ETHER | IFM_10G_KR, 0, NULL);
729 if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
730 ifmedia_add(media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
732 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
733 ifmedia_add(media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
734 if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
735 ifmedia_add(media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
737 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
738 ifmedia_add(media, IFM_ETHER | IFM_25G_KR, 0, NULL);
739 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
740 ifmedia_add(media, IFM_ETHER | IFM_25G_CR, 0, NULL);
741 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
742 ifmedia_add(media, IFM_ETHER | IFM_25G_SR, 0, NULL);
743 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
744 ifmedia_add(media, IFM_ETHER | IFM_25G_LR, 0, NULL);
745 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
746 ifmedia_add(media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
747 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
748 ifmedia_add(media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
751 /*********************************************************************
753 * Get Firmware Switch configuration
754 * - this will need to be more robust when more complex
755 * switch configurations are enabled.
757 **********************************************************************/
759 ixl_switch_config(struct ixl_pf *pf)
761 struct i40e_hw *hw = &pf->hw;
762 struct ixl_vsi *vsi = &pf->vsi;
763 device_t dev = iflib_get_dev(vsi->ctx);
764 struct i40e_aqc_get_switch_config_resp *sw_config;
765 u8 aq_buf[I40E_AQ_LARGE_BUF];
/* The AQ response is parsed in place out of the stack buffer. */
769 memset(&aq_buf, 0, sizeof(aq_buf));
770 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
771 ret = i40e_aq_get_switch_config(hw, sw_config,
772 sizeof(aq_buf), &next, NULL);
774 device_printf(dev, "aq_get_switch_config() failed, error %d,"
775 " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
/* Optionally dump every reported switch element for debugging. */
778 if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
780 "Switch config: header reported: %d in structure, %d total\n",
781 LE16_TO_CPU(sw_config->header.num_reported),
782 LE16_TO_CPU(sw_config->header.num_total));
784 i < LE16_TO_CPU(sw_config->header.num_reported); i++) {
786 "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
787 sw_config->element[i].element_type,
788 LE16_TO_CPU(sw_config->element[i].seid),
789 LE16_TO_CPU(sw_config->element[i].uplink_seid),
790 LE16_TO_CPU(sw_config->element[i].downlink_seid));
793 /* Simplified due to a single VSI */
794 vsi->uplink_seid = LE16_TO_CPU(sw_config->element[0].uplink_seid);
795 vsi->downlink_seid = LE16_TO_CPU(sw_config->element[0].downlink_seid);
796 vsi->seid = LE16_TO_CPU(sw_config->element[0].seid);
/*
 * Create the per-VSI sysctl node (named 'sysctl_name') under the
 * device's tree, attach the ethernet statistics, an rx_errors mirror,
 * and optionally the per-queue statistic nodes.
 */
801 ixl_vsi_add_sysctls(struct ixl_vsi * vsi, const char * sysctl_name, bool queues_sysctls)
803 struct sysctl_oid *tree;
804 struct sysctl_oid_list *child;
805 struct sysctl_oid_list *vsi_list;
807 tree = device_get_sysctl_tree(vsi->dev);
808 child = SYSCTL_CHILDREN(tree);
809 vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name,
810 CTLFLAG_RD, NULL, "VSI Number");
812 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
813 ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats);
815 /* Copy of netstat RX errors counter for validation purposes */
816 SYSCTL_ADD_UQUAD(&vsi->sysctl_ctx, vsi_list, OID_AUTO, "rx_errors",
817 CTLFLAG_RD, &vsi->ierrors,
/* Per-queue stats are only wanted for some VSIs (gated by queues_sysctls). */
821 ixl_vsi_add_queues_stats(vsi, &vsi->sysctl_ctx);
825 * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
826 * Writes to the ITR registers immediately.
829 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
831 struct ixl_pf *pf = (struct ixl_pf *)arg1;
832 device_t dev = pf->dev;
834 int requested_tx_itr;
/* Standard sysctl pattern: report current value; bail if read-only access. */
836 requested_tx_itr = pf->tx_itr;
837 error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
838 if ((error) || (req->newptr == NULL))
/* Manual ITR conflicts with the dynamic-ITR feature. */
840 if (pf->dynamic_tx_itr) {
842 "Cannot set TX itr value while dynamic TX itr is enabled\n");
845 if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
847 "Invalid TX itr value; value must be between 0 and %d\n",
/* Commit and program the HW registers right away. */
852 pf->tx_itr = requested_tx_itr;
853 ixl_configure_tx_itr(pf);
859 * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
860 * Writes to the ITR registers immediately.
863 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
865 struct ixl_pf *pf = (struct ixl_pf *)arg1;
866 device_t dev = pf->dev;
868 int requested_rx_itr;
/* Mirror of the TX handler above, operating on the RX ITR. */
870 requested_rx_itr = pf->rx_itr;
871 error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
872 if ((error) || (req->newptr == NULL))
874 if (pf->dynamic_rx_itr) {
876 "Cannot set RX itr value while dynamic RX itr is enabled\n");
879 if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
881 "Invalid RX itr value; value must be between 0 and %d\n",
886 pf->rx_itr = requested_rx_itr;
887 ixl_configure_rx_itr(pf);
/*
 * Create the "mac" sysctl statistics node and populate it with the
 * port-level ethernet stats plus a table-driven list of MAC counters
 * (errors, size histograms, pause frames).
 */
893 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
894 struct sysctl_oid_list *child,
895 struct i40e_hw_port_stats *stats)
897 struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
898 "mac", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Mac Statistics");
899 struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
901 struct i40e_eth_stats *eth_stats = &stats->eth;
902 ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
/* {counter pointer, sysctl name, description} — terminated by a NULL stat. */
904 struct ixl_sysctl_info ctls[] =
906 {&stats->crc_errors, "crc_errors", "CRC Errors"},
907 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
908 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
909 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
910 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
911 /* Packet Reception Stats */
912 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
913 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
914 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
915 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
916 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
917 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
918 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
919 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
920 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
921 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
922 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
923 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
924 /* Packet Transmission Stats */
925 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
926 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
927 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
928 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
929 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
930 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
931 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
933 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
934 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
935 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
936 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
/* Register each table entry as a read-only 64-bit sysctl. */
941 struct ixl_sysctl_info *entry = ctls;
942 while (entry->stat != 0)
944 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
945 CTLFLAG_RD, entry->stat,
/*
 * Program the RSS hash key: fetch the stack's configured key (RSS
 * kernel option) or the driver default, then write it either via the
 * AQ (X722) or directly into the PFQF_HKEY registers (XL710).
 */
952 ixl_set_rss_key(struct ixl_pf *pf)
954 struct i40e_hw *hw = &pf->hw;
955 struct ixl_vsi *vsi = &pf->vsi;
956 device_t dev = pf->dev;
957 u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
958 enum i40e_status_code status;
961 /* Fetch the configured RSS key */
962 rss_getkey((uint8_t *) &rss_seed);
964 ixl_get_default_rss_key(rss_seed);
966 /* Fill out hash function seed */
967 if (hw->mac.type == I40E_MAC_X722) {
968 struct i40e_aqc_get_set_rss_key_data key_data;
/* NOTE(review): 52 bytes copied — presumably standard+extended key size
 * for key_data; confirm it does not exceed sizeof(rss_seed). */
969 bcopy(rss_seed, &key_data, 52);
970 status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
973 "i40e_aq_set_rss_key status %s, error %s\n",
974 i40e_stat_str(hw, status),
975 i40e_aq_str(hw, hw->aq.asq_last_status));
977 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
978 i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
983 * Configure enabled PCTYPES for RSS.
986 ixl_set_rss_pctypes(struct ixl_pf *pf)
988 struct i40e_hw *hw = &pf->hw;
989 u64 set_hena = 0, hena;
/* With the kernel RSS option: translate the stack's hash config into
 * the HW packet-classifier-type (PCTYPE) enable bits. */
994 rss_hash_config = rss_gethashconfig();
995 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
996 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
997 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
998 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
999 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
1000 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
1001 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
1002 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
1003 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
1004 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
1005 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
1006 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
1007 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
1008 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
/* Without kernel RSS: use the per-MAC-family driver defaults. */
1010 if (hw->mac.type == I40E_MAC_X722)
1011 set_hena = IXL_DEFAULT_RSS_HENA_X722;
1013 set_hena = IXL_DEFAULT_RSS_HENA_XL710;
/* HENA is a 64-bit enable set split across two 32-bit registers. */
1015 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
1016 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
1018 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
1019 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
1024 ** Setup the PF's RSS parameters.
/* Configure all three RSS components: key, PCTYPEs, and lookup table. */
1027 ixl_config_rss(struct ixl_pf *pf)
1029 ixl_set_rss_key(pf);
1030 ixl_set_rss_pctypes(pf);
1031 ixl_set_rss_hlut(pf);
1035 * In some firmware versions there is default MAC/VLAN filter
1036 * configured which interferes with filters managed by driver.
1037 * Make sure it's removed.
1040 ixl_del_default_hw_filters(struct ixl_vsi *vsi)
1042 struct i40e_aqc_remove_macvlan_element_data e;
/* First pass: remove the perfect-match filter for the permanent MAC. */
1044 bzero(&e, sizeof(e));
1045 bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1047 e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1048 i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
/* Second pass: same MAC, but matching regardless of VLAN. */
1050 bzero(&e, sizeof(e));
1051 bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1053 e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1054 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1055 i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1059 ** Initialize filter list and add filters that the hardware
1060 ** needs to know about.
1062 ** Requires VSI's seid to be set before calling.
1065 ixl_init_filters(struct ixl_vsi *vsi)
1067 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1069 ixl_dbg_filter(pf, "%s: start\n", __func__);
1071 /* Initialize mac filter list for VSI */
1072 LIST_INIT(&vsi->ftl);
1073 vsi->num_hw_filters = 0;
1075 /* Receive broadcast Ethernet frames */
1076 i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);
/* VF VSIs get their filters managed elsewhere. */
1078 if (IXL_VSI_IS_VF(vsi))
/* Drop stale FW-default filters before adding the driver's own. */
1081 ixl_del_default_hw_filters(vsi);
1083 ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);
1086 * Prevent Tx flow control frames from being sent out by
1087 * non-firmware transmitters.
1088 * This affects every VSI in the PF.
1090 #ifndef IXL_DEBUG_FC
1091 i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
1093 if (pf->enable_tx_fc_filter)
1094 i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
/*
 * Re-program the VSI's entire SW filter list into HW (used after a
 * reset). Also re-adds the primary MAC filter and, when VLAN HW
 * filtering is enabled, the per-VLAN filters.
 */
1099 ixl_reconfigure_filters(struct ixl_vsi *vsi)
1101 struct i40e_hw *hw = vsi->hw;
1102 struct ixl_ftl_head tmp;
1106 * The ixl_add_hw_filters function adds filters configured
1107 * in HW to a list in VSI. Move all filters to a temporary
1108 * list to avoid corrupting it by concatenating to itself.
1111 LIST_CONCAT(&tmp, &vsi->ftl, ixl_mac_filter, ftle);
1112 cnt = vsi->num_hw_filters;
1113 vsi->num_hw_filters = 0;
1115 ixl_add_hw_filters(vsi, &tmp, cnt);
1117 /* Filter could be removed if MAC address was changed */
1118 ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
1120 if ((if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWFILTER) == 0)
1123 * VLAN HW filtering is enabled, make sure that filters
1124 * for all registered VLAN tags are configured
1126 ixl_add_vlan_filters(vsi, hw->mac.addr);
1130 * This routine adds a MAC/VLAN filter to the software filter
1131 * list, then adds that new filter to the HW if it doesn't already
1132 * exist in the SW filter list.
1135 ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1137 struct ixl_mac_filter *f, *tmp;
1140 struct ixl_ftl_head to_add;
1147 ixl_dbg_filter(pf, "ixl_add_filter: " MAC_FORMAT ", vlan %4d\n",
1148 MAC_FORMAT_ARGS(macaddr), vlan);
1150 /* Does one already exist */
1151 f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
/* No duplicate found: allocate a new SW filter entry on the local
 * to_add list (added to HW at the bottom of this function). */
1156 f = ixl_new_filter(&to_add, macaddr, vlan);
1158 device_printf(dev, "WARNING: no filter available!!\n");
1161 if (f->vlan != IXL_VLAN_ANY)
1162 f->flags |= IXL_FILTER_VLAN;
1167 ** Is this the first vlan being registered, if so we
1168 ** need to remove the ANY filter that indicates we are
1169 ** not in a vlan, and replace that with a 0 filter.
1171 if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
1172 tmp = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY);
1174 struct ixl_ftl_head to_del;
1176 /* Prepare new filter first to avoid removing
1177 * VLAN_ANY filter if allocation fails */
1178 f = ixl_new_filter(&to_add, macaddr, 0);
1180 device_printf(dev, "WARNING: no filter available!!\n");
/* Allocation of the VLAN-0 replacement failed: undo the entry queued
 * earlier on to_add so no partial state is committed. */
1181 free(LIST_FIRST(&to_add), M_IXL);
/* Swap: pull the VLAN_ANY filter off the SW list and delete it
 * from HW; its VLAN-0 replacement is on to_add. */
1186 LIST_REMOVE(tmp, ftle);
1188 LIST_INSERT_HEAD(&to_del, tmp, ftle);
1189 ixl_del_hw_filters(vsi, &to_del, 1);
1193 ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
1197 * ixl_add_vlan_filters - Add MAC/VLAN filters for all registered VLANs
1198 * @vsi: pointer to VSI
1199 * @macaddr: MAC address
1201 * Adds MAC/VLAN filter for each VLAN configured on the interface
1202 * if there is enough HW filters. Otherwise adds a single filter
1203 * for all tagged and untagged frames to allow all configured VLANs
1204 * to recieve traffic.
1207 ixl_add_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
1209 struct ixl_ftl_head to_add;
1210 struct ixl_mac_filter *f;
/* Too many (or zero) VLANs for per-VLAN HW filters: fall back to a
 * single match-any-VLAN filter and return. */
1214 if (vsi->num_vlans == 0 || vsi->num_vlans > IXL_MAX_VLAN_FILTERS) {
1215 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
1220 /* Add filter for untagged frames if it does not exist yet */
1221 f = ixl_find_filter(&vsi->ftl, macaddr, 0);
1223 f = ixl_new_filter(&to_add, macaddr, 0);
1225 device_printf(vsi->dev, "WARNING: no filter available!!\n");
/* Walk the VLAN bitmap: bit_ffs_at() finds the next set bit at or
 * after i; the loop then resumes the scan just past it. */
1231 for (i = 1; i < EVL_VLID_MASK; i = vlan + 1) {
1232 bit_ffs_at(vsi->vlans_map, i, IXL_VLANS_MAP_LEN, &vlan);
1236 /* Does one already exist */
1237 f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
1241 f = ixl_new_filter(&to_add, macaddr, vlan);
1243 device_printf(vsi->dev, "WARNING: no filter available!!\n");
/* Allocation failed mid-walk: throw away everything staged so far
 * rather than program a partial set. */
1244 ixl_free_filters(&to_add);
/* Program the whole staged batch into HW in one AQ call. */
1250 ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
/*
 * Remove a single MAC/VLAN filter from both the SW list and the HW.
 * When the last VLAN-specific filter for a MAC goes away, the VLAN-0
 * filter is replaced with a match-any-VLAN filter (inverse of the
 * swap done in ixl_add_filter()).
 */
1254 ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1256 struct ixl_mac_filter *f, *tmp;
1257 struct ixl_ftl_head ftl_head;
1260 ixl_dbg_filter((struct ixl_pf *)vsi->back,
1261 "ixl_del_filter: " MAC_FORMAT ", vlan %4d\n",
1262 MAC_FORMAT_ARGS(macaddr), vlan);
1264 f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
/* Move the found filter onto a local single-entry list for deletion. */
1268 LIST_REMOVE(f, ftle);
1269 LIST_INIT(&ftl_head);
1270 LIST_INSERT_HEAD(&ftl_head, f, ftle);
1271 if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0)
1274 /* If this is not the last vlan just remove the filter */
1275 if (vlan == IXL_VLAN_ANY || vsi->num_vlans > 0) {
1276 ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);
1280 /* It's the last vlan, we need to switch back to a non-vlan filter */
1281 tmp = ixl_find_filter(&vsi->ftl, macaddr, 0);
/* Queue the VLAN-0 filter for deletion alongside the requested one. */
1283 LIST_REMOVE(tmp, ftle);
1284 LIST_INSERT_AFTER(f, tmp, ftle);
1287 ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);
/* Re-establish reception for all tags with a match-any-VLAN filter. */
1289 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
1293 * ixl_del_all_vlan_filters - Delete all VLAN filters with given MAC
1294 * @vsi: VSI which filters need to be removed
1295 * @macaddr: MAC address
1297 * Remove all MAC/VLAN filters with a given MAC address. For multicast
1298 * addresses there is always single filter for all VLANs used (IXL_VLAN_ANY)
1299 * so skip them to speed up processing. Those filters should be removed
1300 * using ixl_del_filter function.
1303 ixl_del_all_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
1305 struct ixl_mac_filter *f, *tmp;
1306 struct ixl_ftl_head to_del;
/* Safe iteration: entries are unlinked from vsi->ftl while walking. */
1311 LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, tmp) {
/* Skip multicast filters and filters for other MAC addresses. */
1312 if ((f->flags & IXL_FILTER_MC) != 0 ||
1313 !ixl_ether_is_equal(f->macaddr, macaddr))
1316 LIST_REMOVE(f, ftle);
1317 LIST_INSERT_HEAD(&to_del, f, ftle);
1321 ixl_dbg_filter((struct ixl_pf *)vsi->back,
1322 "%s: " MAC_FORMAT ", to_del_cnt: %d\n",
1323 __func__, MAC_FORMAT_ARGS(macaddr), to_del_cnt);
/* Remove the whole collected batch from HW in one AQ call. */
1325 ixl_del_hw_filters(vsi, &to_del, to_del_cnt);
1329 ** Find the filter with both matching mac addr and vlan id
/*
 * Linear scan of the given filter list; returns the first entry whose
 * MAC and VLAN both match (NULL return on no match — elided here).
 */
1331 struct ixl_mac_filter *
1332 ixl_find_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
1334 struct ixl_mac_filter *f;
1336 LIST_FOREACH(f, headp, ftle) {
1337 if (ixl_ether_is_equal(f->macaddr, macaddr) &&
1338 (f->vlan == vlan)) {
1347 ** This routine takes additions to the vsi filter
1348 ** table and creates an Admin Queue call to create
1349 ** the filters in the hardware.
1352 ixl_add_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_add, int cnt)
1354 struct i40e_aqc_add_macvlan_element_data *a, *b;
1355 struct ixl_mac_filter *f, *fn;
1359 enum i40e_status_code status;
1366 ixl_dbg_filter(pf, "ixl_add_hw_filters: cnt: %d\n", cnt);
1369 ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n");
/* Scratch array for the AQ command; M_NOWAIT because this can run in
 * contexts where sleeping is not allowed, so allocation may fail. */
1373 a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
1374 M_IXL, M_NOWAIT | M_ZERO);
1376 device_printf(dev, "add_hw_filters failed to get memory\n");
/* Translate each SW filter into an AQ add-macvlan element. */
1380 LIST_FOREACH(f, to_add, ftle) {
1381 b = &a[j]; // a pox on fvl long names :)
1382 bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
1383 if (f->vlan == IXL_VLAN_ANY) {
/* IXL_VLAN_ANY: match regardless of VLAN tag (vlan_tag left 0). */
1385 b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
1387 b->vlan_tag = f->vlan;
1390 b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
1391 ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
1392 MAC_FORMAT_ARGS(f->macaddr));
1398 /* Something went wrong */
/* The list held fewer entries than the caller claimed (cnt). */
1400 "%s ERROR: list of filters to short expected: %d, found: %d\n",
1402 ixl_free_filters(to_add)
1406 status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
1407 if (status == I40E_SUCCESS) {
/* All filters accepted: move the whole batch onto the VSI's list. */
1408 LIST_CONCAT(&vsi->ftl, to_add, ixl_mac_filter, ftle);
1409 vsi->num_hw_filters += j;
1414 "i40e_aq_add_macvlan status %s, error %s\n",
1415 i40e_stat_str(hw, status),
1416 i40e_aq_str(hw, hw->aq.asq_last_status));
1419 /* Verify which filters were actually configured in HW
1420 * and add them to the list */
1421 LIST_FOREACH_SAFE(f, to_add, ftle, fn) {
1422 LIST_REMOVE(f, ftle);
/* Firmware marks per-element failure via match_method; entries the
 * HW rejected for lack of resources are dropped, the rest kept. */
1423 if (a[j].match_method == I40E_AQC_MM_ERR_NO_RES) {
1425 "%s filter " MAC_FORMAT " VTAG: %d not added\n",
1427 MAC_FORMAT_ARGS(f->macaddr),
1431 LIST_INSERT_HEAD(&vsi->ftl, f, ftle);
1432 vsi->num_hw_filters++;
1442 ** This routine takes removals in the vsi filter
1443 ** table and creates an Admin Queue call to delete
1444 ** the filters in the hardware.
1447 ixl_del_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_del, int cnt)
1449 struct i40e_aqc_remove_macvlan_element_data *d, *e;
1453 struct ixl_mac_filter *f, *f_temp;
1454 enum i40e_status_code status;
1461 ixl_dbg_filter(pf, "%s: start, cnt: %d\n", __func__, cnt);
/* Scratch array for the AQ remove command; M_NOWAIT since this can be
 * called from non-sleepable contexts. */
1463 d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
1464 M_IXL, M_NOWAIT | M_ZERO);
1466 device_printf(dev, "%s: failed to get memory\n", __func__);
/* Translate each SW filter into an AQ remove-macvlan element,
 * freeing the SW entry as we go. */
1470 LIST_FOREACH_SAFE(f, to_del, ftle, f_temp) {
1471 e = &d[j]; // a pox on fvl long names :)
1472 bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
1473 e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1474 if (f->vlan == IXL_VLAN_ANY) {
1476 e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1478 e->vlan_tag = f->vlan;
1481 ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
1482 MAC_FORMAT_ARGS(f->macaddr));
1484 /* delete entry from the list */
1485 LIST_REMOVE(f, ftle);
1490 if (j != cnt || !LIST_EMPTY(to_del)) {
1491 /* Something went wrong */
1493 "%s ERROR: wrong size of list of filters, expected: %d, found: %d\n",
1495 ixl_free_filters(to_del);
1498 status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
1501 "%s: i40e_aq_remove_macvlan status %s, error %s\n",
1502 __func__, i40e_stat_str(hw, status),
1503 i40e_aq_str(hw, hw->aq.asq_last_status));
/* On AQ failure, report each element the firmware flagged; an
 * error_code of 0 means that element succeeded, so skip it. */
1504 for (int i = 0; i < j; i++) {
1505 if (d[i].error_code == 0)
1508 "%s Filter does not exist " MAC_FORMAT " VTAG: %d\n",
1509 __func__, MAC_FORMAT_ARGS(d[i].mac_addr),
1514 vsi->num_hw_filters -= j;
1519 ixl_dbg_filter(pf, "%s: end\n", __func__);
/*
 * Enable one PF TX queue: request enable via QTX_ENA and poll up to
 * 10 times (10us apart) for the hardware to report the queue enabled.
 */
1523 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1525 struct i40e_hw *hw = &pf->hw;
/* Map the VSI-relative queue index to the PF-global queue index. */
1530 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1532 ixl_dbg(pf, IXL_DBG_EN_DIS,
1533 "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
/* Pre-configure step required before toggling a TX queue (TRUE = enable). */
1536 i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
1538 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1539 reg |= I40E_QTX_ENA_QENA_REQ_MASK |
1540 I40E_QTX_ENA_QENA_STAT_MASK;
1541 wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
1542 /* Verify the enable took */
1543 for (int j = 0; j < 10; j++) {
1544 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1545 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
1547 i40e_usec_delay(10);
/* STAT bit never came up: queue stayed disabled; report it. */
1549 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
1550 device_printf(pf->dev, "TX queue %d still disabled!\n",
/*
 * Enable one PF RX queue: mirror of ixl_enable_tx_ring() for QRX_ENA
 * (no pre-configure step is needed on the RX side).
 */
1559 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1561 struct i40e_hw *hw = &pf->hw;
/* Map the VSI-relative queue index to the PF-global queue index. */
1566 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1568 ixl_dbg(pf, IXL_DBG_EN_DIS,
1569 "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
1572 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1573 reg |= I40E_QRX_ENA_QENA_REQ_MASK |
1574 I40E_QRX_ENA_QENA_STAT_MASK;
1575 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
1576 /* Verify the enable took */
1577 for (int j = 0; j < 10; j++) {
1578 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1579 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
1581 i40e_usec_delay(10);
/* STAT bit never came up: queue stayed disabled; report it. */
1583 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
1584 device_printf(pf->dev, "RX queue %d still disabled!\n",
/*
 * Enable the TX and RX rings for one queue pair; stops at the first
 * failure (the called helpers already print the error message).
 */
1593 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1597 error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
1598 /* Called function already prints error message */
1601 error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
1606 * Returns error on first ring that is detected hung.
/*
 * Disable one PF TX queue: pre-configure for disable, clear the
 * enable-request bit, then poll up to 10 times (10ms apart) for the
 * hardware to report the queue disabled.
 */
1609 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1611 struct i40e_hw *hw = &pf->hw;
/* Map the VSI-relative queue index to the PF-global queue index. */
1616 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1618 ixl_dbg(pf, IXL_DBG_EN_DIS,
1619 "Disabling PF TX ring %4d / VSI TX ring %4d...\n",
1622 i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
/* Give in-flight descriptors time to drain before clearing enable. */
1623 i40e_usec_delay(500);
1625 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1626 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
1627 wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
1628 /* Verify the disable took */
1629 for (int j = 0; j < 10; j++) {
1630 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1631 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
1633 i40e_msec_delay(10);
/* STAT bit never cleared: queue appears hung; report it. */
1635 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
1636 device_printf(pf->dev, "TX queue %d still enabled!\n",
1645 * Returns error on first ring that is detected hung.
/*
 * Disable one PF RX queue: mirror of ixl_disable_tx_ring() for QRX_ENA
 * (no pre-configure/drain step is needed on the RX side).
 */
1648 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1650 struct i40e_hw *hw = &pf->hw;
/* Map the VSI-relative queue index to the PF-global queue index. */
1655 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1657 ixl_dbg(pf, IXL_DBG_EN_DIS,
1658 "Disabling PF RX ring %4d / VSI RX ring %4d...\n",
1661 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1662 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
1663 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
1664 /* Verify the disable took */
1665 for (int j = 0; j < 10; j++) {
1666 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1667 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
1669 i40e_msec_delay(10);
/* STAT bit never cleared: queue appears hung; report it. */
1671 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
1672 device_printf(pf->dev, "RX queue %d still enabled!\n",
/*
 * Disable the TX and RX rings for one queue pair; stops at the first
 * failure (the called helpers already print the error message).
 */
1681 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1685 error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
1686 /* Called function already prints error message */
1689 error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
/*
 * Handle a TX-side Malicious Driver Detection (MDD) event: read the
 * global MDET_TX register to identify the offending PF/VF and queue,
 * clear the per-PF and per-VF latch registers, and log what was found.
 * Writing all-ones to the MDET registers acknowledges/clears the event.
 */
1694 ixl_handle_tx_mdd_event(struct ixl_pf *pf)
1696 struct i40e_hw *hw = &pf->hw;
1697 device_t dev = pf->dev;
1699 bool mdd_detected = false;
1700 bool pf_mdd_detected = false;
1701 bool vf_mdd_detected = false;
1704 u8 pf_mdet_num, vp_mdet_num;
1707 /* find what triggered the MDD event */
1708 reg = rd32(hw, I40E_GL_MDET_TX);
1709 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
1710 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
1711 I40E_GL_MDET_TX_PF_NUM_SHIFT;
1712 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
1713 I40E_GL_MDET_TX_VF_NUM_SHIFT;
1714 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
1715 I40E_GL_MDET_TX_EVENT_SHIFT;
1716 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
1717 I40E_GL_MDET_TX_QUEUE_SHIFT;
1718 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
1719 mdd_detected = true;
/* Check whether this PF itself latched an MDD event. */
1725 reg = rd32(hw, I40E_PF_MDET_TX);
1726 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
1727 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
1728 pf_mdet_num = hw->pf_id;
1729 pf_mdd_detected = true;
1732 /* Check if MDD was caused by a VF */
1733 for (int i = 0; i < pf->num_vfs; i++) {
1735 reg = rd32(hw, I40E_VP_MDET_TX(i));
1736 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
1737 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
1739 vf->num_mdd_events++;
1740 vf_mdd_detected = true;
1744 /* Print out an error message */
/* Message variant depends on whether PF, VF, or both latched the event. */
1745 if (vf_mdd_detected && pf_mdd_detected)
1747 "Malicious Driver Detection event %d"
1748 " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
1749 event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
1750 else if (vf_mdd_detected && !pf_mdd_detected)
1752 "Malicious Driver Detection event %d"
1753 " on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
1754 event, queue, pf_num, vf_num, vp_mdet_num);
1755 else if (!vf_mdd_detected && pf_mdd_detected)
1757 "Malicious Driver Detection event %d"
1758 " on TX queue %d, pf number %d (PF-%d)\n",
1759 event, queue, pf_num, pf_mdet_num);
1760 /* Theoretically shouldn't happen */
1763 "TX Malicious Driver Detection event (unknown)\n");
/*
 * Handle an RX-side Malicious Driver Detection (MDD) event: mirror of
 * ixl_handle_tx_mdd_event() for the RX latch registers. Note the
 * GL_MDET_RX register lacks a VF-number field (see comment below), so
 * only a function number is decoded from it.
 */
1767 ixl_handle_rx_mdd_event(struct ixl_pf *pf)
1769 struct i40e_hw *hw = &pf->hw;
1770 device_t dev = pf->dev;
1772 bool mdd_detected = false;
1773 bool pf_mdd_detected = false;
1774 bool vf_mdd_detected = false;
1777 u8 pf_mdet_num, vp_mdet_num;
1781 * GL_MDET_RX doesn't contain VF number information, unlike
1784 reg = rd32(hw, I40E_GL_MDET_RX);
1785 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
1786 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
1787 I40E_GL_MDET_RX_FUNCTION_SHIFT;
1788 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
1789 I40E_GL_MDET_RX_EVENT_SHIFT;
1790 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
1791 I40E_GL_MDET_RX_QUEUE_SHIFT;
1792 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
1793 mdd_detected = true;
/* Check whether this PF itself latched an MDD event. */
1799 reg = rd32(hw, I40E_PF_MDET_RX);
1800 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
1801 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
1802 pf_mdet_num = hw->pf_id;
1803 pf_mdd_detected = true;
1806 /* Check if MDD was caused by a VF */
1807 for (int i = 0; i < pf->num_vfs; i++) {
1809 reg = rd32(hw, I40E_VP_MDET_RX(i));
1810 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
1811 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
1813 vf->num_mdd_events++;
1814 vf_mdd_detected = true;
1818 /* Print out an error message */
/* Message variant depends on whether PF, VF, or both latched the event. */
1819 if (vf_mdd_detected && pf_mdd_detected)
1821 "Malicious Driver Detection event %d"
1822 " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n",
1823 event, queue, pf_num, pf_mdet_num, vp_mdet_num);
1824 else if (vf_mdd_detected && !pf_mdd_detected)
1826 "Malicious Driver Detection event %d"
1827 " on RX queue %d, pf number %d, (VF-%d)\n",
1828 event, queue, pf_num, vp_mdet_num);
1829 else if (!vf_mdd_detected && pf_mdd_detected)
1831 "Malicious Driver Detection event %d"
1832 " on RX queue %d, pf number %d (PF-%d)\n",
1833 event, queue, pf_num, pf_mdet_num);
1834 /* Theoretically shouldn't happen */
1837 "RX Malicious Driver Detection event (unknown)\n");
1841 * ixl_handle_mdd_event
1843 * Called from interrupt handler to identify possibly malicious vfs
1844 * (But also detects events from the PF, as well)
1847 ixl_handle_mdd_event(struct ixl_pf *pf)
1849 struct i40e_hw *hw = &pf->hw;
1853 * Handle both TX/RX because it's possible they could
1854 * both trigger in the same interrupt.
1856 ixl_handle_tx_mdd_event(pf);
1857 ixl_handle_rx_mdd_event(pf);
/* Clear the pending flag set by the interrupt handler. */
1859 atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
1861 /* re-enable mdd interrupt cause */
1862 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
1863 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
1864 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
/*
 * Enable the "other"/admin interrupt (vector 0) via PFINT_DYN_CTL0:
 * set INTENA, clear any pending event with CLEARPBA, and leave the
 * ITR index at IXL_ITR_NONE so no throttle value is touched.
 */
1869 ixl_enable_intr0(struct i40e_hw *hw)
1873 /* Use IXL_ITR_NONE so ITR isn't updated here */
1874 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
1875 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1876 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
1877 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/*
 * Disable the "other"/admin interrupt (vector 0): write DYN_CTL0
 * without INTENA set (ITR index kept at IXL_ITR_NONE).
 */
1881 ixl_disable_intr0(struct i40e_hw *hw)
1885 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
1886 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/*
 * Enable the MSI-X interrupt for one queue via PFINT_DYN_CTLN(id):
 * set INTENA and CLEARPBA; ITR index IXL_ITR_NONE leaves throttling
 * untouched.
 */
1891 ixl_enable_queue(struct i40e_hw *hw, int id)
1895 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1896 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1897 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
1898 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
/*
 * Disable the MSI-X interrupt for one queue: write DYN_CTLN(id)
 * without INTENA set (ITR index kept at IXL_ITR_NONE).
 */
1902 ixl_disable_queue(struct i40e_hw *hw, int id)
1906 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
1907 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
/*
 * Recover after an EMP (firmware) reset: quiesce the driver, detect
 * whether the firmware came back in recovery mode (and if so limit
 * functionality), otherwise rebuild the HW/driver structures and
 * clear the RESETTING state flag.
 */
1911 ixl_handle_empr_reset(struct ixl_pf *pf)
1913 struct ixl_vsi *vsi = &pf->vsi;
/* Remember whether the interface was up so it can be restored. */
1914 bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
1916 ixl_prepare_for_reset(pf, is_up);
1918 * i40e_pf_reset checks the type of reset and acts
1919 * accordingly. If EMP or Core reset was performed
1920 * doing PF reset is not necessary and it sometimes
/* Firmware entered recovery mode during the reset: flag it, warn the
 * user, and force the link state down. */
1925 if (!IXL_PF_IN_RECOVERY_MODE(pf) &&
1926 ixl_get_fw_mode(pf) == IXL_FW_MODE_RECOVERY) {
1927 atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
1928 device_printf(pf->dev,
1929 "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
1930 pf->link_up = FALSE;
1931 ixl_update_link_status(pf);
1934 ixl_rebuild_hw_structs_after_reset(pf, is_up);
1936 atomic_clear_32(&pf->state, IXL_PF_STATE_RESETTING);
/*
 * Pull all PF-level (port) statistics from the hardware into
 * pf->stats, using pf->stats_offsets as the baseline captured at the
 * first read (device counters are not cleared by a PF reset; see
 * ixl_stat_update48/32). Also refreshes the PF VSI stats and the
 * ethernet stats of every enabled VF VSI.
 */
1940 ixl_update_stats_counters(struct ixl_pf *pf)
1942 struct i40e_hw *hw = &pf->hw;
1943 struct ixl_vsi *vsi = &pf->vsi;
/* Snapshot XOFF-received count so pause activity in this interval
 * can be detected below. */
1945 u64 prev_link_xoff_rx = pf->stats.link_xoff_rx;
1947 struct i40e_hw_port_stats *nsd = &pf->stats;
1948 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
1950 /* Update hw stats */
1951 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
1952 pf->stat_offsets_loaded,
1953 &osd->crc_errors, &nsd->crc_errors);
1954 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
1955 pf->stat_offsets_loaded,
1956 &osd->illegal_bytes, &nsd->illegal_bytes);
1957 ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
1958 I40E_GLPRT_GORCL(hw->port),
1959 pf->stat_offsets_loaded,
1960 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
1961 ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
1962 I40E_GLPRT_GOTCL(hw->port),
1963 pf->stat_offsets_loaded,
1964 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
1965 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
1966 pf->stat_offsets_loaded,
1967 &osd->eth.rx_discards,
1968 &nsd->eth.rx_discards);
1969 ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
1970 I40E_GLPRT_UPRCL(hw->port),
1971 pf->stat_offsets_loaded,
1972 &osd->eth.rx_unicast,
1973 &nsd->eth.rx_unicast);
1974 ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
1975 I40E_GLPRT_UPTCL(hw->port),
1976 pf->stat_offsets_loaded,
1977 &osd->eth.tx_unicast,
1978 &nsd->eth.tx_unicast);
1979 ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
1980 I40E_GLPRT_MPRCL(hw->port),
1981 pf->stat_offsets_loaded,
1982 &osd->eth.rx_multicast,
1983 &nsd->eth.rx_multicast);
1984 ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
1985 I40E_GLPRT_MPTCL(hw->port),
1986 pf->stat_offsets_loaded,
1987 &osd->eth.tx_multicast,
1988 &nsd->eth.tx_multicast);
1989 ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
1990 I40E_GLPRT_BPRCL(hw->port),
1991 pf->stat_offsets_loaded,
1992 &osd->eth.rx_broadcast,
1993 &nsd->eth.rx_broadcast);
1994 ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
1995 I40E_GLPRT_BPTCL(hw->port),
1996 pf->stat_offsets_loaded,
1997 &osd->eth.tx_broadcast,
1998 &nsd->eth.tx_broadcast);
/* Link-level fault and error counters. */
2000 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
2001 pf->stat_offsets_loaded,
2002 &osd->tx_dropped_link_down,
2003 &nsd->tx_dropped_link_down);
2004 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
2005 pf->stat_offsets_loaded,
2006 &osd->mac_local_faults,
2007 &nsd->mac_local_faults);
2008 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
2009 pf->stat_offsets_loaded,
2010 &osd->mac_remote_faults,
2011 &nsd->mac_remote_faults);
2012 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
2013 pf->stat_offsets_loaded,
2014 &osd->rx_length_errors,
2015 &nsd->rx_length_errors);
2017 /* Flow control (LFC) stats */
2018 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
2019 pf->stat_offsets_loaded,
2020 &osd->link_xon_rx, &nsd->link_xon_rx);
2021 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
2022 pf->stat_offsets_loaded,
2023 &osd->link_xon_tx, &nsd->link_xon_tx);
2024 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2025 pf->stat_offsets_loaded,
2026 &osd->link_xoff_rx, &nsd->link_xoff_rx);
2027 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2028 pf->stat_offsets_loaded,
2029 &osd->link_xoff_tx, &nsd->link_xoff_tx);
2032 * For watchdog management we need to know if we have been paused
2033 * during the last interval, so capture that here.
2035 if (pf->stats.link_xoff_rx != prev_link_xoff_rx)
2036 vsi->shared->isc_pause_frames = 1;
2038 /* Packet size stats rx */
2039 ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
2040 I40E_GLPRT_PRC64L(hw->port),
2041 pf->stat_offsets_loaded,
2042 &osd->rx_size_64, &nsd->rx_size_64);
2043 ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
2044 I40E_GLPRT_PRC127L(hw->port),
2045 pf->stat_offsets_loaded,
2046 &osd->rx_size_127, &nsd->rx_size_127);
2047 ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
2048 I40E_GLPRT_PRC255L(hw->port),
2049 pf->stat_offsets_loaded,
2050 &osd->rx_size_255, &nsd->rx_size_255);
2051 ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
2052 I40E_GLPRT_PRC511L(hw->port),
2053 pf->stat_offsets_loaded,
2054 &osd->rx_size_511, &nsd->rx_size_511);
2055 ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
2056 I40E_GLPRT_PRC1023L(hw->port),
2057 pf->stat_offsets_loaded,
2058 &osd->rx_size_1023, &nsd->rx_size_1023);
2059 ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
2060 I40E_GLPRT_PRC1522L(hw->port),
2061 pf->stat_offsets_loaded,
2062 &osd->rx_size_1522, &nsd->rx_size_1522);
2063 ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
2064 I40E_GLPRT_PRC9522L(hw->port),
2065 pf->stat_offsets_loaded,
2066 &osd->rx_size_big, &nsd->rx_size_big);
2068 /* Packet size stats tx */
2069 ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
2070 I40E_GLPRT_PTC64L(hw->port),
2071 pf->stat_offsets_loaded,
2072 &osd->tx_size_64, &nsd->tx_size_64);
2073 ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
2074 I40E_GLPRT_PTC127L(hw->port),
2075 pf->stat_offsets_loaded,
2076 &osd->tx_size_127, &nsd->tx_size_127);
2077 ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
2078 I40E_GLPRT_PTC255L(hw->port),
2079 pf->stat_offsets_loaded,
2080 &osd->tx_size_255, &nsd->tx_size_255);
2081 ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
2082 I40E_GLPRT_PTC511L(hw->port),
2083 pf->stat_offsets_loaded,
2084 &osd->tx_size_511, &nsd->tx_size_511);
2085 ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
2086 I40E_GLPRT_PTC1023L(hw->port),
2087 pf->stat_offsets_loaded,
2088 &osd->tx_size_1023, &nsd->tx_size_1023);
2089 ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
2090 I40E_GLPRT_PTC1522L(hw->port),
2091 pf->stat_offsets_loaded,
2092 &osd->tx_size_1522, &nsd->tx_size_1522);
2093 ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
2094 I40E_GLPRT_PTC9522L(hw->port),
2095 pf->stat_offsets_loaded,
2096 &osd->tx_size_big, &nsd->tx_size_big);
/* Frame validity counters (runts, fragments, oversize, jabber). */
2098 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
2099 pf->stat_offsets_loaded,
2100 &osd->rx_undersize, &nsd->rx_undersize);
2101 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
2102 pf->stat_offsets_loaded,
2103 &osd->rx_fragments, &nsd->rx_fragments);
2104 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
2105 pf->stat_offsets_loaded,
2106 &osd->rx_oversize, &nsd->rx_oversize);
2107 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
2108 pf->stat_offsets_loaded,
2109 &osd->rx_jabber, &nsd->rx_jabber);
/* Energy Efficient Ethernet (LPI) status and counters. */
2111 i40e_get_phy_lpi_status(hw, nsd);
2113 i40e_lpi_stat_update(hw, pf->stat_offsets_loaded,
2114 &osd->tx_lpi_count, &nsd->tx_lpi_count,
2115 &osd->rx_lpi_count, &nsd->rx_lpi_count);
/* From now on, reads are deltas against the captured offsets. */
2117 pf->stat_offsets_loaded = true;
2120 /* Update vsi stats */
2121 ixl_update_vsi_stats(vsi);
/* Refresh per-VF VSI stats for VFs that are actually enabled. */
2123 for (int i = 0; i < pf->num_vfs; i++) {
2125 if (vf->vf_flags & VF_FLAG_ENABLED)
2126 ixl_update_eth_stats(&pf->vfs[i].vsi);
2131 * Update VSI-specific ethernet statistics counters.
/*
 * Reads the per-VSI (GLV_*) counters using the VSI's assigned HW stat
 * counter index; like the port stats, values are reported as deltas
 * against offsets captured on the first read.
 */
2134 ixl_update_eth_stats(struct ixl_vsi *vsi)
2136 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2137 struct i40e_hw *hw = &pf->hw;
2138 struct i40e_eth_stats *es;
2139 struct i40e_eth_stats *oes;
/* HW stat counter block assigned to this VSI by firmware. */
2140 u16 stat_idx = vsi->info.stat_counter_idx;
2142 es = &vsi->eth_stats;
2143 oes = &vsi->eth_stats_offsets;
2145 /* Gather up the stats that the hw collects */
2146 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
2147 vsi->stat_offsets_loaded,
2148 &oes->tx_errors, &es->tx_errors);
2149 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
2150 vsi->stat_offsets_loaded,
2151 &oes->rx_discards, &es->rx_discards);
2153 ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
2154 I40E_GLV_GORCL(stat_idx),
2155 vsi->stat_offsets_loaded,
2156 &oes->rx_bytes, &es->rx_bytes);
2157 ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
2158 I40E_GLV_UPRCL(stat_idx),
2159 vsi->stat_offsets_loaded,
2160 &oes->rx_unicast, &es->rx_unicast);
2161 ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
2162 I40E_GLV_MPRCL(stat_idx),
2163 vsi->stat_offsets_loaded,
2164 &oes->rx_multicast, &es->rx_multicast);
2165 ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
2166 I40E_GLV_BPRCL(stat_idx),
2167 vsi->stat_offsets_loaded,
2168 &oes->rx_broadcast, &es->rx_broadcast);
2170 ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
2171 I40E_GLV_GOTCL(stat_idx),
2172 vsi->stat_offsets_loaded,
2173 &oes->tx_bytes, &es->tx_bytes);
2174 ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
2175 I40E_GLV_UPTCL(stat_idx),
2176 vsi->stat_offsets_loaded,
2177 &oes->tx_unicast, &es->tx_unicast);
2178 ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
2179 I40E_GLV_MPTCL(stat_idx),
2180 vsi->stat_offsets_loaded,
2181 &oes->tx_multicast, &es->tx_multicast);
2182 ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
2183 I40E_GLV_BPTCL(stat_idx),
2184 vsi->stat_offsets_loaded,
2185 &oes->tx_broadcast, &es->tx_broadcast);
/* Subsequent reads are deltas against the captured offsets. */
2186 vsi->stat_offsets_loaded = true;
/*
 * Refresh the VSI's ethernet stats from HW, aggregate per-queue
 * checksum-error counts, and publish the results to the ifnet
 * counters via the IXL_SET_* macros.
 */
2190 ixl_update_vsi_stats(struct ixl_vsi *vsi)
2193 struct i40e_eth_stats *es;
2194 u64 tx_discards, csum_errs;
2196 struct i40e_hw_port_stats *nsd;
2199 es = &vsi->eth_stats;
2202 ixl_update_eth_stats(vsi);
2204 tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
/* Sum checksum errors counted in software by each RX queue. */
2207 for (int i = 0; i < vsi->num_rx_queues; i++)
2208 csum_errs += vsi->rx_queues[i].rxr.csum_errs;
2209 nsd->checksum_error = csum_errs;
2211 /* Update ifnet stats */
2212 IXL_SET_IPACKETS(vsi, es->rx_unicast +
2215 IXL_SET_OPACKETS(vsi, es->tx_unicast +
2218 IXL_SET_IBYTES(vsi, es->rx_bytes);
2219 IXL_SET_OBYTES(vsi, es->tx_bytes);
2220 IXL_SET_IMCASTS(vsi, es->rx_multicast);
2221 IXL_SET_OMCASTS(vsi, es->tx_multicast);
/* Input errors roll up CRC, illegal-byte, checksum, length, runt,
 * fragment, oversize (and related) port counters. */
2223 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
2224 nsd->checksum_error + nsd->rx_length_errors +
2225 nsd->rx_undersize + nsd->rx_fragments + nsd->rx_oversize +
2227 IXL_SET_OERRORS(vsi, es->tx_errors);
2228 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
2229 IXL_SET_OQDROPS(vsi, tx_discards);
2230 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
/* Full-duplex medium: collisions are not a meaningful statistic. */
2231 IXL_SET_COLLISIONS(vsi, 0);
2235 * Reset all of the stats for the given pf
/* Zeroes both the live counters and the offset baselines, and marks the
 * offsets as not loaded so the next read re-captures them. */
2238 ixl_pf_reset_stats(struct ixl_pf *pf)
2240 bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
2241 bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
2242 pf->stat_offsets_loaded = false;
2246 * Resets all stats of the given vsi
/* Per-VSI analogue of ixl_pf_reset_stats(): zero counters and offsets,
 * and force offset re-capture on the next stats read. */
2249 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
2251 bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
2252 bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
2253 vsi->stat_offsets_loaded = false;
2257 * Read and update a 48 bit stat from the hw
2259 * Since the device stats are not reset at PFReset, they likely will not
2260 * be zeroed when the driver starts. We'll save the first values read
2261 * and use them as offsets to be subtracted from the raw values in order
2262 * to report stats that count from zero.
2265 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
2266 bool offset_loaded, u64 *offset, u64 *stat)
/* On 64-bit FreeBSD >= 10 the 48-bit counter pair can be read with a
 * single 64-bit bus access at the low register address. */
2270 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
2271 new_data = rd64(hw, loreg)
2274 * Use two rd32's instead of one rd64; FreeBSD versions before
2275 * 10 don't support 64-bit bus reads/writes.
2277 new_data = rd32(hw, loreg);
/* High register holds the top 16 bits of the 48-bit counter. */
2278 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
2283 if (new_data >= *offset)
2284 *stat = new_data - *offset;
/* Counter wrapped since the offset was captured: add the 48-bit
 * modulus before subtracting, then mask back to 48 bits. */
2286 *stat = (new_data + ((u64)1 << 48)) - *offset;
2287 *stat &= 0xFFFFFFFFFFFFULL;
2291 * Read and update a 32 bit stat from the hw
/* 32-bit analogue of ixl_stat_update48(): report the delta against the
 * captured offset, compensating for a single 32-bit counter wrap. */
2294 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
2295 bool offset_loaded, u64 *offset, u64 *stat)
2299 new_data = rd32(hw, reg);
2302 if (new_data >= *offset)
2303 *stat = (u32)(new_data - *offset);
/* Counter wrapped: add the 32-bit modulus before subtracting. */
2305 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
2309 * Add subset of device sysctls safe to use in recovery mode
/*
 * When firmware is in recovery mode most driver functionality is
 * disabled; this registers only the sysctls that remain safe:
 * fw_version plus a hidden (CTLFLAG_SKIP) "debug" subtree of debug
 * masks, reset triggers, and diagnostic dumps.
 */
2312 ixl_add_sysctls_recovery_mode(struct ixl_pf *pf)
2314 device_t dev = pf->dev;
2316 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2317 struct sysctl_oid_list *ctx_list =
2318 SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2320 struct sysctl_oid *debug_node;
2321 struct sysctl_oid_list *debug_list;
2323 SYSCTL_ADD_PROC(ctx, ctx_list,
2324 OID_AUTO, "fw_version",
2325 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2326 ixl_sysctl_show_fw, "A", "Firmware version");
2328 /* Add sysctls meant to print debug information, but don't list them
2329 * in "sysctl -a" output. */
2330 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2331 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
2333 debug_list = SYSCTL_CHILDREN(debug_node);
2335 SYSCTL_ADD_UINT(ctx, debug_list,
2336 OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2337 &pf->hw.debug_mask, 0, "Shared code debug message level");
2339 SYSCTL_ADD_UINT(ctx, debug_list,
2340 OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2341 &pf->dbg_mask, 0, "Non-shared code debug message level");
2343 SYSCTL_ADD_PROC(ctx, debug_list,
2344 OID_AUTO, "dump_debug_data",
2345 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2346 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
2348 SYSCTL_ADD_PROC(ctx, debug_list,
2349 OID_AUTO, "do_pf_reset",
2350 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2351 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
2353 SYSCTL_ADD_PROC(ctx, debug_list,
2354 OID_AUTO, "do_core_reset",
2355 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2356 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
2358 SYSCTL_ADD_PROC(ctx, debug_list,
2359 OID_AUTO, "do_global_reset",
2360 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2361 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
2363 SYSCTL_ADD_PROC(ctx, debug_list,
2364 OID_AUTO, "queue_interrupt_table",
2365 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2366 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
/*
 * ixl_add_device_sysctls - register the full (non-recovery) per-device
 * sysctl tree: flow control, advertised/supported/current link speed,
 * firmware version, ITR controls, optional FEC controls for 25G parts,
 * fw_lldp, an "eee" subtree with LPI status/counters, and a hidden
 * "debug" subtree with diagnostic dumps, reset triggers and I2C access.
 */
2372 device_t dev = pf->dev;
2373 struct i40e_hw *hw = &pf->hw;
2375 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2376 struct sysctl_oid_list *ctx_list =
2377 SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2379 struct sysctl_oid *debug_node;
2380 struct sysctl_oid_list *debug_list;
2382 struct sysctl_oid *fec_node;
2383 struct sysctl_oid_list *fec_list;
2384 struct sysctl_oid *eee_node;
2385 struct sysctl_oid_list *eee_list;
2387 /* Set up sysctls */
2388 SYSCTL_ADD_PROC(ctx, ctx_list,
2389 OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2390 pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
2392 SYSCTL_ADD_PROC(ctx, ctx_list,
2393 OID_AUTO, "advertise_speed",
2394 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2395 ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
2397 SYSCTL_ADD_PROC(ctx, ctx_list,
2398 OID_AUTO, "supported_speeds",
2399 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2400 ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
2402 SYSCTL_ADD_PROC(ctx, ctx_list,
2403 OID_AUTO, "current_speed",
2404 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2405 ixl_sysctl_current_speed, "A", "Current Port Speed");
2407 SYSCTL_ADD_PROC(ctx, ctx_list,
2408 OID_AUTO, "fw_version",
2409 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2410 ixl_sysctl_show_fw, "A", "Firmware version");
2412 SYSCTL_ADD_PROC(ctx, ctx_list,
2413 OID_AUTO, "unallocated_queues",
2414 CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2415 ixl_sysctl_unallocated_queues, "I",
2416 "Queues not allocated to a PF or VF");
/* NOTE(review): the OID name lines for the TX/RX ITR sysctls are elided here. */
2418 SYSCTL_ADD_PROC(ctx, ctx_list,
2420 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2421 ixl_sysctl_pf_tx_itr, "I",
2422 "Immediately set TX ITR value for all queues");
2424 SYSCTL_ADD_PROC(ctx, ctx_list,
2426 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2427 ixl_sysctl_pf_rx_itr, "I",
2428 "Immediately set RX ITR value for all queues");
2430 SYSCTL_ADD_INT(ctx, ctx_list,
2431 OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
2432 &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
2434 SYSCTL_ADD_INT(ctx, ctx_list,
2435 OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
2436 &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
2438 /* Add FEC sysctls for 25G adapters */
2439 if (i40e_is_25G_device(hw->device_id)) {
2440 fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2441 OID_AUTO, "fec", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2443 fec_list = SYSCTL_CHILDREN(fec_node);
2445 SYSCTL_ADD_PROC(ctx, fec_list,
2446 OID_AUTO, "fc_ability",
2447 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2448 ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
2450 SYSCTL_ADD_PROC(ctx, fec_list,
2451 OID_AUTO, "rs_ability",
2452 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2453 ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
2455 SYSCTL_ADD_PROC(ctx, fec_list,
2456 OID_AUTO, "fc_requested",
2457 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2458 ixl_sysctl_fec_fc_request, "I",
2459 "FC FEC mode requested on link");
2461 SYSCTL_ADD_PROC(ctx, fec_list,
2462 OID_AUTO, "rs_requested",
2463 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2464 ixl_sysctl_fec_rs_request, "I",
2465 "RS FEC mode requested on link");
2467 SYSCTL_ADD_PROC(ctx, fec_list,
2468 OID_AUTO, "auto_fec_enabled",
2469 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2470 ixl_sysctl_fec_auto_enable, "I",
2471 "Let FW decide FEC ability/request modes");
2474 SYSCTL_ADD_PROC(ctx, ctx_list,
2475 OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2476 pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
/* Energy Efficient Ethernet: enable knob plus read-only LPI status/counters. */
2478 eee_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2479 OID_AUTO, "eee", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2480 "Energy Efficient Ethernet (EEE) Sysctls");
2481 eee_list = SYSCTL_CHILDREN(eee_node);
2483 SYSCTL_ADD_PROC(ctx, eee_list,
2484 OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
2485 pf, 0, ixl_sysctl_eee_enable, "I",
2486 "Enable Energy Efficient Ethernet (EEE)");
2488 SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "tx_lpi_status",
2489 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_status, 0,
2492 SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "rx_lpi_status",
2493 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_status, 0,
2496 SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "tx_lpi_count",
2497 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_count,
2500 SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "rx_lpi_count",
2501 CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_count,
2504 SYSCTL_ADD_PROC(ctx, ctx_list, OID_AUTO,
2505 "link_active_on_if_down",
2506 CTLTYPE_INT | CTLFLAG_RWTUN,
2507 pf, 0, ixl_sysctl_set_link_active, "I",
2508 IXL_SYSCTL_HELP_SET_LINK_ACTIVE);
2510 /* Add sysctls meant to print debug information, but don't list them
2511 * in "sysctl -a" output. */
2512 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2513 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
2515 debug_list = SYSCTL_CHILDREN(debug_node);
2517 SYSCTL_ADD_UINT(ctx, debug_list,
2518 OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2519 &pf->hw.debug_mask, 0, "Shared code debug message level");
2521 SYSCTL_ADD_UINT(ctx, debug_list,
2522 OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2523 &pf->dbg_mask, 0, "Non-shared code debug message level");
2525 SYSCTL_ADD_PROC(ctx, debug_list,
2526 OID_AUTO, "link_status",
2527 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2528 pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
/* arg2 selects the report: 1 = abilities at init time, 0 = current. */
2530 SYSCTL_ADD_PROC(ctx, debug_list,
2531 OID_AUTO, "phy_abilities_init",
2532 CTLTYPE_STRING | CTLFLAG_RD,
2533 pf, 1, ixl_sysctl_phy_abilities, "A", "Initial PHY Abilities");
2535 SYSCTL_ADD_PROC(ctx, debug_list,
2536 OID_AUTO, "phy_abilities",
2537 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2538 pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
2540 SYSCTL_ADD_PROC(ctx, debug_list,
2541 OID_AUTO, "filter_list",
2542 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2543 pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
2545 SYSCTL_ADD_PROC(ctx, debug_list,
2546 OID_AUTO, "hw_res_alloc",
2547 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2548 pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
2550 SYSCTL_ADD_PROC(ctx, debug_list,
2551 OID_AUTO, "switch_config",
2552 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2553 pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
2555 SYSCTL_ADD_PROC(ctx, debug_list,
2556 OID_AUTO, "switch_vlans",
2557 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2558 pf, 0, ixl_sysctl_switch_vlans, "I", "HW Switch VLAN Configuration");
2560 SYSCTL_ADD_PROC(ctx, debug_list,
2561 OID_AUTO, "rss_key",
2562 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2563 pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
2565 SYSCTL_ADD_PROC(ctx, debug_list,
2566 OID_AUTO, "rss_lut",
2567 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2568 pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
2570 SYSCTL_ADD_PROC(ctx, debug_list,
2571 OID_AUTO, "rss_hena",
2572 CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2573 pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
2575 SYSCTL_ADD_PROC(ctx, debug_list,
2576 OID_AUTO, "disable_fw_link_management",
2577 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2578 pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
2580 SYSCTL_ADD_PROC(ctx, debug_list,
2581 OID_AUTO, "dump_debug_data",
2582 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2583 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
2585 SYSCTL_ADD_PROC(ctx, debug_list,
2586 OID_AUTO, "do_pf_reset",
2587 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2588 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
2590 SYSCTL_ADD_PROC(ctx, debug_list,
2591 OID_AUTO, "do_core_reset",
2592 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2593 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
2595 SYSCTL_ADD_PROC(ctx, debug_list,
2596 OID_AUTO, "do_global_reset",
2597 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2598 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
2600 SYSCTL_ADD_PROC(ctx, debug_list,
2601 OID_AUTO, "queue_interrupt_table",
2602 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2603 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
/* Low-level module/PHY I2C access, for SFP+ diagnostics. */
2606 SYSCTL_ADD_PROC(ctx, debug_list,
2607 OID_AUTO, "read_i2c_byte",
2608 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2609 pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C);
2611 SYSCTL_ADD_PROC(ctx, debug_list,
2612 OID_AUTO, "write_i2c_byte",
2613 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2614 pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C);
2616 SYSCTL_ADD_PROC(ctx, debug_list,
2617 OID_AUTO, "read_i2c_diag_data",
2618 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2619 pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
2624 * Primarily for finding out how many queues can be assigned to VFs,
/*
 * Sysctl handler: report the number of HW queues not yet assigned to the
 * PF or any VF, as tracked by the driver's queue manager (pf->qmgr).
 * Read-only; the value is passed to sysctl_handle_int() by value.
 */
2628 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
2630 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2633 queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
2635 return sysctl_handle_int(oidp, NULL, queues, req);
/*
 * ixl_link_speed_string - map an i40e_aq_link_speed enum value to a
 * human-readable string from the local link_speed_str[] table.  The
 * switch selects a table index for each known speed; unknown values
 * fall through to a default index.
 * NOTE(review): the table entries and the "index = N" lines are elided
 * from this extraction.
 */
2639 ixl_link_speed_string(enum i40e_aq_link_speed link_speed)
2641 const char * link_speed_str[] = {
2654 switch (link_speed) {
2655 case I40E_LINK_SPEED_100MB:
2658 case I40E_LINK_SPEED_1GB:
2661 case I40E_LINK_SPEED_10GB:
2664 case I40E_LINK_SPEED_40GB:
2667 case I40E_LINK_SPEED_20GB:
2670 case I40E_LINK_SPEED_25GB:
2673 case I40E_LINK_SPEED_2_5GB:
2676 case I40E_LINK_SPEED_5GB:
2679 case I40E_LINK_SPEED_UNKNOWN:
2685 return (link_speed_str[index]);
/*
 * Sysctl handler: refresh the link status from the HW and return the
 * current port speed as a string (via ixl_link_speed_string()).
 */
2689 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
2691 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2692 struct i40e_hw *hw = &pf->hw;
/* Refresh hw->phy.link_info before reporting. */
2695 ixl_update_link_status(pf);
2697 error = sysctl_handle_string(oidp,
2699 ixl_link_speed_string(hw->phy.link_info.link_speed)),
2706 * Converts 8-bit speeds value to and from sysctl flags and
2707 * Admin Queue flags.
/*
 * Each speedmap[] entry packs the AQ flag in the low byte and the
 * corresponding sysctl flag in the high byte.  With to_aq set, sysctl
 * bits are translated to AQ bits (high byte -> low byte); otherwise the
 * reverse.  Returns the translated bitmask.
 */
2710 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
2712 #define SPEED_MAP_SIZE 8
2713 static u16 speedmap[SPEED_MAP_SIZE] = {
2714 (I40E_LINK_SPEED_100MB | (0x1 << 8)),
2715 (I40E_LINK_SPEED_1GB | (0x2 << 8)),
2716 (I40E_LINK_SPEED_10GB | (0x4 << 8)),
2717 (I40E_LINK_SPEED_20GB | (0x8 << 8)),
2718 (I40E_LINK_SPEED_25GB | (0x10 << 8)),
2719 (I40E_LINK_SPEED_40GB | (0x20 << 8)),
2720 (I40E_LINK_SPEED_2_5GB | (0x40 << 8)),
2721 (I40E_LINK_SPEED_5GB | (0x80 << 8)),
2725 for (int i = 0; i < SPEED_MAP_SIZE; i++) {
2727 retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
2729 retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
/*
 * ixl_set_advertised_speeds - program the set of link speeds the port
 * advertises.  Reads the current PHY capabilities from FW, builds a new
 * PHY config preserving abilities/EEE/FEC settings, substitutes the
 * requested speeds, and issues Set PHY Config via the Admin Queue.
 * 'speeds' is in AQ format when from_aq is true, otherwise in sysctl
 * flag format and converted here.  Returns 0 or an error code (the
 * error-return lines are elided from this extraction).
 */
2736 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
2738 struct i40e_hw *hw = &pf->hw;
2739 device_t dev = pf->dev;
2740 struct i40e_aq_get_phy_abilities_resp abilities;
2741 struct i40e_aq_set_phy_config config;
2742 enum i40e_status_code aq_error = 0;
2744 /* Get current capability information */
2745 aq_error = i40e_aq_get_phy_capabilities(hw,
2746 FALSE, FALSE, &abilities, NULL);
2749 "%s: Error getting phy capabilities %d,"
2750 " aq error: %d\n", __func__, aq_error,
2751 hw->aq.asq_last_status);
2755 /* Prepare new config */
2756 bzero(&config, sizeof(config));
2758 config.link_speed = speeds;
2760 config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
/* Carry over everything else from the current abilities unchanged. */
2761 config.phy_type = abilities.phy_type;
2762 config.phy_type_ext = abilities.phy_type_ext;
2763 config.abilities = abilities.abilities
2764 | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2765 config.eee_capability = abilities.eee_capability;
2766 config.eeer = abilities.eeer_val;
2767 config.low_power_ctrl = abilities.d3_lpan;
2768 config.fec_config = abilities.fec_cfg_curr_mod_ext_info
2769 & I40E_AQ_PHY_FEC_CONFIG_MASK;
2771 /* Do aq command & restart link */
2772 aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
2775 "%s: Error setting new phy config %d,"
2776 " aq error: %d\n", __func__, aq_error,
2777 hw->aq.asq_last_status);
2785 ** Supported link speeds
/*
 * Sysctl handler: report the adapter's supported link speeds as a
 * sysctl-format bitmask (AQ bits converted by
 * ixl_convert_sysctl_aq_link_speed with to_aq == false).  Read-only.
 */
2797 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
2799 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2800 int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
2802 return sysctl_handle_int(oidp, NULL, supported, req);
2806 ** Control link advertise speed:
2808 ** 0x1 - advertise 100 Mb
2809 ** 0x2 - advertise 1G
2810 ** 0x4 - advertise 10G
2811 ** 0x8 - advertise 20G
2812 ** 0x10 - advertise 25G
2813 ** 0x20 - advertise 40G
2814 ** 0x40 - advertise 2.5G
2815 ** 0x80 - advertise 5G
2817 ** Set to 0 to disable link
/*
 * Sysctl handler for "advertise_speed".  Validates the user-supplied
 * bitmask (rejects it in FW recovery mode, out-of-range bits, and bits
 * not in pf->supported_speeds), applies it via
 * ixl_set_advertised_speeds(), then caches the value and refreshes link
 * status.  Error-return lines are elided from this extraction.
 */
2820 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
2822 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2823 device_t dev = pf->dev;
2824 u8 converted_speeds;
2825 int requested_ls = 0;
2828 /* Read in new mode */
2829 requested_ls = pf->advertised_speed;
2830 error = sysctl_handle_int(oidp, &requested_ls, 0, req);
/* No new value supplied (read-only access) or copyin error: done. */
2831 if ((error) || (req->newptr == NULL))
2833 if (IXL_PF_IN_RECOVERY_MODE(pf)) {
2834 device_printf(dev, "Interface is currently in FW recovery mode. "
2835 "Setting advertise speed not supported\n");
2839 /* Error out if bits outside of possible flag range are set */
2840 if ((requested_ls & ~((u8)0xFF)) != 0) {
2841 device_printf(dev, "Input advertised speed out of range; "
2842 "valid flags are: 0x%02x\n",
2843 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
2847 /* Check if adapter supports input value */
2848 converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
2849 if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
2850 device_printf(dev, "Invalid advertised speed; "
2851 "valid flags are: 0x%02x\n",
2852 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
2856 error = ixl_set_advertised_speeds(pf, requested_ls, false);
/* Only cache the new value once the HW accepted it. */
2860 pf->advertised_speed = requested_ls;
2861 ixl_update_link_status(pf);
2866 * Input: bitmap of enum i40e_aq_link_speed
/*
 * ixl_max_aq_speed_to_value - return the highest speed present in the
 * bitmap as an ifmedia rate (bits per second, via IF_Mbps/IF_Gbps).
 * Checks speeds from fastest to slowest and returns on first match;
 * the return lines for 40G..10G and 1G are elided from this extraction.
 */
2869 ixl_max_aq_speed_to_value(u8 link_speeds)
2871 if (link_speeds & I40E_LINK_SPEED_40GB)
2873 if (link_speeds & I40E_LINK_SPEED_25GB)
2875 if (link_speeds & I40E_LINK_SPEED_20GB)
2877 if (link_speeds & I40E_LINK_SPEED_10GB)
2879 if (link_speeds & I40E_LINK_SPEED_5GB)
2881 if (link_speeds & I40E_LINK_SPEED_2_5GB)
2882 return IF_Mbps(2500);
2883 if (link_speeds & I40E_LINK_SPEED_1GB)
2885 if (link_speeds & I40E_LINK_SPEED_100MB)
2886 return IF_Mbps(100);
2888 /* Minimum supported link speed */
2889 return IF_Mbps(100);
2893 ** Get the width and transaction speed of
2894 ** the bus this adapter is plugged into.
/*
 * ixl_get_bus_info - read the PCIe Link Status register, record the
 * negotiated speed/width in the shared hw struct, print them, and warn
 * when the slot's bandwidth may be insufficient for all ports at the
 * adapter's maximum link speed.  X722 parts are skipped (not PCIe-attached
 * in the same way, per the comment below).
 */
2897 ixl_get_bus_info(struct ixl_pf *pf)
2899 struct i40e_hw *hw = &pf->hw;
2900 device_t dev = pf->dev;
2902 u32 offset, num_ports;
2905 /* Some devices don't use PCIE */
2906 if (hw->mac.type == I40E_MAC_X722)
2909 /* Read PCI Express Capabilities Link Status Register */
2910 pci_find_cap(dev, PCIY_EXPRESS, &offset);
2911 link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
2913 /* Fill out hw struct with PCIE info */
2914 i40e_set_pci_config_data(hw, link);
2916 /* Use info to print out bandwidth messages */
2917 device_printf(dev,"PCI Express Bus: Speed %s %s\n",
2918 ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
2919 (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
2920 (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
2921 (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
2922 (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
2923 (hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
2924 (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
2928 * If adapter is in slot with maximum supported speed,
2929 * no warning message needs to be printed out.
2931 if (hw->bus.speed >= i40e_bus_speed_8000
2932 && hw->bus.width >= i40e_bus_width_pcie_x8)
/* Rough heuristic: total port bandwidth vs. (speed enum * lanes). */
2935 num_ports = bitcount32(hw->func_caps.valid_functions);
2936 max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
2938 if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
2939 device_printf(dev, "PCI-Express bandwidth available"
2940 " for this device may be insufficient for"
2941 " optimal performance.\n");
2942 device_printf(dev, "Please move the device to a different"
2943 " PCI-e link with more lanes and/or higher"
2944 " transfer rate.\n");
/*
 * Sysctl handler for "fw_version": format the NVM/firmware version
 * string into an sbuf sized for sysctl output.
 */
2949 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
2951 struct ixl_pf *pf = (struct ixl_pf *)arg1;
2952 struct i40e_hw *hw = &pf->hw;
2955 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
2956 ixl_nvm_version_str(hw, sbuf);
/*
 * ixl_print_nvm_cmd - debug-print a decoded NVM update (NVMUPD) request.
 * The module pointer lives in config bits [7:0] and the flags in bits
 * [11:8].  A read of module 0xF / flags 0xF / offset 0 / size 1 is the
 * tool's "get driver status" probe and is reported specially.
 * NOTE(review): the cmd_str assignments per command are on elided lines.
 */
2964 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
2966 u8 nvma_ptr = nvma->config & 0xFF;
2967 u8 nvma_flags = (nvma->config & 0xF00) >> 8;
2968 const char * cmd_str;
2970 switch (nvma->command) {
2972 if (nvma_ptr == 0xF && nvma_flags == 0xF &&
2973 nvma->offset == 0 && nvma->data_size == 1) {
2974 device_printf(dev, "NVMUPD: Get Driver Status Command\n");
2979 case I40E_NVM_WRITE:
2983 device_printf(dev, "NVMUPD: unknown command: 0x%08x\n", nvma->command);
2987 "NVMUPD: cmd: %s ptr: 0x%02x flags: 0x%01x offset: 0x%08x data_s: 0x%08x\n",
2988 cmd_str, nvma_ptr, nvma_flags, nvma->offset, nvma->data_size);
/*
 * ixl_handle_nvmupd_cmd - service an NVM update ioctl from the Intel
 * nvmupdate tool.  Copies the i40e_nvm_access request in from userspace,
 * validates sizes, waits (up to ~10s) for any in-progress EMP reset to
 * finish, grows the buffer when an old tool under-reports ifd_len,
 * executes the command through the shared i40e_nvmupd_command(), and
 * copies the result back out.  Kernel-to-user error codes: -EPERM is
 * rewritten to -EACCES because ERESTART would make the kernel re-issue
 * the ioctl.
 */
2992 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
2994 struct i40e_hw *hw = &pf->hw;
2995 struct i40e_nvm_access *nvma;
2996 device_t dev = pf->dev;
2997 enum i40e_status_code status = 0;
2998 size_t nvma_size, ifd_len, exp_len;
3001 DEBUGFUNC("ixl_handle_nvmupd_cmd");
3004 nvma_size = sizeof(struct i40e_nvm_access);
3005 ifd_len = ifd->ifd_len;
/* Request must at least hold the fixed header struct. */
3007 if (ifd_len < nvma_size ||
3008 ifd->ifd_data == NULL) {
3009 device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
3011 device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
3012 __func__, ifd_len, nvma_size);
3013 device_printf(dev, "%s: data pointer: %p\n", __func__,
3018 nvma = malloc(ifd_len, M_IXL, M_WAITOK);
3019 err = copyin(ifd->ifd_data, nvma, ifd_len);
3021 device_printf(dev, "%s: Cannot get request from user space\n",
3027 if (pf->dbg_mask & IXL_DBG_NVMUPD)
3028 ixl_print_nvm_cmd(dev, nvma);
/* Poll for reset completion: 100 x 100ms = ~10 seconds max. */
3030 if (IXL_PF_IS_RESETTING(pf)) {
3032 while (count++ < 100) {
3033 i40e_msec_delay(100);
3034 if (!(IXL_PF_IS_RESETTING(pf)))
3039 if (IXL_PF_IS_RESETTING(pf)) {
3041 "%s: timeout waiting for EMP reset to finish\n",
3047 if (nvma->data_size < 1 || nvma->data_size > 4096) {
3049 "%s: invalid request, data size not in supported range\n",
3056 * Older versions of the NVM update tool don't set ifd_len to the size
3057 * of the entire buffer passed to the ioctl. Check the data_size field
3058 * in the contained i40e_nvm_access struct and ensure everything is
3059 * copied in from userspace.
3061 exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */
3063 if (ifd_len < exp_len) {
3065 nvma = realloc(nvma, ifd_len, M_IXL, M_WAITOK);
3066 err = copyin(ifd->ifd_data, nvma, ifd_len);
3068 device_printf(dev, "%s: Cannot get request from user space\n",
3075 // TODO: Might need a different lock here
3077 status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
3078 // IXL_PF_UNLOCK(pf);
3080 err = copyout(nvma, ifd->ifd_data, ifd_len);
3083 device_printf(dev, "%s: Cannot return data to user space\n",
3088 /* Let the nvmupdate report errors, show them only when debug is enabled */
3089 if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
3090 device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
3091 i40e_stat_str(hw, status), perrno);
3094 * -EPERM is actually ERESTART, which the kernel interprets as it needing
3095 * to run this ioctl again. So use -EACCES for -EPERM instead.
3097 if (perrno == -EPERM)
/*
 * ixl_find_i2c_interface - scan the four GLGEN_MDIO_I2C_SEL registers
 * for the one that has I2C enabled and whose PHY port number matches
 * this function's port.  Returns the matching index (return lines are
 * elided from this extraction; presumably -1 when none matches — TODO
 * confirm against full source).
 */
3104 ixl_find_i2c_interface(struct ixl_pf *pf)
3106 struct i40e_hw *hw = &pf->hw;
3107 bool i2c_en, port_matched;
3110 for (int i = 0; i < 4; i++) {
3111 reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
3112 i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
3113 port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
3114 >> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
3116 if (i2c_en && port_matched)
/*
 * ixl_set_link - bring the link up or down via the Admin Queue.
 * Reads the device's full PHY capabilities (to capture phy_type /
 * phy_type_ext) and then the currently-enabled capabilities, builds a
 * Set PHY Config preserving speed/EEE/FEC, applies the driver's flow
 * control mode to the pause-ability bits, and finally issues Set Link
 * Restart AN with the requested enable state.  Error-return lines are
 * elided from this extraction.
 */
3124 ixl_set_link(struct ixl_pf *pf, bool enable)
3126 struct i40e_hw *hw = &pf->hw;
3127 device_t dev = pf->dev;
3128 struct i40e_aq_get_phy_abilities_resp abilities;
3129 struct i40e_aq_set_phy_config config;
3130 enum i40e_status_code aq_error = 0;
3131 u32 phy_type, phy_type_ext;
3133 /* Get initial capability information */
3134 aq_error = i40e_aq_get_phy_capabilities(hw,
3135 FALSE, TRUE, &abilities, NULL);
3138 "%s: Error getting phy capabilities %d,"
3139 " aq error: %d\n", __func__, aq_error,
3140 hw->aq.asq_last_status);
/* Remember the full type masks before the second (current) query. */
3144 phy_type = abilities.phy_type;
3145 phy_type_ext = abilities.phy_type_ext;
3147 /* Get current capability information */
3148 aq_error = i40e_aq_get_phy_capabilities(hw,
3149 FALSE, FALSE, &abilities, NULL);
3152 "%s: Error getting phy capabilities %d,"
3153 " aq error: %d\n", __func__, aq_error,
3154 hw->aq.asq_last_status);
3158 /* Prepare new config */
3159 memset(&config, 0, sizeof(config));
3160 config.link_speed = abilities.link_speed;
3161 config.abilities = abilities.abilities;
3162 config.eee_capability = abilities.eee_capability;
3163 config.eeer = abilities.eeer_val;
3164 config.low_power_ctrl = abilities.d3_lpan;
3165 config.fec_config = abilities.fec_cfg_curr_mod_ext_info
3166 & I40E_AQ_PHY_FEC_CONFIG_MASK;
3167 config.phy_type = 0;
3168 config.phy_type_ext = 0;
/* Re-apply pause flags from the driver's flow-control mode (switch elided). */
3170 config.abilities &= ~(I40E_AQ_PHY_FLAG_PAUSE_TX |
3171 I40E_AQ_PHY_FLAG_PAUSE_RX);
3175 config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX |
3176 I40E_AQ_PHY_FLAG_PAUSE_RX;
3178 case I40E_FC_RX_PAUSE:
3179 config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_RX;
3181 case I40E_FC_TX_PAUSE:
3182 config.abilities |= I40E_AQ_PHY_FLAG_PAUSE_TX;
3189 config.phy_type = phy_type;
3190 config.phy_type_ext = phy_type_ext;
3194 aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
3197 "%s: Error setting new phy config %d,"
3198 " aq error: %d\n", __func__, aq_error,
3199 hw->aq.asq_last_status);
3203 aq_error = i40e_aq_set_link_restart_an(hw, enable, NULL);
3206 "%s: Error set link config %d,"
3207 " aq error: %d\n", __func__, aq_error,
3208 hw->aq.asq_last_status);
/*
 * ixl_phy_type_string - map a PHY type bit position to its name.
 * With ext set, indexes the 8-entry extended table (bit_pos 0-7);
 * otherwise the 32-entry base table (bit_pos 0-31).  Out-of-range
 * positions yield "Invalid"/"Invalid_Ext".  Most table entries are on
 * lines elided from this extraction.
 */
3214 ixl_phy_type_string(u32 bit_pos, bool ext)
3216 static char * phy_types_str[32] = {
3246 "1000BASE-T Optical",
3250 static char * ext_phy_types_str[8] = {
3261 if (ext && bit_pos > 7) return "Invalid_Ext";
3262 if (bit_pos > 31) return "Invalid";
3264 return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
3267 /* TODO: ERJ: I don't this is necessary anymore. */
/*
 * ixl_aq_get_link_status - issue a raw Get Link Status admin command
 * (with link-status-event reporting enabled) and copy the response into
 * the caller's buffer.  Kept as a direct AQ call rather than using the
 * cached link info.
 */
3269 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
3271 device_t dev = pf->dev;
3272 struct i40e_hw *hw = &pf->hw;
3273 struct i40e_aq_desc desc;
3274 enum i40e_status_code status;
3276 struct i40e_aqc_get_link_status *aq_link_status =
3277 (struct i40e_aqc_get_link_status *)&desc.params.raw;
3279 i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
3280 link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
3281 status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
3284 "%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
3285 __func__, i40e_stat_str(hw, status),
3286 i40e_aq_str(hw, hw->aq.asq_last_status));
/* Response lives in the descriptor's raw params; hand it back. */
3290 bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
/*
 * ixl_phy_type_string_ls - name a PHY type as reported by Get Link
 * Status, where extended types are encoded above 0x1F; values past that
 * threshold are rebased into the extended table.  (The threshold
 * comparison line is elided from this extraction.)
 */
3295 ixl_phy_type_string_ls(u8 val)
3298 return ixl_phy_type_string(val - 0x1F, true);
3300 return ixl_phy_type_string(val, false);
/*
 * Sysctl handler for debug.link_status: fetch a fresh Get Link Status
 * response from the AQ and pretty-print its raw fields (PHY type, speed,
 * link/AN/ext info, loopback, max frame size, power) into an sbuf.
 */
3304 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
3306 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3307 device_t dev = pf->dev;
3311 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3313 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3317 struct i40e_aqc_get_link_status link_status;
3318 error = ixl_aq_get_link_status(pf, &link_status);
3324 sbuf_printf(buf, "\n"
3325 "PHY Type : 0x%02x<%s>\n"
3327 "Link info: 0x%02x\n"
3328 "AN info : 0x%02x\n"
3329 "Ext info : 0x%02x\n"
3330 "Loopback : 0x%02x\n"
3334 link_status.phy_type,
3335 ixl_phy_type_string_ls(link_status.phy_type),
3336 link_status.link_speed,
3337 link_status.link_info,
3338 link_status.an_info,
3339 link_status.ext_info,
3340 link_status.loopback,
3341 link_status.max_frame_size,
3343 link_status.power_desc);
3345 error = sbuf_finish(buf);
3347 device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Sysctl handler for debug.phy_abilities[/init]: query PHY capabilities
 * from FW and pretty-print them.  arg2 selects which report FW returns
 * (non-zero -> device capabilities as at init, 0 -> currently enabled).
 * PHY type and speed bitmasks are decoded bit-by-bit into names.
 */
3354 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
3356 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3357 struct i40e_hw *hw = &pf->hw;
3358 device_t dev = pf->dev;
3359 enum i40e_status_code status;
3360 struct i40e_aq_get_phy_abilities_resp abilities;
3364 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3366 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3370 status = i40e_aq_get_phy_capabilities(hw,
3371 FALSE, arg2 != 0, &abilities, NULL);
3374 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
3375 __func__, i40e_stat_str(hw, status),
3376 i40e_aq_str(hw, hw->aq.asq_last_status));
3381 sbuf_printf(buf, "\n"
3383 abilities.phy_type);
/* Decode each set bit of the 32-bit base PHY type mask. */
3385 if (abilities.phy_type != 0) {
3386 sbuf_printf(buf, "<");
3387 for (int i = 0; i < 32; i++)
3388 if ((1 << i) & abilities.phy_type)
3389 sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
3390 sbuf_printf(buf, ">");
3393 sbuf_printf(buf, "\nPHY Ext : %02x",
3394 abilities.phy_type_ext);
3396 if (abilities.phy_type_ext != 0) {
3397 sbuf_printf(buf, "<");
3398 for (int i = 0; i < 4; i++)
3399 if ((1 << i) & abilities.phy_type_ext)
3400 sbuf_printf(buf, "%s,",
3401 ixl_phy_type_string(i, true));
3402 sbuf_printf(buf, ">");
3405 sbuf_printf(buf, "\nSpeed : %02x", abilities.link_speed);
3406 if (abilities.link_speed != 0) {
3408 sbuf_printf(buf, " <");
3409 for (int i = 0; i < 8; i++) {
3410 link_speed = (1 << i) & abilities.link_speed;
3412 sbuf_printf(buf, "%s, ",
3413 ixl_link_speed_string(link_speed));
3415 sbuf_printf(buf, ">");
3418 sbuf_printf(buf, "\n"
3423 "ID : %02x %02x %02x %02x\n"
3424 "ModType : %02x %02x %02x\n"
3428 abilities.abilities, abilities.eee_capability,
3429 abilities.eeer_val, abilities.d3_lpan,
3430 abilities.phy_id[0], abilities.phy_id[1],
3431 abilities.phy_id[2], abilities.phy_id[3],
3432 abilities.module_type[0], abilities.module_type[1],
3433 abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
3434 abilities.fec_cfg_curr_mod_ext_info & 0x1F,
3435 abilities.ext_comp_code);
3437 error = sbuf_finish(buf);
3439 device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Sysctl handler for debug.filter_list: dump the PF's software MAC
 * filter list (MAC, VLAN, flags per entry), followed by each enabled
 * VF's filter list when SR-IOV is active.
 * NOTE(review): inside the VF section this walks vsi->ftl — presumably
 * vsi is rebound to the VF's VSI on an elided line; verify against the
 * full source.
 */
3446 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
3448 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3449 struct ixl_vsi *vsi = &pf->vsi;
3450 struct ixl_mac_filter *f;
3451 device_t dev = pf->dev;
3452 int error = 0, ftl_len = 0, ftl_counter = 0;
3456 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3458 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3462 sbuf_printf(buf, "\n");
3464 /* Print MAC filters */
3465 sbuf_printf(buf, "PF Filters:\n");
/* First pass counts entries so the last line can omit its newline. */
3466 LIST_FOREACH(f, &vsi->ftl, ftle)
3470 sbuf_printf(buf, "(none)\n");
3472 LIST_FOREACH(f, &vsi->ftl, ftle) {
3474 MAC_FORMAT ", vlan %4d, flags %#06x",
3475 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
3476 /* don't print '\n' for last entry */
3477 if (++ftl_counter != ftl_len)
3478 sbuf_printf(buf, "\n");
3483 /* TODO: Give each VF its own filter list sysctl */
3485 if (pf->num_vfs > 0) {
3486 sbuf_printf(buf, "\n\n");
3487 for (int i = 0; i < pf->num_vfs; i++) {
3489 if (!(vf->vf_flags & VF_FLAG_ENABLED))
3493 ftl_len = 0, ftl_counter = 0;
3494 sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num);
3495 LIST_FOREACH(f, &vsi->ftl, ftle)
3499 sbuf_printf(buf, "(none)\n");
3501 LIST_FOREACH(f, &vsi->ftl, ftle) {
3503 MAC_FORMAT ", vlan %4d, flags %#06x\n",
3504 MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
3511 error = sbuf_finish(buf);
3513 device_printf(dev, "Error finishing sbuf: %d\n", error);
/* Number of switch resource types reported by the Get Switch Resource
 * Allocation AQ command (and size of the name table below). */
3519 #define IXL_SW_RES_SIZE 0x14
/*
 * qsort(3) comparator: order switch resource allocation entries by
 * their resource_type field, ascending.
 */
3521 ixl_res_alloc_cmp(const void *a, const void *b)
3523 const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
3524 one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
3525 two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
3527 return ((int)one->resource_type - (int)two->resource_type);
3531 * Longest string length: 25
/*
 * ixl_switch_res_type_string - map a switch resource type code to a
 * display name; codes at or beyond IXL_SW_RES_SIZE are "(Reserved)".
 * (Several table entries are on lines elided from this extraction.)
 */
3534 ixl_switch_res_type_string(u8 type)
3536 static const char * ixl_switch_res_type_strings[IXL_SW_RES_SIZE] = {
3539 "Perfect Match MAC address",
3542 "Multicast hash entry",
3543 "Unicast hash entry",
3547 "VLAN Statistic Pool",
3550 "Inner VLAN Forward filter",
3559 if (type < IXL_SW_RES_SIZE)
3560 return ixl_switch_res_type_strings[type];
3562 return "(Reserved)";
/*
 * Sysctl handler for debug.hw_res_alloc: query switch resource
 * allocation from FW, sort the entries by type, and render a table of
 * guaranteed/total/used/unallocated counts per resource type.
 */
3566 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
3568 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3569 struct i40e_hw *hw = &pf->hw;
3570 device_t dev = pf->dev;
3572 enum i40e_status_code status;
3576 struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
3578 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3580 device_printf(dev, "Could not allocate sbuf for output.\n");
3584 bzero(resp, sizeof(resp));
3585 status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
3591 "%s: get_switch_resource_alloc() error %s, aq error %s\n",
3592 __func__, i40e_stat_str(hw, status),
3593 i40e_aq_str(hw, hw->aq.asq_last_status));
3598 /* Sort entries by type for display */
3599 qsort(resp, num_entries,
3600 sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
3601 &ixl_res_alloc_cmp);
3603 sbuf_cat(buf, "\n");
3604 sbuf_printf(buf, "# of entries: %d\n", num_entries);
3606 " Type | Guaranteed | Total | Used | Un-allocated\n"
3607 " | (this) | (all) | (this) | (all) \n");
3608 for (int i = 0; i < num_entries; i++) {
3610 "%25s | %10d %5d %6d %12d",
3611 ixl_switch_res_type_string(resp[i].resource_type),
3615 resp[i].total_unalloced);
/* Newline between rows, but not after the final one. */
3616 if (i < num_entries - 1)
3617 sbuf_cat(buf, "\n");
3620 error = sbuf_finish(buf);
3622 device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Fixed SEID ranges assigned by firmware at power-on: the EMP, the MAC
 * ports, PFs and VFs each occupy a known SEID window, letting
 * ixl_switch_element_string() name an element from its SEID alone.
 */
3628 enum ixl_sw_seid_offset {
3629 IXL_SW_SEID_EMP = 1,
3630 IXL_SW_SEID_MAC_START = 2,
3631 IXL_SW_SEID_MAC_END = 5,
3632 IXL_SW_SEID_PF_START = 16,
3633 IXL_SW_SEID_PF_END = 31,
3634 IXL_SW_SEID_VF_START = 32,
3635 IXL_SW_SEID_VF_END = 159,
3639 * Caller must init and delete sbuf; this function will clear and
3640 * finish it for caller.
3642 * Note: The SEID argument only applies for elements defined by FW at
3643 * power-on; these include the EMP, Ports, PFs and VFs.
/*
 * ixl_switch_element_string - render a switch element's name into the
 * caller's sbuf and return its data pointer.  SEIDs inside the known
 * FW-assigned windows (EMP/MAC/PF/VF) are named from the SEID; anything
 * else is named from its element type.
 */
3646 ixl_switch_element_string(struct sbuf *s, u8 element_type, u16 seid)
3650 /* If SEID is in certain ranges, then we can infer the
3651 * mapping of SEID to switch element.
3653 if (seid == IXL_SW_SEID_EMP) {
3656 } else if (seid >= IXL_SW_SEID_MAC_START &&
3657 seid <= IXL_SW_SEID_MAC_END) {
3658 sbuf_printf(s, "MAC %2d",
3659 seid - IXL_SW_SEID_MAC_START);
3661 } else if (seid >= IXL_SW_SEID_PF_START &&
3662 seid <= IXL_SW_SEID_PF_END) {
3663 sbuf_printf(s, "PF %3d",
3664 seid - IXL_SW_SEID_PF_START);
3666 } else if (seid >= IXL_SW_SEID_VF_START &&
3667 seid <= IXL_SW_SEID_VF_END) {
3668 sbuf_printf(s, "VF %3d",
3669 seid - IXL_SW_SEID_VF_START);
/* SEID not in a fixed window: fall back to the element type. */
3673 switch (element_type) {
3674 case I40E_AQ_SW_ELEM_TYPE_BMC:
3677 case I40E_AQ_SW_ELEM_TYPE_PV:
3680 case I40E_AQ_SW_ELEM_TYPE_VEB:
3683 case I40E_AQ_SW_ELEM_TYPE_PA:
3686 case I40E_AQ_SW_ELEM_TYPE_VSI:
3687 sbuf_printf(s, "VSI");
3696 return sbuf_data(s);
3700 ixl_sw_cfg_elem_seid_cmp(const void *a, const void *b)
3702 const struct i40e_aqc_switch_config_element_resp *one, *two;
3703 one = (const struct i40e_aqc_switch_config_element_resp *)a;
3704 two = (const struct i40e_aqc_switch_config_element_resp *)b;
3706 return ((int)one->seid - (int)two->seid);
/*
 * Sysctl handler: dump the FW-reported switch configuration as a table of
 * elements (SEID, uplink/downlink SEIDs, connection type), sorted by SEID.
 */
3710 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
3712 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3713 struct i40e_hw *hw = &pf->hw;
3714 device_t dev = pf->dev;
3717 enum i40e_status_code status;
3720 u8 aq_buf[I40E_AQ_LARGE_BUF];
3722 struct i40e_aqc_switch_config_element_resp *elem;
3723 struct i40e_aqc_get_switch_config_resp *sw_config;
/* The AQ response is parsed in place from the raw admin-queue buffer. */
3724 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
3726 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3728 device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3732 status = i40e_aq_get_switch_config(hw, sw_config,
3733 sizeof(aq_buf), &next, NULL);
3736 "%s: aq_get_switch_config() error %s, aq error %s\n",
3737 __func__, i40e_stat_str(hw, status),
3738 i40e_aq_str(hw, hw->aq.asq_last_status));
/* A non-zero 'next' SEID means more config exists than one AQ call returns. */
3743 device_printf(dev, "%s: TODO: get more config with SEID %d\n",
/* Scratch sbuf reused for each element-name string. */
3746 nmbuf = sbuf_new_auto();
3748 device_printf(dev, "Could not allocate sbuf for name output.\n");
3753 /* Sort entries by SEID for display */
3754 qsort(sw_config->element, sw_config->header.num_reported,
3755 sizeof(struct i40e_aqc_switch_config_element_resp),
3756 &ixl_sw_cfg_elem_seid_cmp);
3758 sbuf_cat(buf, "\n");
3759 /* Assuming <= 255 elements in switch */
3760 sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
3761 sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
3763 * Revision -- all elements are revision 1 for now
3766 "SEID ( Name ) | Up ( Name ) | Down ( Name ) | Conn Type\n"
3767 " | | | (uplink)\n");
3768 for (int i = 0; i < sw_config->header.num_reported; i++) {
3769 elem = &sw_config->element[i];
3771 // "%4d (%8s) | %8s %8s %#8x",
3772 sbuf_printf(buf, "%4d", elem->seid);
3774 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3775 elem->element_type, elem->seid));
3776 sbuf_cat(buf, " | ");
3777 sbuf_printf(buf, "%4d", elem->uplink_seid);
/* element_type 0 here: up/down links are named purely from their SEID range. */
3779 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3780 0, elem->uplink_seid));
3781 sbuf_cat(buf, " | ");
3782 sbuf_printf(buf, "%4d", elem->downlink_seid);
3784 sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3785 0, elem->downlink_seid));
3786 sbuf_cat(buf, " | ");
3787 sbuf_printf(buf, "%8d", elem->connection_type);
/* No trailing newline on the final row. */
3788 if (i < sw_config->header.num_reported - 1)
3789 sbuf_cat(buf, "\n");
3793 error = sbuf_finish(buf);
3795 device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Sysctl handler: set the switch VLAN ethertype (switch_tag) and push the
 * change to firmware. Only permitted when the HW advertises 802.1AD support.
 */
3803 ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS)
3805 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3806 struct i40e_hw *hw = &pf->hw;
3807 device_t dev = pf->dev;
3808 int requested_vlan = -1;
3809 enum i40e_status_code status = 0;
3812 error = sysctl_handle_int(oidp, &requested_vlan, 0, req);
/* Read-only access (no new value supplied) returns here. */
3813 if ((error) || (req->newptr == NULL))
3816 if ((hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) == 0) {
3817 device_printf(dev, "Flags disallow setting of vlans\n");
3821 hw->switch_tag = requested_vlan;
3823 "Setting switch config to switch_tag=%04x, first_tag=%04x, second_tag=%04x\n",
3824 hw->switch_tag, hw->first_tag, hw->second_tag);
3825 status = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
3828 "%s: aq_set_switch_config() error %s, aq error %s\n",
3829 __func__, i40e_stat_str(hw, status),
3830 i40e_aq_str(hw, hw->aq.asq_last_status));
/*
 * Sysctl handler: dump the RSS hash key. X722 parts fetch the key through
 * the admin queue; other MACs read it from the PFQF_HKEY registers.
 */
3837 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
3839 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3840 struct i40e_hw *hw = &pf->hw;
3841 device_t dev = pf->dev;
3844 enum i40e_status_code status;
3847 struct i40e_aqc_get_set_rss_key_data key_data;
3849 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3851 device_printf(dev, "Could not allocate sbuf for output.\n");
3855 bzero(&key_data, sizeof(key_data));
3857 sbuf_cat(buf, "\n");
3858 if (hw->mac.type == I40E_MAC_X722) {
3859 status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
3861 device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
3862 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
/* Non-X722: assemble the key 4 bytes at a time from the HKEY registers. */
3864 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
3865 reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
3866 bcopy(&reg, ((caddr_t)&key_data) + (i << 2), 4);
/* Hex dump with an ASCII column (text = true). */
3870 ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
3872 error = sbuf_finish(buf);
3874 device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Hex-dump 'length' bytes from 'buf' into sbuf 'sb', 16 bytes per line,
 * each line prefixed with its offset (biased by 'label_offset'). When
 * 'text' is true an ASCII rendering follows the hex column, with
 * non-printable bytes shown as '.'.
 */
3881 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
3886 if (length < 1 || buf == NULL) return;
3888 int byte_stride = 16;
3889 int lines = length / byte_stride;
3890 int rem = length % byte_stride;
3894 for (i = 0; i < lines; i++) {
/* Final line may be short when length is not a multiple of 16. */
3895 width = (rem > 0 && i == lines - 1)
3896 ? rem : byte_stride;
3898 sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
3900 for (j = 0; j < width; j++)
3901 sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
/* Pad a short hex row so the ASCII column stays aligned. */
3903 if (width < byte_stride) {
3904 for (k = 0; k < (byte_stride - width); k++)
3905 sbuf_printf(sb, " ");
3909 sbuf_printf(sb, "\n");
/* ASCII column: printable range is 32..126. */
3913 for (j = 0; j < width; j++) {
3914 c = (char)buf[i * byte_stride + j];
3915 if (c < 32 || c > 126)
3916 sbuf_printf(sb, ".");
3918 sbuf_printf(sb, "%c", c);
3921 sbuf_printf(sb, "\n");
/*
 * Sysctl handler: dump the RSS hash lookup table (HLUT). X722 parts fetch
 * it through the admin queue; other MACs read the PFQF_HLUT registers.
 */
3927 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
3929 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3930 struct i40e_hw *hw = &pf->hw;
3931 device_t dev = pf->dev;
3934 enum i40e_status_code status;
3938 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3940 device_printf(dev, "Could not allocate sbuf for output.\n");
3944 bzero(hlut, sizeof(hlut));
3945 sbuf_cat(buf, "\n");
3946 if (hw->mac.type == I40E_MAC_X722) {
3947 status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
3949 device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
3950 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
/* Each HLUT register holds 4 table entries, hence the >> 2 / << 2 pairing. */
3952 for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
3953 reg = rd32(hw, I40E_PFQF_HLUT(i));
3954 bcopy(&reg, &hlut[i << 2], 4);
/* Raw hex dump, no ASCII column (text = false). */
3957 ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
3959 error = sbuf_finish(buf);
3961 device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Sysctl handler: report the 64-bit RSS Hash Enable (HENA) bitmask,
 * assembled from the two 32-bit PFQF_HENA registers.
 */
3968 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
3970 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3971 struct i40e_hw *hw = &pf->hw;
/* HENA(0) is the low 32 bits, HENA(1) the high 32 bits. */
3974 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
3975 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
3977 return sysctl_handle_long(oidp, NULL, hena, req);
3981 * Sysctl to disable firmware's link management
3983 * 1 - Disable link management on this port
3984 * 0 - Re-enable link management
3986 * On normal NVMs, firmware manages link by default.
3989 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
3991 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3992 struct i40e_hw *hw = &pf->hw;
3993 device_t dev = pf->dev;
3994 int requested_mode = -1;
3995 enum i40e_status_code status = 0;
3998 /* Read in new mode */
3999 error = sysctl_handle_int(oidp, &requested_mode, 0, req);
4000 if ((error) || (req->newptr == NULL))
4002 /* Check for sane value */
4003 if (requested_mode < 0 || requested_mode > 1) {
4004 device_printf(dev, "Valid modes are 0 or 1\n");
/* Bit 4 of the PHY debug command disables FW link management. */
4009 status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
4012 "%s: Error setting new phy debug mode %s,"
4013 " aq error: %s\n", __func__, i40e_stat_str(hw, status),
4014 i40e_aq_str(hw, hw->aq.asq_last_status));
4022 * Read some diagnostic data from a (Q)SFP+ module
4024 * SFP A2 QSFP Lower Page
4025 * Temperature 96-97 22-23
4027 * TX power 102-103 34-35..40-41
4028 * RX power 104-105 50-51..56-57
4031 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
4033 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4034 device_t dev = pf->dev;
/* Sizing-only probe (sysctl asking for the buffer size): report 128 bytes. */
4039 if (req->oldptr == NULL) {
4040 error = SYSCTL_OUT(req, 0, 128);
/* Byte 0 of page 0xA0 identifies the module type. */
4044 error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
4046 device_printf(dev, "Error reading from i2c\n");
4050 /* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */
4051 if (output == 0x3) {
4054 * - Internally calibrated data
4055 * - Diagnostic monitoring is implemented
/* SFP: byte 92 advertises diagnostic-monitoring capability (bits 0x60). */
4057 pf->read_i2c_byte(pf, 92, 0xA0, &output);
4058 if (!(output & 0x60)) {
4059 device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
4063 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
/* SFP diagnostics live on page 0xA2: temp 96-99, TX/RX power 102-105. */
4065 for (u8 offset = 96; offset < 100; offset++) {
4066 pf->read_i2c_byte(pf, offset, 0xA2, &output);
4067 sbuf_printf(sbuf, "%02X ", output);
4069 for (u8 offset = 102; offset < 106; offset++) {
4070 pf->read_i2c_byte(pf, offset, 0xA2, &output);
4071 sbuf_printf(sbuf, "%02X ", output);
4073 } else if (output == 0xD || output == 0x11) {
4075 * QSFP+ modules are always internally calibrated, and must indicate
4076 * what types of diagnostic monitoring are implemented
/* QSFP+/QSFP28: diagnostics are on the lower page of 0xA0. */
4078 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4080 for (u8 offset = 22; offset < 24; offset++) {
4081 pf->read_i2c_byte(pf, offset, 0xA0, &output);
4082 sbuf_printf(sbuf, "%02X ", output);
4084 for (u8 offset = 26; offset < 28; offset++) {
4085 pf->read_i2c_byte(pf, offset, 0xA0, &output);
4086 sbuf_printf(sbuf, "%02X ", output);
4088 /* Read the data from the first lane */
4089 for (u8 offset = 34; offset < 36; offset++) {
4090 pf->read_i2c_byte(pf, offset, 0xA0, &output);
4091 sbuf_printf(sbuf, "%02X ", output);
4093 for (u8 offset = 50; offset < 52; offset++) {
4094 pf->read_i2c_byte(pf, offset, 0xA0, &output);
4095 sbuf_printf(sbuf, "%02X ", output);
4098 device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output);
4109 * Sysctl to read a byte from I2C bus.
4111 * Input: 32-bit value:
4112 * bits 0-7: device address (0xA0 or 0xA2)
4113 * bits 8-15: offset (0-255)
4114 * bits 16-31: unused
4115 * Output: 8-bit value read
4118 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
4120 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4121 device_t dev = pf->dev;
4122 int input = -1, error = 0;
4123 u8 dev_addr, offset, output;
4125 /* Read in I2C read parameters */
4126 error = sysctl_handle_int(oidp, &input, 0, req);
4127 if ((error) || (req->newptr == NULL))
4129 /* Validate device address */
/* Only the two standard SFP module addresses are accepted. */
4130 dev_addr = input & 0xFF;
4131 if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4134 offset = (input >> 8) & 0xFF;
/* read_i2c_byte is a PF method chosen at attach time (bit-bang vs AQ). */
4136 error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
/* Result is reported on the console rather than through the sysctl. */
4140 device_printf(dev, "%02X\n", output);
4145 * Sysctl to write a byte to the I2C bus.
4147 * Input: 32-bit value:
4148 * bits 0-7: device address (0xA0 or 0xA2)
4149 * bits 8-15: offset (0-255)
4150 * bits 16-23: value to write
4151 * bits 24-31: unused
4152 * Output: 8-bit value written
4155 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
4157 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4158 device_t dev = pf->dev;
4159 int input = -1, error = 0;
4160 u8 dev_addr, offset, value;
4162 /* Read in I2C write parameters */
4163 error = sysctl_handle_int(oidp, &input, 0, req);
4164 if ((error) || (req->newptr == NULL))
4166 /* Validate device address */
/* Only the two standard SFP module addresses are accepted. */
4167 dev_addr = input & 0xFF;
4168 if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4171 offset = (input >> 8) & 0xFF;
4172 value = (input >> 16) & 0xFF;
/* write_i2c_byte is a PF method chosen at attach time (bit-bang vs AQ). */
4174 error = pf->write_i2c_byte(pf, offset, dev_addr, value);
4178 device_printf(dev, "%02X written\n", value);
/*
 * Query PHY abilities from firmware and report, via '*is_set', whether the
 * FEC configuration bit 'bit_pos' is currently set. Also fills '*abilities'
 * for the caller's later use (e.g. ixl_set_fec_config). Skipped entirely
 * when the PF is in FW recovery mode.
 */
4183 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4184 u8 bit_pos, int *is_set)
4186 device_t dev = pf->dev;
4187 struct i40e_hw *hw = &pf->hw;
4188 enum i40e_status_code status;
4190 if (IXL_PF_IN_RECOVERY_MODE(pf))
4193 status = i40e_aq_get_phy_capabilities(hw,
4194 FALSE, FALSE, abilities, NULL);
4197 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
4198 __func__, i40e_stat_str(hw, status),
4199 i40e_aq_str(hw, hw->aq.asq_last_status));
4203 *is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
/*
 * Set or clear FEC configuration bit 'bit_pos' in the PHY config. The new
 * config is only pushed to firmware when it actually differs from the
 * current fec_cfg_curr_mod_ext_info; all other PHY settings are copied
 * unchanged from 'abilities' (previously filled by ixl_get_fec_config).
 */
4208 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4209 u8 bit_pos, int set)
4211 device_t dev = pf->dev;
4212 struct i40e_hw *hw = &pf->hw;
4213 struct i40e_aq_set_phy_config config;
4214 enum i40e_status_code status;
4216 /* Set new PHY config */
4217 memset(&config, 0, sizeof(config));
/* Start from the current FEC bits with 'bit_pos' cleared... */
4218 config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
/* ...then set it back if requested. */
4220 config.fec_config |= bit_pos;
4221 if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
/* Apply without forcing a link flap where possible. */
4222 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
4223 config.phy_type = abilities->phy_type;
4224 config.phy_type_ext = abilities->phy_type_ext;
4225 config.link_speed = abilities->link_speed;
4226 config.eee_capability = abilities->eee_capability;
4227 config.eeer = abilities->eeer_val;
4228 config.low_power_ctrl = abilities->d3_lpan;
4229 status = i40e_aq_set_phy_config(hw, &config, NULL);
4233 "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
4234 __func__, i40e_stat_str(hw, status),
4235 i40e_aq_str(hw, hw->aq.asq_last_status));
/*
 * Sysctl handler: get/set the FC-FEC (KR-FEC, Fire Code) ability bit.
 * Reads report the current state; writes set/clear the ability.
 */
4244 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
4246 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4247 int mode, error = 0;
4249 struct i40e_aq_get_phy_abilities_resp abilities;
4250 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
4253 /* Read in new mode */
4254 error = sysctl_handle_int(oidp, &mode, 0, req);
4255 if ((error) || (req->newptr == NULL))
4258 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
/*
 * Sysctl handler: get/set the RS-FEC (Reed-Solomon) ability bit.
 * Reads report the current state; writes set/clear the ability.
 */
4262 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
4264 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4265 int mode, error = 0;
4267 struct i40e_aq_get_phy_abilities_resp abilities;
4268 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
4271 /* Read in new mode */
4272 error = sysctl_handle_int(oidp, &mode, 0, req);
4273 if ((error) || (req->newptr == NULL))
4276 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
/*
 * Sysctl handler: get/set the FC-FEC (KR-FEC) request bit, i.e. whether
 * this FEC mode is requested from the link partner.
 */
4280 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
4282 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4283 int mode, error = 0;
4285 struct i40e_aq_get_phy_abilities_resp abilities;
4286 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
4289 /* Read in new mode */
4290 error = sysctl_handle_int(oidp, &mode, 0, req);
4291 if ((error) || (req->newptr == NULL))
4294 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
/*
 * Sysctl handler: get/set the RS-FEC request bit, i.e. whether this FEC
 * mode is requested from the link partner.
 */
4298 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
4300 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4301 int mode, error = 0;
4303 struct i40e_aq_get_phy_abilities_resp abilities;
4304 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
4307 /* Read in new mode */
4308 error = sysctl_handle_int(oidp, &mode, 0, req);
4309 if ((error) || (req->newptr == NULL))
4312 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
/*
 * Sysctl handler: get/set automatic FEC mode selection, letting firmware
 * pick the FEC mode based on the link partner and media.
 */
4316 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
4318 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4319 int mode, error = 0;
4321 struct i40e_aq_get_phy_abilities_resp abilities;
4322 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
4325 /* Read in new mode */
4326 error = sysctl_handle_int(oidp, &mode, 0, req);
4327 if ((error) || (req->newptr == NULL))
4330 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
/*
 * Sysctl handler: read a firmware debug-dump cluster table by table via
 * repeated i40e_aq_debug_dump() calls, accumulating each table's data in a
 * large temporary buffer and hex-dumping it when the table boundary is
 * reached. Finishes when FW reports next-table 0xFF (cluster complete).
 */
4334 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
4336 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4337 struct i40e_hw *hw = &pf->hw;
4338 device_t dev = pf->dev;
4341 enum i40e_status_code status;
4343 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4345 device_printf(dev, "Could not allocate sbuf for output.\n");
4350 /* This amount is only necessary if reading the entire cluster into memory */
4351 #define IXL_FINAL_BUFF_SIZE (1280 * 1024)
/* M_NOWAIT: this runs from a sysctl; fail rather than sleep on allocation. */
4352 final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_IXL, M_NOWAIT);
4353 if (final_buff == NULL) {
4354 device_printf(dev, "Could not allocate memory for output.\n");
4357 int final_buff_len = 0;
4363 u16 curr_buff_size = 4096;
4364 u8 curr_next_table = 0;
4365 u32 curr_next_index = 0;
4371 sbuf_cat(buf, "\n");
4374 status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
4375 dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
4377 device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
4378 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
4382 /* copy info out of temp buffer */
4383 bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
4384 final_buff_len += ret_buff_size;
4386 if (ret_next_table != curr_next_table) {
4387 /* We're done with the current table; we can dump out read data. */
4388 sbuf_printf(buf, "%d:", curr_next_table);
4389 int bytes_printed = 0;
/* NOTE(review): '<=' prints one extra 16-byte line when final_buff_len is
 * a multiple of 16 — looks like it should be '<'; confirm against upstream. */
4390 while (bytes_printed <= final_buff_len) {
4391 sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
4392 bytes_printed += 16;
4394 sbuf_cat(buf, "\n");
4396 /* The entire cluster has been read; we're finished */
4397 if (ret_next_table == 0xFF)
4400 /* Otherwise clear the output buffer and continue reading */
4401 bzero(final_buff, IXL_FINAL_BUFF_SIZE);
/* 0xFFFFFFFF from FW also terminates the read loop. */
4405 if (ret_next_index == 0xFFFFFFFF)
4408 bzero(dump_buf, sizeof(dump_buf));
4409 curr_next_table = ret_next_table;
4410 curr_next_index = ret_next_index;
4414 free(final_buff, M_IXL);
4416 error = sbuf_finish(buf);
4418 device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Start the firmware LLDP agent and clear the driver's
 * IXL_PF_STATE_FW_LLDP_DISABLED flag on success. Distinguishes
 * "already running" (EEXIST) and "forbidden by NVM/HII config" (EPERM)
 * from other AQ failures.
 */
4425 ixl_start_fw_lldp(struct ixl_pf *pf)
4427 struct i40e_hw *hw = &pf->hw;
4428 enum i40e_status_code status;
4430 status = i40e_aq_start_lldp(hw, false, NULL);
4431 if (status != I40E_SUCCESS) {
4432 switch (hw->aq.asq_last_status) {
4433 case I40E_AQ_RC_EEXIST:
4434 device_printf(pf->dev,
4435 "FW LLDP agent is already running\n");
4437 case I40E_AQ_RC_EPERM:
4438 device_printf(pf->dev,
4439 "Device configuration forbids SW from starting "
4440 "the LLDP agent. Set the \"LLDP Agent\" UEFI HII "
4441 "attribute to \"Enabled\" to use this sysctl\n");
4444 device_printf(pf->dev,
4445 "Starting FW LLDP agent failed: error: %s, %s\n",
4446 i40e_stat_str(hw, status),
4447 i40e_aq_str(hw, hw->aq.asq_last_status));
4452 atomic_clear_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
/*
 * Stop the firmware LLDP agent (persistently, per the i40e_aq_stop_lldp
 * arguments) and mark the PF state accordingly. Refused on NPAR-enabled
 * devices and on FW versions without the stoppable-LLDP capability.
 */
4457 ixl_stop_fw_lldp(struct ixl_pf *pf)
4459 struct i40e_hw *hw = &pf->hw;
4460 device_t dev = pf->dev;
4461 enum i40e_status_code status;
4463 if (hw->func_caps.npar_enable != 0) {
4465 "Disabling FW LLDP agent is not supported on this device\n");
4469 if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) {
4471 "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
4475 status = i40e_aq_stop_lldp(hw, true, false, NULL);
4476 if (status != I40E_SUCCESS) {
/* EPERM here means the agent was already stopped — not an error. */
4477 if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) {
4479 "Disabling FW LLDP agent failed: error: %s, %s\n",
4480 i40e_stat_str(hw, status),
4481 i40e_aq_str(hw, hw->aq.asq_last_status));
4485 device_printf(dev, "FW LLDP agent is already stopped\n");
/* Keep DCB parameters valid after the agent stops. */
4488 i40e_aq_set_dcb_parameters(hw, true, NULL);
4489 atomic_set_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
/*
 * Sysctl handler: enable (1) or disable (0) the firmware LLDP agent.
 * Reads report whether the agent is currently enabled.
 */
4494 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
4496 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4497 int state, new_state, error = 0;
/* 1 = agent enabled (DISABLED flag clear); 0 = agent disabled. */
4499 state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
4501 /* Read in new mode */
4502 error = sysctl_handle_int(oidp, &new_state, 0, req);
4503 if ((error) || (req->newptr == NULL))
4506 /* Already in requested state */
4507 if (new_state == state)
4511 return ixl_stop_fw_lldp(pf);
4513 return ixl_start_fw_lldp(pf);
/*
 * Sysctl handler: enable/disable Energy Efficient Ethernet (EEE).
 * On successful FW command the IXL_PF_STATE_EEE_ENABLED flag is updated
 * to mirror the new state.
 */
4517 ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
4519 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4520 int state, new_state;
4521 int sysctl_handle_status = 0;
4522 enum i40e_status_code cmd_status;
4524 /* Init states' values */
4525 state = new_state = (!!(pf->state & IXL_PF_STATE_EEE_ENABLED));
4527 /* Get requested mode */
4528 sysctl_handle_status = sysctl_handle_int(oidp, &new_state, 0, req);
4529 if ((sysctl_handle_status) || (req->newptr == NULL))
4530 return (sysctl_handle_status);
4532 /* Check if state has changed */
4533 if (new_state == state)
4537 cmd_status = i40e_enable_eee(&pf->hw, (bool)(!!new_state));
4539 /* Save new state or report error */
4542 atomic_clear_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
4544 atomic_set_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
/* I40E_ERR_CONFIG: EEE not supported by the current PHY configuration. */
4545 } else if (cmd_status == I40E_ERR_CONFIG)
/*
 * Sysctl handler: toggle the "keep link active when the interface is
 * brought down" behavior via the IXL_PF_STATE_LINK_ACTIVE_ON_DOWN flag.
 */
4554 ixl_sysctl_set_link_active(SYSCTL_HANDLER_ARGS)
4556 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4559 state = !!(atomic_load_acq_32(&pf->state) &
4560 IXL_PF_STATE_LINK_ACTIVE_ON_DOWN);
4562 error = sysctl_handle_int(oidp, &state, 0, req);
4563 if ((error) || (req->newptr == NULL))
4567 atomic_clear_32(&pf->state, IXL_PF_STATE_LINK_ACTIVE_ON_DOWN);
4569 atomic_set_32(&pf->state, IXL_PF_STATE_LINK_ACTIVE_ON_DOWN);
/*
 * Determine initial link state during attach. Old firmware (< 4.33)
 * needs a short delay and an explicit auto-negotiation restart before
 * link status can be trusted.
 */
4576 ixl_attach_get_link_status(struct ixl_pf *pf)
4578 struct i40e_hw *hw = &pf->hw;
4579 device_t dev = pf->dev;
4582 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
4583 (hw->aq.fw_maj_ver < 4)) {
/* Workaround for old FW: settle, then restart AN to get a valid link state. */
4584 i40e_msec_delay(75);
4585 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
4587 device_printf(dev, "link restart failed, aq_err=%d\n",
4588 pf->hw.aq.asq_last_status);
4593 /* Determine link state */
4594 hw->phy.get_link_info = TRUE;
4595 i40e_get_link_status(hw, &pf->link_up);
4597 /* Flow Control mode not set by user, read current FW settings */
4599 pf->fc = hw->fc.current_mode;
/*
 * Sysctl handler: request a PF reset. The reset is not performed here —
 * the flag schedules it in the admin task.
 */
4605 ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
4607 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4608 int requested = 0, error = 0;
4610 /* Read in new mode */
4611 error = sysctl_handle_int(oidp, &requested, 0, req);
4612 if ((error) || (req->newptr == NULL))
4615 /* Initiate the PF reset later in the admin task */
4616 atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
/*
 * Sysctl handler: trigger a CORE reset immediately by writing the CORER
 * bit into the global reset-trigger register.
 */
4622 ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
4624 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4625 struct i40e_hw *hw = &pf->hw;
4626 int requested = 0, error = 0;
4628 /* Read in new mode */
4629 error = sysctl_handle_int(oidp, &requested, 0, req);
4630 if ((error) || (req->newptr == NULL))
4633 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
/*
 * Sysctl handler: trigger a GLOBAL reset immediately by writing the GLOBR
 * bit into the global reset-trigger register (affects all functions).
 */
4639 ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
4641 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4642 struct i40e_hw *hw = &pf->hw;
4643 int requested = 0, error = 0;
4645 /* Read in new mode */
4646 error = sysctl_handle_int(oidp, &requested, 0, req);
4647 if ((error) || (req->newptr == NULL))
4650 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);
4656 * Print out mapping of TX queue indexes and Rx queue indexes
4660 ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
4662 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4663 struct ixl_vsi *vsi = &pf->vsi;
4664 device_t dev = pf->dev;
4668 struct ixl_rx_queue *rx_que = vsi->rx_queues;
4669 struct ixl_tx_queue *tx_que = vsi->tx_queues;
4671 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4673 device_printf(dev, "Could not allocate sbuf for output.\n");
4677 sbuf_cat(buf, "\n");
4678 for (int i = 0; i < vsi->num_rx_queues; i++) {
4679 rx_que = &vsi->rx_queues[i];
4680 sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
4682 for (int i = 0; i < vsi->num_tx_queues; i++) {
4683 tx_que = &vsi->tx_queues[i];
4684 sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
4687 error = sbuf_finish(buf);
4689 device_printf(dev, "Error finishing sbuf: %d\n", error);