1 /******************************************************************************
3 Copyright (c) 2013-2018, Intel Corporation
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 3. Neither the name of the Intel Corporation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 POSSIBILITY OF SUCH DAMAGE.
32 ******************************************************************************/
39 #include "ixl_pf_iov.h"
44 #include "ixl_iw_int.h"
47 static u8 ixl_convert_sysctl_aq_link_speed(u8, bool);
48 static void ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
49 static const char * ixl_link_speed_string(enum i40e_aq_link_speed);
50 static u_int ixl_add_maddr(void *, struct sockaddr_dl *, u_int);
51 static u_int ixl_match_maddr(void *, struct sockaddr_dl *, u_int);
52 static char * ixl_switch_element_string(struct sbuf *, u8, u16);
53 static enum ixl_fw_mode ixl_get_fw_mode(struct ixl_pf *);
56 static int ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
57 static int ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
58 static int ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
59 static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
60 static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
61 static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
62 static int ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);
64 static int ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS);
67 static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
68 static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
69 static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
70 static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
71 static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
72 static int ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS);
73 static int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
74 static int ixl_sysctl_hena(SYSCTL_HANDLER_ARGS);
75 static int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
76 static int ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS);
77 static int ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS);
78 static int ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS);
79 static int ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS);
80 static int ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS);
81 static int ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS);
82 static int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
83 static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
84 static int ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
85 static int ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
86 static int ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
89 static int ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
90 static int ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
91 static int ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
92 static int ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
94 static int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
95 static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
99 extern int ixl_enable_iwarp;
100 extern int ixl_limit_iwarp_msix;
103 static const char * const ixl_fc_string[6] = {
112 static char *ixl_fec_string[3] = {
114 "CL74 FC-FEC/BASE-R",
118 MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
121 ** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
/*
 * NOTE(review): this extracted view is missing lines (opening brace, the
 * sbuf_printf() call header, eetrack argument, closing brace) -- do not
 * treat the visible text as the complete function.
 */
124 ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
/* OEM version word packing (established by the shifts/masks below):
 * byte 3 = major, bytes 2-1 = build number, byte 0 = patch. */
126 u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
127 u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
128 u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);
/* Format string consumed by an sbuf_printf() whose header is elided here. */
131 "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
132 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
133 hw->aq.api_maj_ver, hw->aq.api_min_ver,
/* NVM version splits into hi/lo fields via the IXL_NVM_VERSION_* masks. */
134 (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
135 IXL_NVM_VERSION_HI_SHIFT,
136 (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
137 IXL_NVM_VERSION_LO_SHIFT,
139 oem_ver, oem_build, oem_patch);
/*
 * Print the combined FW/NVM version string to the console via device_printf.
 * NOTE(review): the sbuf declaration, sbuf_finish() and sbuf_delete() calls
 * are elided from this view -- presumably present in the full source; verify.
 */
143 ixl_print_nvm_version(struct ixl_pf *pf)
145 struct i40e_hw *hw = &pf->hw;
146 device_t dev = pf->dev;
/* Build the version string in an auto-extending sbuf, then print it. */
149 sbuf = sbuf_new_auto();
150 ixl_nvm_version_str(hw, sbuf);
152 device_printf(dev, "%s\n", sbuf_data(sbuf));
157 * ixl_get_fw_mode - Check the state of FW
158 * @hw: device hardware structure
160 * Identify state of FW. It might be in a recovery mode
161 * which limits functionality and requires special handling
164 * @returns FW mode (normal, recovery, unexpected EMP reset)
166 static enum ixl_fw_mode
167 ixl_get_fw_mode(struct ixl_pf *pf)
169 struct i40e_hw *hw = &pf->hw;
170 enum ixl_fw_mode fw_mode = IXL_FW_MODE_NORMAL;
/* A driver-side recovery flag short-circuits the register check below. */
174 if (pf->recovery_mode)
175 return IXL_FW_MODE_RECOVERY;
/* Isolate the FWS1B field of the global FW status register. */
177 fwsts = rd32(hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;
179 /* Is set and has one of expected values */
/* XL710 recovery is a contiguous range [CORER..NVM]; X722 recovery is two
 * discrete values (GLOBR, CORER) -- hence range test plus equality tests. */
180 if ((fwsts >= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK &&
181 fwsts <= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK) ||
182 fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
183 fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK)
184 fw_mode = IXL_FW_MODE_RECOVERY;
/* A non-zero EMP reset count up to the _10 bound means an unexpected
 * EMP reset (UEMPR) state; this check may override the one above. */
186 if (fwsts > I40E_GL_FWSTS_FWS1B_EMPR_0 &&
187 fwsts <= I40E_GL_FWSTS_FWS1B_EMPR_10)
188 fw_mode = IXL_FW_MODE_UEMPR;
194 * ixl_pf_reset - Reset the PF
197 * Ensure that FW is in the right state and do the reset
200 * @returns zero on success, or an error code on failure.
/*
 * NOTE(review): return statements and some braces are elided from this
 * extracted view; the visible flow is: check FW mode, bail to recovery
 * mode if needed, reset, re-check FW mode, report failure.
 */
203 ixl_pf_reset(struct ixl_pf *pf)
205 struct i40e_hw *hw = &pf->hw;
206 enum i40e_status_code status;
207 enum ixl_fw_mode fw_mode;
209 fw_mode = ixl_get_fw_mode(pf);
210 ixl_dbg_info(pf, "%s: before PF reset FW mode: 0x%08x\n", __func__, fw_mode);
211 if (fw_mode == IXL_FW_MODE_RECOVERY) {
/* Latch recovery mode in the PF state word for the rest of the driver. */
212 atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
213 /* Don't try to reset device if it's in recovery mode */
217 status = i40e_pf_reset(hw);
218 if (status == I40E_SUCCESS)
221 /* Check FW mode again in case it has changed while
222 * waiting for reset to complete */
223 fw_mode = ixl_get_fw_mode(pf);
224 ixl_dbg_info(pf, "%s: after PF reset FW mode: 0x%08x\n", __func__, fw_mode);
225 if (fw_mode == IXL_FW_MODE_RECOVERY) {
226 atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
/* UEMPR after a failed reset implies repeated FW resets; warn the admin. */
230 if (fw_mode == IXL_FW_MODE_UEMPR)
231 device_printf(pf->dev,
232 "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n")
234 device_printf(pf->dev, "PF reset failure %s\n",
235 i40e_stat_str(hw, status));
240 * ixl_setup_hmc - Setup LAN Host Memory Cache
243 * Init and configure LAN Host Memory Cache
245 * @returns 0 on success, EIO on error
/*
 * NOTE(review): the status-check `if` lines and return statements are elided
 * from this view; each device_printf below presumably sits in an error path.
 */
248 ixl_setup_hmc(struct ixl_pf *pf)
250 struct i40e_hw *hw = &pf->hw;
251 enum i40e_status_code status;
/* Size the HMC from the function capabilities; FCoE counts are zero. */
253 status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
254 hw->func_caps.num_rx_qp, 0, 0);
256 device_printf(pf->dev, "init_lan_hmc failed: %s\n",
257 i40e_stat_str(hw, status));
261 status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
263 device_printf(pf->dev, "configure_lan_hmc failed: %s\n",
264 i40e_stat_str(hw, status));
272 * ixl_shutdown_hmc - Shutdown LAN Host Memory Cache
275 * Shutdown Host Memory Cache if configured.
279 ixl_shutdown_hmc(struct ixl_pf *pf)
281 struct i40e_hw *hw = &pf->hw;
282 enum i40e_status_code status;
284 /* HMC not configured, no need to shutdown */
/* hmc_obj being NULL is the "never initialized" sentinel. */
285 if (hw->hmc.hmc_obj == NULL)
288 status = i40e_shutdown_lan_hmc(hw);
/* NOTE(review): the status-check `if` line is elided from this view. */
290 device_printf(pf->dev,
291 "Shutdown LAN HMC failed with code %s\n",
292 i40e_stat_str(hw, status));
295 * Write PF ITR values to queue ITR registers.
/* Thin wrapper: pushes both Tx and Rx interrupt throttling values to HW. */
298 ixl_configure_itr(struct ixl_pf *pf)
300 ixl_configure_tx_itr(pf);
301 ixl_configure_rx_itr(pf);
304 /*********************************************************************
306 * Get the hardware capabilities
308 **********************************************************************/
/*
 * Discover function capabilities via the admin queue, detect an optional
 * I2C interface, select the driver's I2C access method, and print a
 * capability summary.  NOTE(review): several lines (declarations of
 * `needed`, goto/retry control flow, free(buf), returns) are elided from
 * this extracted view.
 */
311 ixl_get_hw_capabilities(struct ixl_pf *pf)
313 struct i40e_aqc_list_capabilities_element_resp *buf;
314 struct i40e_hw *hw = &pf->hw;
315 device_t dev = pf->dev;
316 enum i40e_status_code status;
317 int len, i2c_intfc_num;
/* In recovery mode skip discovery; just make sure iWARP is off. */
321 if (IXL_PF_IN_RECOVERY_MODE(pf)) {
322 hw->func_caps.iwarp = 0;
/* Initial guess: room for 40 capability elements. */
326 len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
328 if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
329 malloc(len, M_IXL, M_NOWAIT | M_ZERO))) {
330 device_printf(dev, "Unable to allocate cap memory\n");
334 /* This populates the hw struct */
335 status = i40e_aq_discover_capabilities(hw, buf, len,
336 &needed, i40e_aqc_opc_list_func_capabilities, NULL);
/* ENOMEM from the AQ means the buffer was too small for the element list. */
338 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
340 /* retry once with a larger buffer */
344 } else if (status != I40E_SUCCESS) {
345 device_printf(dev, "capability discovery failed; status %s, error %s\n",
346 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
351 * Some devices have both MDIO and I2C; since this isn't reported
352 * by the FW, check registers to see if an I2C interface exists.
354 i2c_intfc_num = ixl_find_i2c_interface(pf);
/* -1 means "no I2C interface found"; otherwise mark pf->has_i2c
 * (assignment elided from this view -- TODO confirm). */
355 if (i2c_intfc_num != -1)
358 /* Determine functions to use for driver I2C accesses */
359 switch (pf->i2c_access_method) {
360 case IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE: {
/* "Best available": prefer AQ-mediated PHY access when FW supports it,
 * otherwise fall back to direct register access. */
361 if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
362 pf->read_i2c_byte = ixl_read_i2c_byte_aq;
363 pf->write_i2c_byte = ixl_write_i2c_byte_aq;
365 pf->read_i2c_byte = ixl_read_i2c_byte_reg;
366 pf->write_i2c_byte = ixl_write_i2c_byte_reg;
370 case IXL_I2C_ACCESS_METHOD_AQ:
371 pf->read_i2c_byte = ixl_read_i2c_byte_aq;
372 pf->write_i2c_byte = ixl_write_i2c_byte_aq;
374 case IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD:
375 pf->read_i2c_byte = ixl_read_i2c_byte_reg;
376 pf->write_i2c_byte = ixl_write_i2c_byte_reg;
378 case IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS:
379 pf->read_i2c_byte = ixl_read_i2c_byte_bb;
380 pf->write_i2c_byte = ixl_write_i2c_byte_bb;
383 /* Should not happen */
384 device_printf(dev, "Error setting I2C access functions\n");
388 /* Print a subset of the capability information. */
390 "PF-ID[%d]: VFs %d, MSI-X %d, VF MSI-X %d, QPs %d, %s\n",
391 hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
392 hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
/* mdio_port_mode: 2 = I2C only, 1 = MDIO (with or without I2C). */
393 (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
394 (hw->func_caps.mdio_port_mode == 1 && pf->has_i2c) ? "MDIO & I2C" :
395 (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
401 /* For the set_advertise sysctl */
/*
 * Seed the "advertised speeds" state from the full supported-speed set so a
 * driver reload restores the default advertisement.
 */
403 ixl_set_initial_advertised_speeds(struct ixl_pf *pf)
405 device_t dev = pf->dev;
408 /* Make sure to initialize the device to the complete list of
409 * supported speeds on driver load, to ensure unloading and
410 * reloading the driver will restore this value.
412 err = ixl_set_advertised_speeds(pf, pf->supported_speeds, true);
414 /* Non-fatal error */
415 device_printf(dev, "%s: ixl_set_advertised_speeds() error %d\n",
/* Cache the sysctl-format value (converted from AQ link-speed bits). */
420 pf->advertised_speed =
421 ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
/*
 * Tear down HW-side structures in reverse init order: LAN HMC first,
 * then interrupts and the admin queue, finally the queue manager tag.
 * Failures are logged but teardown continues.
 */
425 ixl_teardown_hw_structs(struct ixl_pf *pf)
427 enum i40e_status_code status = 0;
428 struct i40e_hw *hw = &pf->hw;
429 device_t dev = pf->dev;
431 /* Shutdown LAN HMC */
432 if (hw->hmc.hmc_obj) {
433 status = i40e_shutdown_lan_hmc(hw);
436 "init: LAN HMC shutdown failure; status %s\n",
437 i40e_stat_str(hw, status));
442 /* Shutdown admin queue */
/* Mask the admin/misc interrupt before tearing the AQ down. */
443 ixl_disable_intr0(hw);
444 status = i40e_shutdown_adminq(hw);
447 "init: Admin Queue shutdown failure; status %s\n",
448 i40e_stat_str(hw, status));
/* Return the PF's queue allocation back to the queue manager. */
450 ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
456 ** Creates new filter with given MAC address and VLAN ID
/*
 * Allocate a zeroed MAC filter, link it at the head of `headp`, and copy in
 * the MAC address.  NOTE(review): the NULL-check after malloc, the vlan/flags
 * assignments, and the return statement are elided from this view.
 */
458 static struct ixl_mac_filter *
459 ixl_new_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
461 struct ixl_mac_filter *f;
463 /* create a new empty filter */
464 f = malloc(sizeof(struct ixl_mac_filter),
465 M_IXL, M_NOWAIT | M_ZERO);
467 LIST_INSERT_HEAD(headp, f, ftle);
468 bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
476 * ixl_free_filters - Free all filters in given list
477 * headp - pointer to list head
479 * Frees memory used by each entry in the list.
480 * Does not remove filters from HW.
483 ixl_free_filters(struct ixl_ftl_head *headp)
485 struct ixl_mac_filter *f, *nf;
/* Walk the list saving the next pointer before each entry is freed
 * (free()/loop lines are elided from this extracted view). */
487 f = LIST_FIRST(headp);
489 nf = LIST_NEXT(f, ftle);
/*
 * if_foreach_llmaddr() callback: stage a multicast MAC filter for addition.
 * Skips addresses that already have a filter; new filters go on the
 * caller-supplied `to_add` list and are tagged IXL_FILTER_MC.
 * NOTE(review): return statements are elided from this view.
 */
498 ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
500 struct ixl_add_maddr_arg *ama = arg;
501 struct ixl_vsi *vsi = ama->vsi;
502 const u8 *macaddr = (u8*)LLADDR(sdl);
503 struct ixl_mac_filter *f;
505 /* Does one already exist */
506 f = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY);
510 f = ixl_new_filter(&ama->to_add, macaddr, IXL_VLAN_ANY);
512 device_printf(vsi->dev, "WARNING: no filter available!!\n");
515 f->flags |= IXL_FILTER_MC;
520 /*********************************************************************
523 * Routines for multicast and vlan filter management.
525 *********************************************************************/
/*
 * Sync the VSI's multicast filters with the interface's LL multicast list.
 * If the list exceeds what HW filters can hold, fall back to multicast
 * promiscuous mode and drop the individual MC filters instead.
 */
527 ixl_add_multi(struct ixl_vsi *vsi)
529 struct ifnet *ifp = vsi->ifp;
530 struct i40e_hw *hw = vsi->hw;
532 struct ixl_add_maddr_arg cb_arg;
534 IOCTL_DEBUGOUT("ixl_add_multi: begin");
536 mcnt = if_llmaddr_count(ifp);
537 if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
/* Too many groups for per-address filters: go MC-promiscuous. */
538 i40e_aq_set_vsi_multicast_promiscuous(hw,
539 vsi->seid, TRUE, NULL);
540 /* delete all existing MC filters */
541 ixl_del_multi(vsi, true);
/* Collect new addresses via the ixl_add_maddr callback, then program HW. */
546 LIST_INIT(&cb_arg.to_add);
548 mcnt = if_foreach_llmaddr(ifp, ixl_add_maddr, &cb_arg);
550 ixl_add_hw_filters(vsi, &cb_arg.to_add, mcnt);
552 IOCTL_DEBUGOUT("ixl_add_multi: end");
/*
 * if_foreach_llmaddr() callback: report whether the interface address `sdl`
 * matches the filter passed in `arg` (return statements elided from view).
 */
556 ixl_match_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
558 struct ixl_mac_filter *f = arg;
560 if (ixl_ether_is_equal(f->macaddr, (u8 *)LLADDR(sdl)))
/*
 * Remove multicast filters: all of them when `all` is true, otherwise only
 * those no longer present in the interface's LL multicast list.  Removed
 * entries are collected on `to_del` and deleted from HW in one batch.
 */
567 ixl_del_multi(struct ixl_vsi *vsi, bool all)
569 struct ixl_ftl_head to_del;
570 struct ifnet *ifp = vsi->ifp;
571 struct ixl_mac_filter *f, *fn;
574 IOCTL_DEBUGOUT("ixl_del_multi: begin");
577 /* Search for removed multicast addresses */
578 LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, fn) {
/* Keep non-MC filters; keep MC filters still present on the interface
 * (unless `all` forces deletion).  `continue` line elided from view. */
579 if ((f->flags & IXL_FILTER_MC) == 0 ||
580 (!all && (if_foreach_llmaddr(ifp, ixl_match_maddr, f) == 0)))
583 LIST_REMOVE(f, ftle);
584 LIST_INSERT_HEAD(&to_del, f, ftle);
589 ixl_del_hw_filters(vsi, &to_del, mcnt);
/*
 * Log a link-up message including speed, requested/negotiated FEC,
 * autonegotiation state, and flow-control mode.
 * ixl_fec_string[]: [0] = RS-FEC, [1] = FC-FEC/BASE-R, [2] = none
 * (inferred from the bit tests below -- TODO confirm against the table).
 */
593 ixl_link_up_msg(struct ixl_pf *pf)
595 struct i40e_hw *hw = &pf->hw;
596 struct ifnet *ifp = pf->vsi.ifp;
597 char *req_fec_string, *neg_fec_string;
600 fec_abilities = hw->phy.link_info.req_fec_info;
601 /* If both RS and KR are requested, only show RS */
602 if (fec_abilities & I40E_AQ_REQUEST_FEC_RS)
603 req_fec_string = ixl_fec_string[0];
604 else if (fec_abilities & I40E_AQ_REQUEST_FEC_KR)
605 req_fec_string = ixl_fec_string[1];
607 req_fec_string = ixl_fec_string[2];
/* Negotiated FEC comes from fec_info; same RS-over-KR preference. */
609 if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA)
610 neg_fec_string = ixl_fec_string[0];
611 else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA)
612 neg_fec_string = ixl_fec_string[1];
614 neg_fec_string = ixl_fec_string[2];
616 log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
618 ixl_link_speed_string(hw->phy.link_info.link_speed),
619 req_fec_string, neg_fec_string,
620 (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
/* Flow control string index: both pause bits -> [3] (full), TX only -> [2],
 * RX only -> [1], neither -> [0]. */
621 (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
622 hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
623 ixl_fc_string[3] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ?
624 ixl_fc_string[2] : (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ?
625 ixl_fc_string[1] : ixl_fc_string[0]);
629 * Configure admin queue/misc interrupt cause registers in hardware.
632 ixl_configure_intr0_msix(struct ixl_pf *pf)
634 struct i40e_hw *hw = &pf->hw;
637 /* First set up the adminq - vector 0 */
638 wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
639 rd32(hw, I40E_PFINT_ICR0); /* read to clear */
/* Enable the misc interrupt causes routed to vector 0: ECC errors,
 * global reset, HMC errors, AQ events, malicious-driver detect, VF
 * resets, PE critical errors, and PCI exceptions. */
641 reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
642 I40E_PFINT_ICR0_ENA_GRST_MASK |
643 I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
644 I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
645 I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
646 I40E_PFINT_ICR0_ENA_VFLR_MASK |
647 I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK |
648 I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
649 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
652 * 0x7FF is the end of the queue list.
653 * This means we won't use MSI-X vector 0 for a queue interrupt
656 wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
657 /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
658 wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
/* Enable interrupt with SW ITR index so ITR0 changes take effect. */
660 wr32(hw, I40E_PFINT_DYN_CTL0,
661 I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
662 I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
664 wr32(hw, I40E_PFINT_STAT_CTL0, 0);
/*
 * Translate the HW's PHY-type capability bitmap into ifmedia entries.
 * Each group below maps one or more I40E_CAP_PHY_TYPE_* bits onto the
 * closest matching IFM_* media word.
 */
668 ixl_add_ifmedia(struct ifmedia *media, u64 phy_types)
670 /* Display supported media types */
671 if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
672 ifmedia_add(media, IFM_ETHER | IFM_100_TX, 0, NULL);
674 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
675 ifmedia_add(media, IFM_ETHER | IFM_1000_T, 0, NULL);
676 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
677 ifmedia_add(media, IFM_ETHER | IFM_1000_SX, 0, NULL);
678 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
679 ifmedia_add(media, IFM_ETHER | IFM_1000_LX, 0, NULL);
681 if (phy_types & (I40E_CAP_PHY_TYPE_2_5GBASE_T))
682 ifmedia_add(media, IFM_ETHER | IFM_2500_T, 0, NULL);
684 if (phy_types & (I40E_CAP_PHY_TYPE_5GBASE_T))
685 ifmedia_add(media, IFM_ETHER | IFM_5000_T, 0, NULL);
/* Several 10G attachment types all present as 10G twinax to ifmedia. */
687 if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
688 phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
689 phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
690 ifmedia_add(media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
692 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
693 ifmedia_add(media, IFM_ETHER | IFM_10G_SR, 0, NULL);
694 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
695 ifmedia_add(media, IFM_ETHER | IFM_10G_LR, 0, NULL);
696 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
697 ifmedia_add(media, IFM_ETHER | IFM_10G_T, 0, NULL);
/* Multiple 40G copper/backplane/AOC variants collapse to 40G CR4. */
699 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
700 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
701 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
702 phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
703 phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
704 ifmedia_add(media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
705 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
706 ifmedia_add(media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
707 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
708 ifmedia_add(media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
710 if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
711 ifmedia_add(media, IFM_ETHER | IFM_1000_KX, 0, NULL);
713 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
714 || phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
715 ifmedia_add(media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
716 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
717 ifmedia_add(media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
718 if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
719 ifmedia_add(media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
720 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
721 ifmedia_add(media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
722 if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
723 ifmedia_add(media, IFM_ETHER | IFM_10G_KR, 0, NULL);
725 if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
726 ifmedia_add(media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
728 if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
729 ifmedia_add(media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
730 if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
731 ifmedia_add(media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
733 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
734 ifmedia_add(media, IFM_ETHER | IFM_25G_KR, 0, NULL);
735 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
736 ifmedia_add(media, IFM_ETHER | IFM_25G_CR, 0, NULL);
737 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
738 ifmedia_add(media, IFM_ETHER | IFM_25G_SR, 0, NULL);
739 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
740 ifmedia_add(media, IFM_ETHER | IFM_25G_LR, 0, NULL);
741 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
742 ifmedia_add(media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
743 if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
744 ifmedia_add(media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
747 /*********************************************************************
749 * Get Firmware Switch configuration
750 * - this will need to be more robust when more complex
751 * switch configurations are enabled.
753 **********************************************************************/
/*
 * Fetch the FW switch configuration and record this PF's single VSI SEIDs.
 * NOTE(review): declarations of `ret`/`next`/`i` and return statements are
 * elided from this extracted view.
 */
755 ixl_switch_config(struct ixl_pf *pf)
757 struct i40e_hw *hw = &pf->hw;
758 struct ixl_vsi *vsi = &pf->vsi;
759 device_t dev = iflib_get_dev(vsi->ctx);
760 struct i40e_aqc_get_switch_config_resp *sw_config;
761 u8 aq_buf[I40E_AQ_LARGE_BUF];
/* Use a stack AQ buffer and overlay the response structure on it. */
765 memset(&aq_buf, 0, sizeof(aq_buf));
766 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
767 ret = i40e_aq_get_switch_config(hw, sw_config,
768 sizeof(aq_buf), &next, NULL);
770 device_printf(dev, "aq_get_switch_config() failed, error %d,"
771 " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
/* Optional debug dump of every reported switch element. */
774 if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
776 "Switch config: header reported: %d in structure, %d total\n",
777 LE16_TO_CPU(sw_config->header.num_reported),
778 LE16_TO_CPU(sw_config->header.num_total));
780 i < LE16_TO_CPU(sw_config->header.num_reported); i++) {
782 "-> %d: type=%d seid=%d uplink=%d downlink=%d\n", i,
783 sw_config->element[i].element_type,
784 LE16_TO_CPU(sw_config->element[i].seid),
785 LE16_TO_CPU(sw_config->element[i].uplink_seid),
786 LE16_TO_CPU(sw_config->element[i].downlink_seid));
789 /* Simplified due to a single VSI */
790 vsi->uplink_seid = LE16_TO_CPU(sw_config->element[0].uplink_seid);
791 vsi->downlink_seid = LE16_TO_CPU(sw_config->element[0].downlink_seid);
792 vsi->seid = LE16_TO_CPU(sw_config->element[0].seid);
/*
 * Create the per-VSI sysctl node under the device tree, attach ethernet
 * statistics, and (optionally) per-queue statistics.
 */
797 ixl_vsi_add_sysctls(struct ixl_vsi * vsi, const char * sysctl_name, bool queues_sysctls)
799 struct sysctl_oid *tree;
800 struct sysctl_oid_list *child;
801 struct sysctl_oid_list *vsi_list;
803 tree = device_get_sysctl_tree(vsi->dev);
804 child = SYSCTL_CHILDREN(tree);
805 vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name,
806 CTLFLAG_RD, NULL, "VSI Number");
808 vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
809 ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats);
/* Queue stats are optional; the guarding `if (queues_sysctls)` line is
 * elided from this extracted view -- presumably gates the call below. */
812 ixl_vsi_add_queues_stats(vsi, &vsi->sysctl_ctx);
816 * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
817 * Writes to the ITR registers immediately.
820 ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
822 struct ixl_pf *pf = (struct ixl_pf *)arg1;
823 device_t dev = pf->dev;
825 int requested_tx_itr;
/* Copy-out/copy-in through sysctl; newptr == NULL means read-only query. */
827 requested_tx_itr = pf->tx_itr;
828 error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
829 if ((error) || (req->newptr == NULL))
/* Reject manual ITR writes while dynamic (adaptive) ITR is active. */
831 if (pf->dynamic_tx_itr) {
833 "Cannot set TX itr value while dynamic TX itr is enabled\n");
836 if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
838 "Invalid TX itr value; value must be between 0 and %d\n",
/* Valid: store and push to the queue ITR registers immediately. */
843 pf->tx_itr = requested_tx_itr;
844 ixl_configure_tx_itr(pf);
850 * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
851 * Writes to the ITR registers immediately.
/* Mirror of ixl_sysctl_pf_tx_itr for the receive side. */
854 ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
856 struct ixl_pf *pf = (struct ixl_pf *)arg1;
857 device_t dev = pf->dev;
859 int requested_rx_itr;
861 requested_rx_itr = pf->rx_itr;
862 error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
863 if ((error) || (req->newptr == NULL))
/* Reject manual ITR writes while dynamic (adaptive) ITR is active. */
865 if (pf->dynamic_rx_itr) {
867 "Cannot set RX itr value while dynamic RX itr is enabled\n");
870 if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
872 "Invalid RX itr value; value must be between 0 and %d\n",
877 pf->rx_itr = requested_rx_itr;
878 ixl_configure_rx_itr(pf);
/*
 * Create the "mac" sysctl node and register each MAC/port statistic as a
 * read-only unsigned-quad sysctl, driven by the table below.
 */
884 ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
885 struct sysctl_oid_list *child,
886 struct i40e_hw_port_stats *stats)
888 struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO,
889 "mac", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Mac Statistics");
890 struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
892 struct i40e_eth_stats *eth_stats = &stats->eth;
893 ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
/* Table of {counter pointer, sysctl name, description}; terminated by a
 * sentinel entry with a NULL stat pointer (sentinel elided from view). */
895 struct ixl_sysctl_info ctls[] =
897 {&stats->crc_errors, "crc_errors", "CRC Errors"},
898 {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
899 {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
900 {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
901 {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
902 /* Packet Reception Stats */
903 {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
904 {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
905 {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
906 {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
907 {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
908 {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
909 {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
910 {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
911 {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
912 {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
913 {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
914 {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
915 /* Packet Transmission Stats */
916 {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
917 {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
918 {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
919 {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
920 {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
921 {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
922 {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
924 {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
925 {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
926 {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
927 {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
/* Walk the table until the NULL-stat sentinel, registering each entry. */
932 struct ixl_sysctl_info *entry = ctls;
933 while (entry->stat != 0)
935 SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
936 CTLFLAG_RD, entry->stat,
/*
 * Program the RSS hash key.  X722 parts take the key via an admin-queue
 * command; XL710 parts take it via direct PFQF_HKEY register writes.
 * NOTE(review): the RSS/!RSS #ifdef around the key source is elided from
 * this view -- rss_getkey() vs. ixl_get_default_rss_key() presumably
 * correspond to the kernel RSS option being on/off.
 */
943 ixl_set_rss_key(struct ixl_pf *pf)
945 struct i40e_hw *hw = &pf->hw;
946 struct ixl_vsi *vsi = &pf->vsi;
947 device_t dev = pf->dev;
948 u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
949 enum i40e_status_code status;
952 /* Fetch the configured RSS key */
953 rss_getkey((uint8_t *) &rss_seed);
955 ixl_get_default_rss_key(rss_seed);
957 /* Fill out hash function seed */
958 if (hw->mac.type == I40E_MAC_X722) {
959 struct i40e_aqc_get_set_rss_key_data key_data;
/* 52 bytes = standard key (40) + extended hash key (12) for X722. */
960 bcopy(rss_seed, &key_data, 52);
961 status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
964 "i40e_aq_set_rss_key status %s, error %s\n",
965 i40e_stat_str(hw, status),
966 i40e_aq_str(hw, hw->aq.asq_last_status));
968 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
969 i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
974 * Configure enabled PCTYPES for RSS.
/*
 * Build the HENA (hash-enable) bitmap of packet classifier types and write
 * it to the split PFQF_HENA registers.  NOTE(review): the RSS #ifdef/#else
 * structure is elided -- the rss_gethashconfig() mapping applies when the
 * kernel RSS option is enabled; otherwise a per-MAC default is used.
 */
977 ixl_set_rss_pctypes(struct ixl_pf *pf)
979 struct i40e_hw *hw = &pf->hw;
980 u64 set_hena = 0, hena;
/* Map each kernel RSS hash type onto the matching i40e PCTYPE bit. */
985 rss_hash_config = rss_gethashconfig();
986 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
987 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
988 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
989 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
990 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
991 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
992 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
993 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
994 if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
995 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
996 if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
997 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
998 if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
999 set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
/* Default HENA differs between X722 and XL710 MAC types. */
1001 if (hw->mac.type == I40E_MAC_X722)
1002 set_hena = IXL_DEFAULT_RSS_HENA_X722;
1004 set_hena = IXL_DEFAULT_RSS_HENA_XL710;
/* OR the new bits into the current 64-bit HENA value (read as two u32s). */
1006 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
1007 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
1009 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
1010 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
1015 ** Setup the PF's RSS parameters.
/* Program RSS in order: hash key, enabled packet types, lookup table. */
1018 ixl_config_rss(struct ixl_pf *pf)
1020 ixl_set_rss_key(pf);
1021 ixl_set_rss_pctypes(pf);
1022 ixl_set_rss_hlut(pf);
1026 * In some firmware versions there is default MAC/VLAN filter
1027 * configured which interferes with filters managed by driver.
1028 * Make sure it's removed.
1031 ixl_del_default_hw_filters(struct ixl_vsi *vsi)
1033 struct i40e_aqc_remove_macvlan_element_data e;
/* First attempt: exact-match delete of the permanent MAC (a VLAN field
 * assignment appears to be elided from this view between bcopy and flags). */
1035 bzero(&e, sizeof(e));
1036 bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1038 e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1039 i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
/* Second attempt: same MAC but ignore the VLAN, to catch the other form
 * of the firmware default filter.  AQ errors are deliberately ignored --
 * the filter may simply not exist. */
1041 bzero(&e, sizeof(e));
1042 bcopy(vsi->hw->mac.perm_addr, e.mac_addr, ETHER_ADDR_LEN);
1044 e.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1045 I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1046 i40e_aq_remove_macvlan(vsi->hw, vsi->seid, &e, 1, NULL);
1050 ** Initialize filter list and add filters that the hardware
1051 ** needs to know about.
1053 ** Requires VSI's seid to be set before calling.
1056 ixl_init_filters(struct ixl_vsi *vsi)
1058 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
1060 ixl_dbg_filter(pf, "%s: start\n", __func__);
1062 /* Initialize mac filter list for VSI */
1063 LIST_INIT(&vsi->ftl);
1064 vsi->num_hw_filters = 0;
1066 /* Receive broadcast Ethernet frames */
1067 i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);
/* VF VSIs stop here; PF-only setup follows (the return/skip statement is
 * elided from this view). */
1069 if (IXL_VSI_IS_VF(vsi))
/* Clear interfering firmware-default filters, then add our own MAC. */
1072 ixl_del_default_hw_filters(vsi);
1074 ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);
1077 * Prevent Tx flow control frames from being sent out by
1078 * non-firmware transmitters.
1079 * This affects every VSI in the PF.
1081 #ifndef IXL_DEBUG_FC
1082 i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
/* With IXL_DEBUG_FC the drop filter is opt-in via a tunable instead. */
1084 if (pf->enable_tx_fc_filter)
1085 i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
/*
 * Re-program every SW-known filter into HW (used after a reset).  The
 * existing list is moved to a temporary head first so ixl_add_hw_filters()
 * can rebuild vsi->ftl without concatenating the list onto itself.
 */
1090 ixl_reconfigure_filters(struct ixl_vsi *vsi)
1092 struct i40e_hw *hw = vsi->hw;
1093 struct ixl_ftl_head tmp;
1097 * The ixl_add_hw_filters function adds filters configured
1098 * in HW to a list in VSI. Move all filters to a temporary
1099 * list to avoid corrupting it by concatenating to itself.
1102 LIST_CONCAT(&tmp, &vsi->ftl, ixl_mac_filter, ftle);
1103 cnt = vsi->num_hw_filters;
1104 vsi->num_hw_filters = 0;
1106 ixl_add_hw_filters(vsi, &tmp, cnt);
1108 /* Filter could be removed if MAC address was changed */
1109 ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
/* If VLAN HW filtering is off, nothing more to do (early-return line
 * elided from this view). */
1111 if ((if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWFILTER) == 0)
1114 * VLAN HW filtering is enabled, make sure that filters
1115 * for all registered VLAN tags are configured
1117 ixl_add_vlan_filters(vsi, hw->mac.addr);
1121 * This routine adds a MAC/VLAN filter to the software filter
1122 * list, then adds that new filter to the HW if it doesn't already
1123 * exist in the SW filter list.
1126 ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1128 struct ixl_mac_filter *f, *tmp;
1131 struct ixl_ftl_head to_add;
1138 ixl_dbg_filter(pf, "ixl_add_filter: " MAC_FORMAT ", vlan %4d\n",
1139 MAC_FORMAT_ARGS(macaddr), vlan);
1141 /* Does one already exist */
1142 f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
/* NOTE(review): the early return taken when a filter already exists is
 * elided in this excerpt. */
1147 f = ixl_new_filter(&to_add, macaddr, vlan);
1149 device_printf(dev, "WARNING: no filter available!!\n");
1152 if (f->vlan != IXL_VLAN_ANY)
1153 f->flags |= IXL_FILTER_VLAN;
1158 ** Is this the first vlan being registered, if so we
1159 ** need to remove the ANY filter that indicates we are
1160 ** not in a vlan, and replace that with a 0 filter.
1162 if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
1163 tmp = ixl_find_filter(&vsi->ftl, macaddr, IXL_VLAN_ANY);
1165 struct ixl_ftl_head to_del;
1167 /* Prepare new filter first to avoid removing
1168 * VLAN_ANY filter if allocation fails */
1169 f = ixl_new_filter(&to_add, macaddr, 0);
1171 device_printf(dev, "WARNING: no filter available!!\n");
/* Allocation failed: discard the filter queued earlier so nothing
 * half-configured is left on the to_add list. */
1172 free(LIST_FIRST(&to_add), M_IXL);
/* Move the now-obsolete VLAN_ANY filter onto a delete list and
 * remove it from hardware. */
1177 LIST_REMOVE(tmp, ftle);
1179 LIST_INSERT_HEAD(&to_del, tmp, ftle);
1180 ixl_del_hw_filters(vsi, &to_del, 1);
1184 ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
1188 * ixl_add_vlan_filters - Add MAC/VLAN filters for all registered VLANs
1189 * @vsi: pointer to VSI
1190 * @macaddr: MAC address
1192 * Adds MAC/VLAN filter for each VLAN configured on the interface
1193 * if there is enough HW filters. Otherwise adds a single filter
1194 * for all tagged and untagged frames to allow all configured VLANs
1195 * to receive traffic.
1198 ixl_add_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
1200 struct ixl_ftl_head to_add;
1201 struct ixl_mac_filter *f;
/* Too many (or zero) VLANs for per-VLAN HW filters: fall back to a
 * single match-any-VLAN filter and return.
 * NOTE(review): the return after this call is elided in this excerpt. */
1205 if (vsi->num_vlans == 0 || vsi->num_vlans > IXL_MAX_VLAN_FILTERS) {
1206 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
1211 /* Add filter for untagged frames if it does not exist yet */
1212 f = ixl_find_filter(&vsi->ftl, macaddr, 0);
1214 f = ixl_new_filter(&to_add, macaddr, 0);
1216 device_printf(vsi->dev, "WARNING: no filter available!!\n");
/* Walk the VLAN bitmap: bit_ffs_at finds the next set bit at or after
 * i, so each iteration jumps straight to the next registered VLAN id. */
1222 for (i = 1; i < EVL_VLID_MASK; i = vlan + 1) {
1223 bit_ffs_at(vsi->vlans_map, i, IXL_VLANS_MAP_LEN, &vlan);
1227 /* Does one already exist */
1228 f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
1232 f = ixl_new_filter(&to_add, macaddr, vlan);
1234 device_printf(vsi->dev, "WARNING: no filter available!!\n");
/* Allocation failure mid-loop: release everything queued so far. */
1235 ixl_free_filters(&to_add);
1241 ixl_add_hw_filters(vsi, &to_add, to_add_cnt);
/*
 * Remove a single MAC/VLAN filter from the software list and from
 * hardware. When the last tagged VLAN filter for the MAC is removed,
 * the VLAN-0 (untagged) filter is also dropped and replaced with a
 * single match-any-VLAN filter.
 */
1245 ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
1247 struct ixl_mac_filter *f, *tmp;
1248 struct ixl_ftl_head ftl_head;
1251 ixl_dbg_filter((struct ixl_pf *)vsi->back,
1252 "ixl_del_filter: " MAC_FORMAT ", vlan %4d\n",
1253 MAC_FORMAT_ARGS(macaddr), vlan);
1255 f = ixl_find_filter(&vsi->ftl, macaddr, vlan);
/* NOTE(review): the not-found early return is elided in this excerpt. */
1259 LIST_REMOVE(f, ftle);
1260 LIST_INIT(&ftl_head);
1261 LIST_INSERT_HEAD(&ftl_head, f, ftle);
/* NOTE(review): body of this branch (presumably vlan bookkeeping)
 * is elided in this excerpt. */
1262 if (f->vlan == IXL_VLAN_ANY && (f->flags & IXL_FILTER_VLAN) != 0)
1265 /* If this is not the last vlan just remove the filter */
1266 if (vlan == IXL_VLAN_ANY || vsi->num_vlans > 0) {
1267 ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);
1271 /* It's the last vlan, we need to switch back to a non-vlan filter */
1272 tmp = ixl_find_filter(&vsi->ftl, macaddr, 0);
/* Queue the untagged (VLAN 0) filter for deletion alongside f. */
1274 LIST_REMOVE(tmp, ftle);
1275 LIST_INSERT_AFTER(f, tmp, ftle);
1278 ixl_del_hw_filters(vsi, &ftl_head, to_del_cnt);
1280 ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
1284 * ixl_del_all_vlan_filters - Delete all VLAN filters with given MAC
1285 * @vsi: VSI which filters need to be removed
1286 * @macaddr: MAC address
1288 * Remove all MAC/VLAN filters with a given MAC address. For multicast
1289 * addresses there is always single filter for all VLANs used (IXL_VLAN_ANY)
1290 * so skip them to speed up processing. Those filters should be removed
1291 * using ixl_del_filter function.
1294 ixl_del_all_vlan_filters(struct ixl_vsi *vsi, const u8 *macaddr)
1296 struct ixl_mac_filter *f, *tmp;
1297 struct ixl_ftl_head to_del;
/* Collect every unicast filter matching macaddr onto to_del;
 * multicast entries are skipped per the note above.
 * NOTE(review): the "continue" after this condition is elided here. */
1302 LIST_FOREACH_SAFE(f, &vsi->ftl, ftle, tmp) {
1303 if ((f->flags & IXL_FILTER_MC) != 0 ||
1304 !ixl_ether_is_equal(f->macaddr, macaddr))
1307 LIST_REMOVE(f, ftle);
1308 LIST_INSERT_HEAD(&to_del, f, ftle);
1312 ixl_dbg_filter((struct ixl_pf *)vsi->back,
1313 "%s: " MAC_FORMAT ", to_del_cnt: %d\n",
1314 __func__, MAC_FORMAT_ARGS(macaddr), to_del_cnt);
1316 ixl_del_hw_filters(vsi, &to_del, to_del_cnt);
1320 ** Find the filter with both matching mac addr and vlan id
/*
 * Linear scan of the filter list; returns the matching entry or
 * (presumably) NULL when nothing matches -- the tail of the function
 * is elided in this excerpt.
 */
1322 struct ixl_mac_filter *
1323 ixl_find_filter(struct ixl_ftl_head *headp, const u8 *macaddr, s16 vlan)
1325 struct ixl_mac_filter *f;
1327 LIST_FOREACH(f, headp, ftle) {
1328 if (ixl_ether_is_equal(f->macaddr, macaddr) &&
1329 (f->vlan == vlan)) {
1338 ** This routine takes additions to the vsi filter
1339 ** table and creates an Admin Queue call to create
1340 ** the filters in the hardware.
/*
 * On AQ success the whole to_add list is concatenated into vsi->ftl;
 * on AQ failure each entry is checked individually via match_method and
 * only the ones the HW actually accepted are kept. The AQ element array
 * is heap-allocated (M_NOWAIT) and must be freed on all exit paths --
 * the free() is elided in this excerpt.
 */
1343 ixl_add_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_add, int cnt)
1345 struct i40e_aqc_add_macvlan_element_data *a, *b;
1346 struct ixl_mac_filter *f, *fn;
1350 enum i40e_status_code status;
1357 ixl_dbg_filter(pf, "ixl_add_hw_filters: cnt: %d\n", cnt);
1360 ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n");
1364 a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
1365 M_IXL, M_NOWAIT | M_ZERO);
1367 device_printf(dev, "add_hw_filters failed to get memory\n");
/* Translate each SW filter into an AQ add-macvlan element. */
1371 LIST_FOREACH(f, to_add, ftle) {
1372 b = &a[j]; // a pox on fvl long names :)
1373 bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
1374 if (f->vlan == IXL_VLAN_ANY) {
1376 b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
1378 b->vlan_tag = f->vlan;
1381 b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
1382 ixl_dbg_filter(pf, "ADD: " MAC_FORMAT "\n",
1383 MAC_FORMAT_ARGS(f->macaddr));
1389 /* Something went wrong */
1391 "%s ERROR: list of filters to short expected: %d, found: %d\n",
1393 ixl_free_filters(to_add);
1397 status = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
1398 if (status == I40E_SUCCESS) {
1399 LIST_CONCAT(&vsi->ftl, to_add, ixl_mac_filter, ftle);
1400 vsi->num_hw_filters += j;
1405 "i40e_aq_add_macvlan status %s, error %s\n",
1406 i40e_stat_str(hw, status),
1407 i40e_aq_str(hw, hw->aq.asq_last_status));
1410 /* Verify which filters were actually configured in HW
1411 * and add them to the list */
1412 LIST_FOREACH_SAFE(f, to_add, ftle, fn) {
1413 LIST_REMOVE(f, ftle);
/* NOTE(review): the index used here on the partial-failure path is
 * elided in this excerpt -- confirm it tracks each element, not the
 * final loop count j, in the full source. */
1414 if (a[j].match_method == I40E_AQC_MM_ERR_NO_RES) {
1416 "%s filter " MAC_FORMAT " VTAG: %d not added\n",
1418 MAC_FORMAT_ARGS(f->macaddr),
1422 LIST_INSERT_HEAD(&vsi->ftl, f, ftle);
1423 vsi->num_hw_filters++;
1433 ** This routine takes removals in the vsi filter
1434 ** table and creates an Admin Queue call to delete
1435 ** the filters in the hardware.
/*
 * Consumes (unlinks and frees -- the free is elided here) every entry
 * on to_del. A "filter does not exist" AQ error is reported per element
 * via error_code but does not abort the operation.
 */
1438 ixl_del_hw_filters(struct ixl_vsi *vsi, struct ixl_ftl_head *to_del, int cnt)
1440 struct i40e_aqc_remove_macvlan_element_data *d, *e;
1444 struct ixl_mac_filter *f, *f_temp;
1445 enum i40e_status_code status;
1452 ixl_dbg_filter(pf, "%s: start, cnt: %d\n", __func__, cnt);
1454 d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
1455 M_IXL, M_NOWAIT | M_ZERO);
1457 device_printf(dev, "%s: failed to get memory\n", __func__);
/* Translate each SW filter into an AQ remove-macvlan element and
 * unlink it from the caller's list as we go. */
1461 LIST_FOREACH_SAFE(f, to_del, ftle, f_temp) {
1462 e = &d[j]; // a pox on fvl long names :)
1463 bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
1464 e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1465 if (f->vlan == IXL_VLAN_ANY) {
1467 e->flags |= I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1469 e->vlan_tag = f->vlan;
1472 ixl_dbg_filter(pf, "DEL: " MAC_FORMAT "\n",
1473 MAC_FORMAT_ARGS(f->macaddr));
1475 /* delete entry from the list */
1476 LIST_REMOVE(f, ftle);
1481 if (j != cnt || !LIST_EMPTY(to_del)) {
1482 /* Something went wrong */
1484 "%s ERROR: wrong size of list of filters, expected: %d, found: %d\n",
1486 ixl_free_filters(to_del);
1489 status = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
/* On AQ failure, log the status and then report any per-element
 * error codes returned by firmware. */
1492 "%s: i40e_aq_remove_macvlan status %s, error %s\n",
1493 __func__, i40e_stat_str(hw, status),
1494 i40e_aq_str(hw, hw->aq.asq_last_status));
1495 for (int i = 0; i < j; i++) {
1496 if (d[i].error_code == 0)
1499 "%s Filter does not exist " MAC_FORMAT " VTAG: %d\n",
1500 __func__, MAC_FORMAT_ARGS(d[i].mac_addr),
1505 vsi->num_hw_filters -= j;
1510 ixl_dbg_filter(pf, "%s: end\n", __func__);
/*
 * Enable one PF TX queue: set QENA_REQ in QTX_ENA, then poll up to
 * 10 x 10us for QENA_STAT to latch. Prints an error if the queue never
 * reports enabled. Return value handling is elided in this excerpt.
 */
1514 ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1516 struct i40e_hw *hw = &pf->hw;
/* Map the VSI-relative queue index to the global PF queue index. */
1521 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1523 ixl_dbg(pf, IXL_DBG_EN_DIS,
1524 "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
1527 i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
1529 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1530 reg |= I40E_QTX_ENA_QENA_REQ_MASK |
1531 I40E_QTX_ENA_QENA_STAT_MASK;
1532 wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
1533 /* Verify the enable took */
1534 for (int j = 0; j < 10; j++) {
1535 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1536 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
1538 i40e_usec_delay(10);
1540 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
1541 device_printf(pf->dev, "TX queue %d still disabled!\n",
/*
 * Enable one PF RX queue: mirror of ixl_enable_tx_ring but for QRX_ENA
 * (and no pre-queue-cfg step). Polls up to 10 x 10us for QENA_STAT.
 */
1550 ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1552 struct i40e_hw *hw = &pf->hw;
1557 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1559 ixl_dbg(pf, IXL_DBG_EN_DIS,
1560 "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
1563 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1564 reg |= I40E_QRX_ENA_QENA_REQ_MASK |
1565 I40E_QRX_ENA_QENA_STAT_MASK;
1566 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
1567 /* Verify the enable took */
1568 for (int j = 0; j < 10; j++) {
1569 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1570 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
1572 i40e_usec_delay(10);
1574 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
1575 device_printf(pf->dev, "RX queue %d still disabled!\n",
/*
 * Enable both the TX and RX rings for one VSI queue index.
 * The early-return on TX failure is elided in this excerpt.
 */
1584 ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1588 error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
1589 /* Called function already prints error message */
1592 error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
1597 * Returns error on first ring that is detected hung.
/*
 * Disable one PF TX queue: pre-cfg with FALSE, wait 500us, clear
 * QENA_REQ, then poll up to 10 x 10ms for QENA_STAT to drop.
 * Note the disable poll uses msec delays vs usec on the enable path.
 */
1600 ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1602 struct i40e_hw *hw = &pf->hw;
1607 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1609 ixl_dbg(pf, IXL_DBG_EN_DIS,
1610 "Disabling PF TX ring %4d / VSI TX ring %4d...\n",
1613 i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
1614 i40e_usec_delay(500);
1616 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1617 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
1618 wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
1619 /* Verify the disable took */
1620 for (int j = 0; j < 10; j++) {
1621 reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
1622 if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
1624 i40e_msec_delay(10);
1626 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
1627 device_printf(pf->dev, "TX queue %d still enabled!\n",
1636 * Returns error on first ring that is detected hung.
/*
 * Disable one PF RX queue: clear QENA_REQ in QRX_ENA, then poll up to
 * 10 x 10ms for QENA_STAT to drop. Mirror of ixl_disable_tx_ring
 * without the pre-queue-cfg/delay step.
 */
1639 ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1641 struct i40e_hw *hw = &pf->hw;
1646 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
1648 ixl_dbg(pf, IXL_DBG_EN_DIS,
1649 "Disabling PF RX ring %4d / VSI RX ring %4d...\n",
1652 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1653 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
1654 wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
1655 /* Verify the disable took */
1656 for (int j = 0; j < 10; j++) {
1657 reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
1658 if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
1660 i40e_msec_delay(10);
1662 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
1663 device_printf(pf->dev, "RX queue %d still enabled!\n",
/*
 * Disable both the TX and RX rings for one VSI queue index.
 * The early-return on TX failure is elided in this excerpt.
 */
1672 ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
1676 error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
1677 /* Called function already prints error message */
1680 error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
/*
 * Decode a TX Malicious Driver Detection event: read and clear the
 * global GL_MDET_TX register, then attribute the event to the PF
 * (PF_MDET_TX) and/or one of the VFs (VP_MDET_TX), bumping the VF's
 * event counter, and log an appropriately detailed message.
 * NOTE(review): the early return taken when no event is latched
 * (mdd_detected == false) is elided in this excerpt.
 */
1685 ixl_handle_tx_mdd_event(struct ixl_pf *pf)
1687 struct i40e_hw *hw = &pf->hw;
1688 device_t dev = pf->dev;
1690 bool mdd_detected = false;
1691 bool pf_mdd_detected = false;
1692 bool vf_mdd_detected = false;
1695 u8 pf_mdet_num, vp_mdet_num;
1698 /* find what triggered the MDD event */
1699 reg = rd32(hw, I40E_GL_MDET_TX);
1700 if (reg & I40E_GL_MDET_TX_VALID_MASK) {
1701 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
1702 I40E_GL_MDET_TX_PF_NUM_SHIFT;
1703 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
1704 I40E_GL_MDET_TX_VF_NUM_SHIFT;
1705 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
1706 I40E_GL_MDET_TX_EVENT_SHIFT;
1707 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
1708 I40E_GL_MDET_TX_QUEUE_SHIFT;
/* Write-1-to-clear the latched event so the next one can be seen. */
1709 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
1710 mdd_detected = true;
1716 reg = rd32(hw, I40E_PF_MDET_TX);
1717 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
1718 wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
1719 pf_mdet_num = hw->pf_id;
1720 pf_mdd_detected = true;
1723 /* Check if MDD was caused by a VF */
1724 for (int i = 0; i < pf->num_vfs; i++) {
1726 reg = rd32(hw, I40E_VP_MDET_TX(i));
1727 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
1728 wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
1730 vf->num_mdd_events++;
1731 vf_mdd_detected = true;
1735 /* Print out an error message */
1736 if (vf_mdd_detected && pf_mdd_detected)
1738 "Malicious Driver Detection event %d"
1739 " on TX queue %d, pf number %d (PF-%d), vf number %d (VF-%d)\n",
1740 event, queue, pf_num, pf_mdet_num, vf_num, vp_mdet_num);
1741 else if (vf_mdd_detected && !pf_mdd_detected)
1743 "Malicious Driver Detection event %d"
1744 " on TX queue %d, pf number %d, vf number %d (VF-%d)\n",
1745 event, queue, pf_num, vf_num, vp_mdet_num);
1746 else if (!vf_mdd_detected && pf_mdd_detected)
1748 "Malicious Driver Detection event %d"
1749 " on TX queue %d, pf number %d (PF-%d)\n",
1750 event, queue, pf_num, pf_mdet_num);
1751 /* Theoretically shouldn't happen */
1754 "TX Malicious Driver Detection event (unknown)\n");
/*
 * RX counterpart of ixl_handle_tx_mdd_event: decode GL_MDET_RX,
 * attribute to PF/VF via PF_MDET_RX / VP_MDET_RX, and log. As the
 * comment below notes, GL_MDET_RX has no VF-number field, so messages
 * only carry the function number plus the VP_MDET-derived VF index.
 * NOTE(review): the early return when no event is latched is elided.
 */
1758 ixl_handle_rx_mdd_event(struct ixl_pf *pf)
1760 struct i40e_hw *hw = &pf->hw;
1761 device_t dev = pf->dev;
1763 bool mdd_detected = false;
1764 bool pf_mdd_detected = false;
1765 bool vf_mdd_detected = false;
1768 u8 pf_mdet_num, vp_mdet_num;
1772 * GL_MDET_RX doesn't contain VF number information, unlike
1775 reg = rd32(hw, I40E_GL_MDET_RX);
1776 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
1777 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
1778 I40E_GL_MDET_RX_FUNCTION_SHIFT;
1779 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
1780 I40E_GL_MDET_RX_EVENT_SHIFT;
1781 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
1782 I40E_GL_MDET_RX_QUEUE_SHIFT;
/* Write-1-to-clear the latched event. */
1783 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
1784 mdd_detected = true;
1790 reg = rd32(hw, I40E_PF_MDET_RX);
1791 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
1792 wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
1793 pf_mdet_num = hw->pf_id;
1794 pf_mdd_detected = true;
1797 /* Check if MDD was caused by a VF */
1798 for (int i = 0; i < pf->num_vfs; i++) {
1800 reg = rd32(hw, I40E_VP_MDET_RX(i));
1801 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
1802 wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
1804 vf->num_mdd_events++;
1805 vf_mdd_detected = true;
1809 /* Print out an error message */
1810 if (vf_mdd_detected && pf_mdd_detected)
1812 "Malicious Driver Detection event %d"
1813 " on RX queue %d, pf number %d (PF-%d), (VF-%d)\n",
1814 event, queue, pf_num, pf_mdet_num, vp_mdet_num);
1815 else if (vf_mdd_detected && !pf_mdd_detected)
1817 "Malicious Driver Detection event %d"
1818 " on RX queue %d, pf number %d, (VF-%d)\n",
1819 event, queue, pf_num, vp_mdet_num);
1820 else if (!vf_mdd_detected && pf_mdd_detected)
1822 "Malicious Driver Detection event %d"
1823 " on RX queue %d, pf number %d (PF-%d)\n",
1824 event, queue, pf_num, pf_mdet_num);
1825 /* Theoretically shouldn't happen */
1828 "RX Malicious Driver Detection event (unknown)\n");
1832 * ixl_handle_mdd_event
1834 * Called from interrupt handler to identify possibly malicious vfs
1835 * (But also detects events from the PF, as well)
1838 ixl_handle_mdd_event(struct ixl_pf *pf)
1840 struct i40e_hw *hw = &pf->hw;
1844 * Handle both TX/RX because it's possible they could
1845 * both trigger in the same interrupt.
1847 ixl_handle_tx_mdd_event(pf);
1848 ixl_handle_rx_mdd_event(pf);
/* Clear the pending flag, then unmask the MDD cause in ICR0_ENA so
 * subsequent events interrupt again. */
1850 atomic_clear_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
1852 /* re-enable mdd interrupt cause */
1853 reg = rd32(hw, I40E_PFINT_ICR0_ENA);
1854 reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
1855 wr32(hw, I40E_PFINT_ICR0_ENA, reg);
/* Enable the "other"/admin interrupt (vector 0) via PFINT_DYN_CTL0. */
1860 ixl_enable_intr0(struct i40e_hw *hw)
1864 /* Use IXL_ITR_NONE so ITR isn't updated here */
1865 reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
1866 I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
1867 (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
1868 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/* Disable interrupt vector 0: write DYN_CTL0 without INTENA set,
 * keeping ITR untouched via IXL_ITR_NONE. */
1872 ixl_disable_intr0(struct i40e_hw *hw)
1876 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
1877 wr32(hw, I40E_PFINT_DYN_CTL0, reg);
/* Enable the MSI-X interrupt for queue vector `id` via PFINT_DYN_CTLN,
 * without changing the ITR (IXL_ITR_NONE). */
1882 ixl_enable_queue(struct i40e_hw *hw, int id)
1886 reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
1887 I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
1888 (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
1889 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
/* Disable the MSI-X interrupt for queue vector `id` (INTENA not set),
 * keeping the ITR untouched via IXL_ITR_NONE. */
1893 ixl_disable_queue(struct i40e_hw *hw, int id)
1897 reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
1898 wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
/*
 * Recover after an EMP (firmware) reset: remember whether the interface
 * was up, quiesce, then either drop into limited "recovery mode" (if
 * firmware reports it) or rebuild the HW structures and clear the
 * RESETTING flag. Several lines (including the reset-wait logic hinted
 * at by the comment below) are elided in this excerpt.
 */
1902 ixl_handle_empr_reset(struct ixl_pf *pf)
1904 struct ixl_vsi *vsi = &pf->vsi;
1905 bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
1907 ixl_prepare_for_reset(pf, is_up);
1909 * i40e_pf_reset checks the type of reset and acts
1910 * accordingly. If EMP or Core reset was performed
1911 * doing PF reset is not necessary and it sometimes
/* Newly-detected recovery mode: flag it, warn the admin, and force the
 * link state down. NOTE(review): the return after this branch is
 * elided -- confirm control flow in the full source. */
1916 if (!IXL_PF_IN_RECOVERY_MODE(pf) &&
1917 ixl_get_fw_mode(pf) == IXL_FW_MODE_RECOVERY) {
1918 atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
1919 device_printf(pf->dev,
1920 "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
1921 pf->link_up = FALSE;
1922 ixl_update_link_status(pf);
1925 ixl_rebuild_hw_structs_after_reset(pf, is_up);
1927 atomic_clear_32(&pf->state, IXL_PF_STATE_RESETTING);
/*
 * Refresh all PF-wide (port-level) hardware statistics into pf->stats,
 * using pf->stats_offsets as the baseline captured at first read
 * (see ixl_stat_update48/32). Also detects newly received XOFF pause
 * frames for the watchdog, refreshes the main VSI's stats, and the
 * eth stats of every enabled VF VSI.
 */
1931 ixl_update_stats_counters(struct ixl_pf *pf)
1933 struct i40e_hw *hw = &pf->hw;
1934 struct ixl_vsi *vsi = &pf->vsi;
/* Snapshot XOFF-received before the update so we can tell whether any
 * new pause frames arrived during this interval. */
1936 u64 prev_link_xoff_rx = pf->stats.link_xoff_rx;
1938 struct i40e_hw_port_stats *nsd = &pf->stats;
1939 struct i40e_hw_port_stats *osd = &pf->stats_offsets;
1941 /* Update hw stats */
1942 ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
1943 pf->stat_offsets_loaded,
1944 &osd->crc_errors, &nsd->crc_errors);
1945 ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
1946 pf->stat_offsets_loaded,
1947 &osd->illegal_bytes, &nsd->illegal_bytes);
1948 ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
1949 I40E_GLPRT_GORCL(hw->port),
1950 pf->stat_offsets_loaded,
1951 &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
1952 ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
1953 I40E_GLPRT_GOTCL(hw->port),
1954 pf->stat_offsets_loaded,
1955 &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
1956 ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
1957 pf->stat_offsets_loaded,
1958 &osd->eth.rx_discards,
1959 &nsd->eth.rx_discards);
1960 ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
1961 I40E_GLPRT_UPRCL(hw->port),
1962 pf->stat_offsets_loaded,
1963 &osd->eth.rx_unicast,
1964 &nsd->eth.rx_unicast);
1965 ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
1966 I40E_GLPRT_UPTCL(hw->port),
1967 pf->stat_offsets_loaded,
1968 &osd->eth.tx_unicast,
1969 &nsd->eth.tx_unicast);
1970 ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
1971 I40E_GLPRT_MPRCL(hw->port),
1972 pf->stat_offsets_loaded,
1973 &osd->eth.rx_multicast,
1974 &nsd->eth.rx_multicast);
1975 ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
1976 I40E_GLPRT_MPTCL(hw->port),
1977 pf->stat_offsets_loaded,
1978 &osd->eth.tx_multicast,
1979 &nsd->eth.tx_multicast);
1980 ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
1981 I40E_GLPRT_BPRCL(hw->port),
1982 pf->stat_offsets_loaded,
1983 &osd->eth.rx_broadcast,
1984 &nsd->eth.rx_broadcast);
1985 ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
1986 I40E_GLPRT_BPTCL(hw->port),
1987 pf->stat_offsets_loaded,
1988 &osd->eth.tx_broadcast,
1989 &nsd->eth.tx_broadcast);
1991 ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
1992 pf->stat_offsets_loaded,
1993 &osd->tx_dropped_link_down,
1994 &nsd->tx_dropped_link_down);
1995 ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
1996 pf->stat_offsets_loaded,
1997 &osd->mac_local_faults,
1998 &nsd->mac_local_faults);
1999 ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
2000 pf->stat_offsets_loaded,
2001 &osd->mac_remote_faults,
2002 &nsd->mac_remote_faults);
2003 ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
2004 pf->stat_offsets_loaded,
2005 &osd->rx_length_errors,
2006 &nsd->rx_length_errors);
2008 /* Flow control (LFC) stats */
2009 ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
2010 pf->stat_offsets_loaded,
2011 &osd->link_xon_rx, &nsd->link_xon_rx);
2012 ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
2013 pf->stat_offsets_loaded,
2014 &osd->link_xon_tx, &nsd->link_xon_tx);
2015 ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
2016 pf->stat_offsets_loaded,
2017 &osd->link_xoff_rx, &nsd->link_xoff_rx);
2018 ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
2019 pf->stat_offsets_loaded,
2020 &osd->link_xoff_tx, &nsd->link_xoff_tx);
2023 * For watchdog management we need to know if we have been paused
2024 * during the last interval, so capture that here.
2026 if (pf->stats.link_xoff_rx != prev_link_xoff_rx)
2027 vsi->shared->isc_pause_frames = 1;
2029 /* Packet size stats rx */
2030 ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
2031 I40E_GLPRT_PRC64L(hw->port),
2032 pf->stat_offsets_loaded,
2033 &osd->rx_size_64, &nsd->rx_size_64);
2034 ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
2035 I40E_GLPRT_PRC127L(hw->port),
2036 pf->stat_offsets_loaded,
2037 &osd->rx_size_127, &nsd->rx_size_127);
2038 ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
2039 I40E_GLPRT_PRC255L(hw->port),
2040 pf->stat_offsets_loaded,
2041 &osd->rx_size_255, &nsd->rx_size_255);
2042 ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
2043 I40E_GLPRT_PRC511L(hw->port),
2044 pf->stat_offsets_loaded,
2045 &osd->rx_size_511, &nsd->rx_size_511);
2046 ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
2047 I40E_GLPRT_PRC1023L(hw->port),
2048 pf->stat_offsets_loaded,
2049 &osd->rx_size_1023, &nsd->rx_size_1023);
2050 ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
2051 I40E_GLPRT_PRC1522L(hw->port),
2052 pf->stat_offsets_loaded,
2053 &osd->rx_size_1522, &nsd->rx_size_1522);
2054 ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
2055 I40E_GLPRT_PRC9522L(hw->port),
2056 pf->stat_offsets_loaded,
2057 &osd->rx_size_big, &nsd->rx_size_big);
2059 /* Packet size stats tx */
2060 ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
2061 I40E_GLPRT_PTC64L(hw->port),
2062 pf->stat_offsets_loaded,
2063 &osd->tx_size_64, &nsd->tx_size_64);
2064 ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
2065 I40E_GLPRT_PTC127L(hw->port),
2066 pf->stat_offsets_loaded,
2067 &osd->tx_size_127, &nsd->tx_size_127);
2068 ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
2069 I40E_GLPRT_PTC255L(hw->port),
2070 pf->stat_offsets_loaded,
2071 &osd->tx_size_255, &nsd->tx_size_255);
2072 ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
2073 I40E_GLPRT_PTC511L(hw->port),
2074 pf->stat_offsets_loaded,
2075 &osd->tx_size_511, &nsd->tx_size_511);
2076 ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
2077 I40E_GLPRT_PTC1023L(hw->port),
2078 pf->stat_offsets_loaded,
2079 &osd->tx_size_1023, &nsd->tx_size_1023);
2080 ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
2081 I40E_GLPRT_PTC1522L(hw->port),
2082 pf->stat_offsets_loaded,
2083 &osd->tx_size_1522, &nsd->tx_size_1522);
2084 ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
2085 I40E_GLPRT_PTC9522L(hw->port),
2086 pf->stat_offsets_loaded,
2087 &osd->tx_size_big, &nsd->tx_size_big);
2089 ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
2090 pf->stat_offsets_loaded,
2091 &osd->rx_undersize, &nsd->rx_undersize);
2092 ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
2093 pf->stat_offsets_loaded,
2094 &osd->rx_fragments, &nsd->rx_fragments);
2095 ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
2096 pf->stat_offsets_loaded,
2097 &osd->rx_oversize, &nsd->rx_oversize);
2098 ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
2099 pf->stat_offsets_loaded,
2100 &osd->rx_jabber, &nsd->rx_jabber);
/* Energy Efficient Ethernet (LPI) status and counters. */
2102 i40e_get_phy_lpi_status(hw, nsd);
2104 i40e_lpi_stat_update(hw, pf->stat_offsets_loaded,
2105 &osd->tx_lpi_count, &nsd->tx_lpi_count,
2106 &osd->rx_lpi_count, &nsd->rx_lpi_count);
/* First pass has now captured baselines; future reads are deltas. */
2108 pf->stat_offsets_loaded = true;
2111 /* Update vsi stats */
2112 ixl_update_vsi_stats(vsi);
2114 for (int i = 0; i < pf->num_vfs; i++) {
2116 if (vf->vf_flags & VF_FLAG_ENABLED)
2117 ixl_update_eth_stats(&pf->vfs[i].vsi);
2122 * Update VSI-specific ethernet statistics counters.
/*
 * Reads the per-VSI GLV_* counters (selected by the VSI's HW stat
 * counter index) into vsi->eth_stats, offset-corrected against
 * vsi->eth_stats_offsets; marks offsets loaded after the first pass.
 */
2125 ixl_update_eth_stats(struct ixl_vsi *vsi)
2127 struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
2128 struct i40e_hw *hw = &pf->hw;
2129 struct i40e_eth_stats *es;
2130 struct i40e_eth_stats *oes;
2131 u16 stat_idx = vsi->info.stat_counter_idx;
2133 es = &vsi->eth_stats;
2134 oes = &vsi->eth_stats_offsets;
2136 /* Gather up the stats that the hw collects */
2137 ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
2138 vsi->stat_offsets_loaded,
2139 &oes->tx_errors, &es->tx_errors);
2140 ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
2141 vsi->stat_offsets_loaded,
2142 &oes->rx_discards, &es->rx_discards);
2144 ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
2145 I40E_GLV_GORCL(stat_idx),
2146 vsi->stat_offsets_loaded,
2147 &oes->rx_bytes, &es->rx_bytes);
2148 ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
2149 I40E_GLV_UPRCL(stat_idx),
2150 vsi->stat_offsets_loaded,
2151 &oes->rx_unicast, &es->rx_unicast);
2152 ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
2153 I40E_GLV_MPRCL(stat_idx),
2154 vsi->stat_offsets_loaded,
2155 &oes->rx_multicast, &es->rx_multicast);
2156 ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
2157 I40E_GLV_BPRCL(stat_idx),
2158 vsi->stat_offsets_loaded,
2159 &oes->rx_broadcast, &es->rx_broadcast);
2161 ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
2162 I40E_GLV_GOTCL(stat_idx),
2163 vsi->stat_offsets_loaded,
2164 &oes->tx_bytes, &es->tx_bytes);
2165 ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
2166 I40E_GLV_UPTCL(stat_idx),
2167 vsi->stat_offsets_loaded,
2168 &oes->tx_unicast, &es->tx_unicast);
2169 ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
2170 I40E_GLV_MPTCL(stat_idx),
2171 vsi->stat_offsets_loaded,
2172 &oes->tx_multicast, &es->tx_multicast);
2173 ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
2174 I40E_GLV_BPTCL(stat_idx),
2175 vsi->stat_offsets_loaded,
2176 &oes->tx_broadcast, &es->tx_broadcast);
2177 vsi->stat_offsets_loaded = true;
/*
 * Refresh the VSI's eth stats and propagate them (combined with
 * port-level error counters, nsd) into the ifnet counters via the
 * IXL_SET_* macros. NOTE(review): the assignment binding nsd to
 * pf->stats is elided in this excerpt.
 */
2181 ixl_update_vsi_stats(struct ixl_vsi *vsi)
2185 struct i40e_eth_stats *es;
2188 struct i40e_hw_port_stats *nsd;
2192 es = &vsi->eth_stats;
2195 ixl_update_eth_stats(vsi);
2197 tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
2199 /* Update ifnet stats */
2200 IXL_SET_IPACKETS(vsi, es->rx_unicast +
2203 IXL_SET_OPACKETS(vsi, es->tx_unicast +
2206 IXL_SET_IBYTES(vsi, es->rx_bytes);
2207 IXL_SET_OBYTES(vsi, es->tx_bytes);
2208 IXL_SET_IMCASTS(vsi, es->rx_multicast);
2209 IXL_SET_OMCASTS(vsi, es->tx_multicast);
/* Input errors aggregate the port-level RX malformed-frame counters. */
2211 IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
2212 nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
2214 IXL_SET_OERRORS(vsi, es->tx_errors);
2215 IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
2216 IXL_SET_OQDROPS(vsi, tx_discards);
2217 IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
2218 IXL_SET_COLLISIONS(vsi, 0);
2222 * Reset all of the stats for the given pf
/* Zero both the live counters and the baselines, and force the next
 * ixl_update_stats_counters() pass to re-capture offsets. */
2225 ixl_pf_reset_stats(struct ixl_pf *pf)
2227 bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
2228 bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
2229 pf->stat_offsets_loaded = false;
2233 * Resets all stats of the given vsi
/* VSI counterpart of ixl_pf_reset_stats: zero counters and baselines,
 * and force offset re-capture on the next eth-stats update. */
2236 ixl_vsi_reset_stats(struct ixl_vsi *vsi)
2238 bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
2239 bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
2240 vsi->stat_offsets_loaded = false;
2244 * Read and update a 48 bit stat from the hw
2246 * Since the device stats are not reset at PFReset, they likely will not
2247 * be zeroed when the driver starts. We'll save the first values read
2248 * and use them as offsets to be subtracted from the raw values in order
2249 * to report stats that count from zero.
/*
 * NOTE(review): the "capture *offset on first read" branch guarded by
 * offset_loaded is elided in this excerpt.
 */
2252 ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
2253 bool offset_loaded, u64 *offset, u64 *stat)
2257 #if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
2258 new_data = rd64(hw, loreg)
2261 * Use two rd32's instead of one rd64; FreeBSD versions before
2262 * 10 don't support 64-bit bus reads/writes.
2264 new_data = rd32(hw, loreg);
2265 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
/* Counter is 48 bits wide; if the raw value has wrapped below the
 * saved offset, add 2^48 before subtracting, then mask to 48 bits. */
2270 if (new_data >= *offset)
2271 *stat = new_data - *offset;
2273 *stat = (new_data + ((u64)1 << 48)) - *offset;
2274 *stat &= 0xFFFFFFFFFFFFULL;
2278 * Read and update a 32 bit stat from the hw
/*
 * 32-bit counterpart of ixl_stat_update48: wrap-corrects with 2^32.
 * NOTE(review): the offset_loaded first-read branch is elided here.
 */
2281 ixl_stat_update32(struct i40e_hw *hw, u32 reg,
2282 bool offset_loaded, u64 *offset, u64 *stat)
2286 new_data = rd32(hw, reg);
2289 if (new_data >= *offset)
2290 *stat = (u32)(new_data - *offset);
2292 *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
2296 * Add subset of device sysctls safe to use in recovery mode
/*
 * Registers only the sysctls that don't touch full device state:
 * fw_version at the device root, plus a hidden (CTLFLAG_SKIP) "debug"
 * subtree with debug-mask knobs, debug-data dump, reset triggers, and
 * the queue interrupt table.
 */
2299 ixl_add_sysctls_recovery_mode(struct ixl_pf *pf)
2301 device_t dev = pf->dev;
2303 struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2304 struct sysctl_oid_list *ctx_list =
2305 SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2307 struct sysctl_oid *debug_node;
2308 struct sysctl_oid_list *debug_list;
2310 SYSCTL_ADD_PROC(ctx, ctx_list,
2311 OID_AUTO, "fw_version",
2312 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2313 ixl_sysctl_show_fw, "A", "Firmware version")
2315 /* Add sysctls meant to print debug information, but don't list them
2316 * in "sysctl -a" output. */
2317 debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2318 OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
2320 debug_list = SYSCTL_CHILDREN(debug_node);
2322 SYSCTL_ADD_UINT(ctx, debug_list,
2323 OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2324 &pf->hw.debug_mask, 0, "Shared code debug message level");
2326 SYSCTL_ADD_UINT(ctx, debug_list,
2327 OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2328 &pf->dbg_mask, 0, "Non-shared code debug message level");
2330 SYSCTL_ADD_PROC(ctx, debug_list,
2331 OID_AUTO, "dump_debug_data",
2332 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2333 pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
2335 SYSCTL_ADD_PROC(ctx, debug_list,
2336 OID_AUTO, "do_pf_reset",
2337 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2338 pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
2340 SYSCTL_ADD_PROC(ctx, debug_list,
2341 OID_AUTO, "do_core_reset",
2342 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2343 pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
2345 SYSCTL_ADD_PROC(ctx, debug_list,
2346 OID_AUTO, "do_global_reset",
2347 CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2348 pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
2350 SYSCTL_ADD_PROC(ctx, debug_list,
2351 OID_AUTO, "queue_interrupt_table",
2352 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2353 pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
/*
 * Register the full device sysctl tree for normal (non-recovery)
 * operation: link/speed/flow-control controls, FEC knobs (25G parts
 * only), EEE status/counters, and a hidden "debug" subtree with
 * diagnostic dumps, resets and I2C access.
 * NOTE(review): listing is gappy (extraction dropped interior lines);
 * comments only added, code left byte-identical.
 */
2357 ixl_add_device_sysctls(struct ixl_pf *pf)
2359 	device_t dev = pf->dev;
2360 	struct i40e_hw *hw = &pf->hw;
2362 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
2363 	struct sysctl_oid_list *ctx_list =
2364 	SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
2366 	struct sysctl_oid *debug_node;
2367 	struct sysctl_oid_list *debug_list;
2369 	struct sysctl_oid *fec_node;
2370 	struct sysctl_oid_list *fec_list;
2371 	struct sysctl_oid *eee_node;
2372 	struct sysctl_oid_list *eee_list;
2374 /* Set up sysctls */
2375 	SYSCTL_ADD_PROC(ctx, ctx_list,
2376 	OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2377 	pf, 0, ixl_sysctl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
2379 	SYSCTL_ADD_PROC(ctx, ctx_list,
2380 	OID_AUTO, "advertise_speed",
2381 	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2382 	ixl_sysctl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
2384 	SYSCTL_ADD_PROC(ctx, ctx_list,
2385 	OID_AUTO, "supported_speeds",
2386 	CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2387 	ixl_sysctl_supported_speeds, "I", IXL_SYSCTL_HELP_SUPPORTED_SPEED);
2389 	SYSCTL_ADD_PROC(ctx, ctx_list,
2390 	OID_AUTO, "current_speed",
2391 	CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2392 	ixl_sysctl_current_speed, "A", "Current Port Speed");
2394 	SYSCTL_ADD_PROC(ctx, ctx_list,
2395 	OID_AUTO, "fw_version",
2396 	CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2397 	ixl_sysctl_show_fw, "A", "Firmware version");
2399 	SYSCTL_ADD_PROC(ctx, ctx_list,
2400 	OID_AUTO, "unallocated_queues",
2401 	CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
2402 	ixl_sysctl_unallocated_queues, "I",
2403 	"Queues not allocated to a PF or VF");
/* The OID name lines for the two ITR knobs were lost in extraction. */
2405 	SYSCTL_ADD_PROC(ctx, ctx_list,
2407 	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2408 	ixl_sysctl_pf_tx_itr, "I",
2409 	"Immediately set TX ITR value for all queues");
2411 	SYSCTL_ADD_PROC(ctx, ctx_list,
2413 	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2414 	ixl_sysctl_pf_rx_itr, "I",
2415 	"Immediately set RX ITR value for all queues");
2417 	SYSCTL_ADD_INT(ctx, ctx_list,
2418 	OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
2419 	&pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
2421 	SYSCTL_ADD_INT(ctx, ctx_list,
2422 	OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
2423 	&pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
2425 /* Add FEC sysctls for 25G adapters */
2426 	if (i40e_is_25G_device(hw->device_id)) {
2427 	fec_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2428 	OID_AUTO, "fec", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2430 	fec_list = SYSCTL_CHILDREN(fec_node);
2432 	SYSCTL_ADD_PROC(ctx, fec_list,
2433 	OID_AUTO, "fc_ability",
2434 	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2435 	ixl_sysctl_fec_fc_ability, "I", "FC FEC ability enabled");
2437 	SYSCTL_ADD_PROC(ctx, fec_list,
2438 	OID_AUTO, "rs_ability",
2439 	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2440 	ixl_sysctl_fec_rs_ability, "I", "RS FEC ability enabled");
2442 	SYSCTL_ADD_PROC(ctx, fec_list,
2443 	OID_AUTO, "fc_requested",
2444 	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2445 	ixl_sysctl_fec_fc_request, "I",
2446 	"FC FEC mode requested on link");
2448 	SYSCTL_ADD_PROC(ctx, fec_list,
2449 	OID_AUTO, "rs_requested",
2450 	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2451 	ixl_sysctl_fec_rs_request, "I",
2452 	"RS FEC mode requested on link");
2454 	SYSCTL_ADD_PROC(ctx, fec_list,
2455 	OID_AUTO, "auto_fec_enabled",
2456 	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, pf, 0,
2457 	ixl_sysctl_fec_auto_enable, "I",
2458 	"Let FW decide FEC ability/request modes");
2461 	SYSCTL_ADD_PROC(ctx, ctx_list,
2462 	OID_AUTO, "fw_lldp", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2463 	pf, 0, ixl_sysctl_fw_lldp, "I", IXL_SYSCTL_HELP_FW_LLDP);
/* Energy Efficient Ethernet: enable switch plus LPI status/counters. */
2465 	eee_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2466 	OID_AUTO, "eee", CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
2467 	"Energy Efficient Ethernet (EEE) Sysctls");
2468 	eee_list = SYSCTL_CHILDREN(eee_node);
2470 	SYSCTL_ADD_PROC(ctx, eee_list,
2471 	OID_AUTO, "enable", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
2472 	pf, 0, ixl_sysctl_eee_enable, "I",
2473 	"Enable Energy Efficient Ethernet (EEE)");
2475 	SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "tx_lpi_status",
2476 	CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_status, 0,
2479 	SYSCTL_ADD_UINT(ctx, eee_list, OID_AUTO, "rx_lpi_status",
2480 	CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_status, 0,
2483 	SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "tx_lpi_count",
2484 	CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.tx_lpi_count,
2487 	SYSCTL_ADD_UQUAD(ctx, eee_list, OID_AUTO, "rx_lpi_count",
2488 	CTLFLAG_RD | CTLFLAG_MPSAFE, &pf->stats.rx_lpi_count,
2491 /* Add sysctls meant to print debug information, but don't list them
2492  * in "sysctl -a" output. */
2493 	debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
2494 	OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
2496 	debug_list = SYSCTL_CHILDREN(debug_node);
2498 	SYSCTL_ADD_UINT(ctx, debug_list,
2499 	OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
2500 	&pf->hw.debug_mask, 0, "Shared code debug message level");
2502 	SYSCTL_ADD_UINT(ctx, debug_list,
2503 	OID_AUTO, "core_debug_mask", CTLFLAG_RW,
2504 	&pf->dbg_mask, 0, "Non-shared code debug message level");
2506 	SYSCTL_ADD_PROC(ctx, debug_list,
2507 	OID_AUTO, "link_status",
2508 	CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2509 	pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
2511 	SYSCTL_ADD_PROC(ctx, debug_list,
2512 	OID_AUTO, "phy_abilities",
2513 	CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2514 	pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
2516 	SYSCTL_ADD_PROC(ctx, debug_list,
2517 	OID_AUTO, "filter_list",
2518 	CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2519 	pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
2521 	SYSCTL_ADD_PROC(ctx, debug_list,
2522 	OID_AUTO, "hw_res_alloc",
2523 	CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2524 	pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
2526 	SYSCTL_ADD_PROC(ctx, debug_list,
2527 	OID_AUTO, "switch_config",
2528 	CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2529 	pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
2531 	SYSCTL_ADD_PROC(ctx, debug_list,
2532 	OID_AUTO, "switch_vlans",
2533 	CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2534 	pf, 0, ixl_sysctl_switch_vlans, "I", "HW Switch VLAN Configuration");
2536 	SYSCTL_ADD_PROC(ctx, debug_list,
2537 	OID_AUTO, "rss_key",
2538 	CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2539 	pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
2541 	SYSCTL_ADD_PROC(ctx, debug_list,
2542 	OID_AUTO, "rss_lut",
2543 	CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2544 	pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
2546 	SYSCTL_ADD_PROC(ctx, debug_list,
2547 	OID_AUTO, "rss_hena",
2548 	CTLTYPE_ULONG | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2549 	pf, 0, ixl_sysctl_hena, "LU", "View enabled packet types for RSS");
2551 	SYSCTL_ADD_PROC(ctx, debug_list,
2552 	OID_AUTO, "disable_fw_link_management",
2553 	CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2554 	pf, 0, ixl_sysctl_fw_link_management, "I", "Disable FW Link Management");
2556 	SYSCTL_ADD_PROC(ctx, debug_list,
2557 	OID_AUTO, "dump_debug_data",
2558 	CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2559 	pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
2561 	SYSCTL_ADD_PROC(ctx, debug_list,
2562 	OID_AUTO, "do_pf_reset",
2563 	CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2564 	pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
2566 	SYSCTL_ADD_PROC(ctx, debug_list,
2567 	OID_AUTO, "do_core_reset",
2568 	CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2569 	pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
2571 	SYSCTL_ADD_PROC(ctx, debug_list,
2572 	OID_AUTO, "do_global_reset",
2573 	CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
2574 	pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
2576 	SYSCTL_ADD_PROC(ctx, debug_list,
2577 	OID_AUTO, "queue_interrupt_table",
2578 	CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2579 	pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
/* I2C access sysctls (guard condition, if any, lost in extraction). */
2582 	SYSCTL_ADD_PROC(ctx, debug_list,
2583 	OID_AUTO, "read_i2c_byte",
2584 	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2585 	pf, 0, ixl_sysctl_read_i2c_byte, "I", IXL_SYSCTL_HELP_READ_I2C);
2587 	SYSCTL_ADD_PROC(ctx, debug_list,
2588 	OID_AUTO, "write_i2c_byte",
2589 	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT,
2590 	pf, 0, ixl_sysctl_write_i2c_byte, "I", IXL_SYSCTL_HELP_WRITE_I2C);
2592 	SYSCTL_ADD_PROC(ctx, debug_list,
2593 	OID_AUTO, "read_i2c_diag_data",
2594 	CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
2595 	pf, 0, ixl_sysctl_read_i2c_diag_data, "A", "Dump selected diagnostic data from FW");
/*
 * Sysctl handler: report the number of HW queues not yet allocated to a
 * PF or VF, as tracked by the driver's queue manager (pf->qmgr).
 * Read-only: sysctl_handle_int() is given NULL oldp storage and a value.
 */
2600  * Primarily for finding out how many queues can be assigned to VFs,
2604 ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
2606 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2609 	queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
2611 	return sysctl_handle_int(oidp, NULL, queues, req);
/*
 * Map an i40e_aq_link_speed enum value to a human-readable string from
 * the local link_speed_str[] table. Each case selects a table index
 * (index assignments dropped from this extracted listing); unknown
 * speeds fall through to the I40E_LINK_SPEED_UNKNOWN entry.
 */
2615 ixl_link_speed_string(enum i40e_aq_link_speed link_speed)
2617 	const char * link_speed_str[] = {
2630 	switch (link_speed) {
2631 	case I40E_LINK_SPEED_100MB:
2634 	case I40E_LINK_SPEED_1GB:
2637 	case I40E_LINK_SPEED_10GB:
2640 	case I40E_LINK_SPEED_40GB:
2643 	case I40E_LINK_SPEED_20GB:
2646 	case I40E_LINK_SPEED_25GB:
2649 	case I40E_LINK_SPEED_2_5GB:
2652 	case I40E_LINK_SPEED_5GB:
2655 	case I40E_LINK_SPEED_UNKNOWN:
2661 	return (link_speed_str[index]);
/*
 * Sysctl handler: refresh link state via ixl_update_link_status() and
 * return the current port speed as a string (via ixl_link_speed_string
 * on hw->phy.link_info.link_speed).
 */
2665 ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
2667 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2668 	struct i40e_hw *hw = &pf->hw;
2671 	ixl_update_link_status(pf);
2673 	error = sysctl_handle_string(oidp,
2675 	ixl_link_speed_string(hw->phy.link_info.link_speed)),
/*
 * Translate between the sysctl speed bitmask and the Admin Queue link
 * speed bitmask. Each speedmap[] entry packs the AQ flag in the low
 * byte and the corresponding sysctl flag in the high byte; to_aq
 * selects the direction of the translation.
 */
2682  * Converts 8-bit speeds value to and from sysctl flags and
2683  * Admin Queue flags.
2686 ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
2688 #define SPEED_MAP_SIZE 8
2689 	static u16 speedmap[SPEED_MAP_SIZE] = {
2690 	(I40E_LINK_SPEED_100MB | (0x1 << 8)),
2691 	(I40E_LINK_SPEED_1GB   | (0x2 << 8)),
2692 	(I40E_LINK_SPEED_10GB  | (0x4 << 8)),
2693 	(I40E_LINK_SPEED_20GB  | (0x8 << 8)),
2694 	(I40E_LINK_SPEED_25GB  | (0x10 << 8)),
2695 	(I40E_LINK_SPEED_40GB  | (0x20 << 8)),
2696 	(I40E_LINK_SPEED_2_5GB | (0x40 << 8)),
2697 	(I40E_LINK_SPEED_5GB   | (0x80 << 8)),
2701 	for (int i = 0; i < SPEED_MAP_SIZE; i++) {
/* sysctl->AQ: match high byte, emit low byte; AQ->sysctl: reverse. */
2703 	retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
2705 	retval |= (speeds & speedmap[i]) ? (speedmap[i] >> 8) : 0;
/*
 * Program the PHY's advertised link speeds. Reads current PHY
 * capabilities from FW, builds a set_phy_config request preserving the
 * existing abilities/EEE/FEC settings, and sends it. 'from_aq' chooses
 * whether 'speeds' is already in AQ format or needs conversion from
 * the sysctl bitmask. Errors from either AQ call are logged with the
 * last ASQ status.
 */
2712 ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
2714 	struct i40e_hw *hw = &pf->hw;
2715 	device_t dev = pf->dev;
2716 	struct i40e_aq_get_phy_abilities_resp abilities;
2717 	struct i40e_aq_set_phy_config config;
2718 	enum i40e_status_code aq_error = 0;
2720 /* Get current capability information */
2721 	aq_error = i40e_aq_get_phy_capabilities(hw,
2722 	FALSE, FALSE, &abilities, NULL);
2725 	"%s: Error getting phy capabilities %d,"
2726 	" aq error: %d\n", __func__, aq_error,
2727 	hw->aq.asq_last_status);
2731 /* Prepare new config */
2732 	bzero(&config, sizeof(config));
2734 	config.link_speed = speeds;
2736 	config.link_speed = ixl_convert_sysctl_aq_link_speed(speeds, true);
2737 	config.phy_type = abilities.phy_type;
2738 	config.phy_type_ext = abilities.phy_type_ext;
2739 	config.abilities = abilities.abilities
2740 	| I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
2741 	config.eee_capability = abilities.eee_capability;
2742 	config.eeer = abilities.eeer_val;
2743 	config.low_power_ctrl = abilities.d3_lpan;
/* Preserve only the FEC configuration bits of the combined field. */
2744 	config.fec_config = abilities.fec_cfg_curr_mod_ext_info
2745 	& I40E_AQ_PHY_FEC_CONFIG_MASK;
2747 /* Do aq command & restart link */
2748 	aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
2751 	"%s: Error setting new phy config %d,"
2752 	" aq error: %d\n", __func__, aq_error,
2753 	hw->aq.asq_last_status);
/*
 * Sysctl handler: report the adapter's supported link speeds as the
 * sysctl-format bitmask (converted from pf->supported_speeds, which is
 * stored in AQ format). Read-only.
 */
2761 ** Supported link speeds
2773 ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS)
2775 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2776 	int supported = ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false);
2778 	return sysctl_handle_int(oidp, NULL, supported, req);
/*
 * Sysctl handler: set the advertised link speeds from a user-supplied
 * bitmask (flag values documented below). Rejects the request in FW
 * recovery mode, when bits above 0xFF are set, or when the converted
 * AQ mask is not a subset of pf->supported_speeds. On success the new
 * mask is stored in pf->advertised_speed and link status is refreshed.
 */
2782 ** Control link advertise speed:
2784 **	0x1 - advertise 100 Mb
2785 **	0x2 - advertise 1G
2786 **	0x4 - advertise 10G
2787 **	0x8 - advertise 20G
2788 **	0x10 - advertise 25G
2789 **	0x20 - advertise 40G
2790 **	0x40 - advertise 2.5G
2791 **	0x80 - advertise 5G
2793 ** Set to 0 to disable link
2796 ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
2798 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2799 	device_t dev = pf->dev;
2800 	u8 converted_speeds;
2801 	int requested_ls = 0;
2804 /* Read in new mode */
2805 	requested_ls = pf->advertised_speed;
2806 	error = sysctl_handle_int(oidp, &requested_ls, 0, req);
/* Read-only access (no new value supplied) returns early. */
2807 	if ((error) || (req->newptr == NULL))
2809 	if (IXL_PF_IN_RECOVERY_MODE(pf)) {
2810 	device_printf(dev, "Interface is currently in FW recovery mode. "
2811 	"Setting advertise speed not supported\n");
2815 /* Error out if bits outside of possible flag range are set */
2816 	if ((requested_ls & ~((u8)0xFF)) != 0) {
2817 	device_printf(dev, "Input advertised speed out of range; "
2818 	"valid flags are: 0x%02x\n",
2819 	ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
2823 /* Check if adapter supports input value */
2824 	converted_speeds = ixl_convert_sysctl_aq_link_speed((u8)requested_ls, true);
/* OR-with-supported must be a no-op, else some requested bit is unsupported. */
2825 	if ((converted_speeds | pf->supported_speeds) != pf->supported_speeds) {
2826 	device_printf(dev, "Invalid advertised speed; "
2827 	"valid flags are: 0x%02x\n",
2828 	ixl_convert_sysctl_aq_link_speed(pf->supported_speeds, false));
2832 	error = ixl_set_advertised_speeds(pf, requested_ls, false);
2836 	pf->advertised_speed = requested_ls;
2837 	ixl_update_link_status(pf);
/*
 * Convert a bitmap of i40e_aq_link_speed flags to the highest
 * corresponding link rate in bits/second (IF_Mbps/IF_Gbps values).
 * Checks speeds in descending order; 100 Mb/s is the floor returned
 * when no known bit is set.
 */
2842  * Input: bitmap of enum i40e_aq_link_speed
2845 ixl_max_aq_speed_to_value(u8 link_speeds)
2847 	if (link_speeds & I40E_LINK_SPEED_40GB)
2849 	if (link_speeds & I40E_LINK_SPEED_25GB)
2851 	if (link_speeds & I40E_LINK_SPEED_20GB)
2853 	if (link_speeds & I40E_LINK_SPEED_10GB)
2855 	if (link_speeds & I40E_LINK_SPEED_5GB)
2857 	if (link_speeds & I40E_LINK_SPEED_2_5GB)
2858 	return IF_Mbps(2500);
2859 	if (link_speeds & I40E_LINK_SPEED_1GB)
2861 	if (link_speeds & I40E_LINK_SPEED_100MB)
2862 	return IF_Mbps(100);
2864 /* Minimum supported link speed */
2865 	return IF_Mbps(100);
/*
 * Discover PCIe link width and speed for this adapter, record it in
 * the shared hw struct, print it, and warn if the slot's bandwidth is
 * likely insufficient for the ports' aggregate maximum link rate.
 * X722 parts are skipped (not PCIe-attached in the same way).
 */
2869 ** Get the width and transaction speed of
2870 ** the bus this adapter is plugged into.
2873 ixl_get_bus_info(struct ixl_pf *pf)
2875 	struct i40e_hw *hw = &pf->hw;
2876 	device_t dev = pf->dev;
2878 	u32 offset, num_ports;
2881 /* Some devices don't use PCIE */
2882 	if (hw->mac.type == I40E_MAC_X722)
2885 /* Read PCI Express Capabilities Link Status Register */
2886 	pci_find_cap(dev, PCIY_EXPRESS, &offset);
2887 	link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
2889 /* Fill out hw struct with PCIE info */
2890 	i40e_set_pci_config_data(hw, link);
2892 /* Use info to print out bandwidth messages */
2893 	device_printf(dev,"PCI Express Bus: Speed %s %s\n",
2894 	((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
2895 	(hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
2896 	(hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
2897 	(hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
2898 	(hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
2899 	(hw->bus.width == i40e_bus_width_pcie_x2) ? "Width x2" :
2900 	(hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
2904  * If adapter is in slot with maximum supported speed,
2905  * no warning message needs to be printed out.
2907 	if (hw->bus.speed >= i40e_bus_speed_8000
2908 	&& hw->bus.width >= i40e_bus_width_pcie_x8)
/* One port per valid function bit reported in device capabilities. */
2911 	num_ports = bitcount32(hw->func_caps.valid_functions);
2912 	max_speed = ixl_max_aq_speed_to_value(pf->supported_speeds) / 1000000;
2914 	if ((num_ports * max_speed) > hw->bus.speed * hw->bus.width) {
2915 	device_printf(dev, "PCI-Express bandwidth available"
2916 	" for this device may be insufficient for"
2917 	" optimal performance.\n");
2918 	device_printf(dev, "Please move the device to a different"
2919 	" PCI-e link with more lanes and/or higher"
2920 	" transfer rate.\n");
/*
 * Sysctl handler: format the NVM/firmware version string into an sbuf
 * (via ixl_nvm_version_str) and return it to userland.
 */
2925 ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
2927 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
2928 	struct i40e_hw *hw = &pf->hw;
2931 	sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
2932 	ixl_nvm_version_str(hw, sbuf);
/*
 * Debug helper: decode and print an NVM update (nvmupd) command.
 * The module pointer lives in config bits 7:0 and the flags in bits
 * 11:8. A read with ptr/flags all-ones, offset 0 and size 1 is the
 * tool's "get driver status" probe and gets a dedicated message.
 */
2940 ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
2942 	u8 nvma_ptr = nvma->config & 0xFF;
2943 	u8 nvma_flags = (nvma->config & 0xF00) >> 8;
2944 	const char * cmd_str;
2946 	switch (nvma->command) {
2948 	if (nvma_ptr == 0xF && nvma_flags == 0xF &&
2949 	nvma->offset == 0 && nvma->data_size == 1) {
2950 	device_printf(dev, "NVMUPD: Get Driver Status Command\n");
2955 	case I40E_NVM_WRITE:
2959 	device_printf(dev, "NVMUPD: unknown command: 0x%08x\n", nvma->command);
2963 	"NVMUPD: cmd: %s ptr: 0x%02x flags: 0x%01x offset: 0x%08x data_s: 0x%08x\n",
2964 	cmd_str, nvma_ptr, nvma_flags, nvma->offset, nvma->data_size);
/*
 * Handle the NVM-update ioctl from the Intel nvmupdate tool.
 * Validates the ifdrv request, copies the i40e_nvm_access structure
 * (and its trailing data) in from userspace, waits up to ~10s for any
 * in-progress EMP reset to finish, executes the command through
 * i40e_nvmupd_command(), and copies the result back out.
 * Older tools under-report ifd_len, so the buffer is re-sized/re-read
 * against the embedded data_size before executing.
 */
2968 ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
2970 	struct i40e_hw *hw = &pf->hw;
2971 	struct i40e_nvm_access *nvma;
2972 	device_t dev = pf->dev;
2973 	enum i40e_status_code status = 0;
2974 	size_t nvma_size, ifd_len, exp_len;
2977 	DEBUGFUNC("ixl_handle_nvmupd_cmd");
2980 	nvma_size = sizeof(struct i40e_nvm_access);
2981 	ifd_len = ifd->ifd_len;
2983 	if (ifd_len < nvma_size ||
2984 	ifd->ifd_data == NULL) {
2985 	device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
2987 	device_printf(dev, "%s: ifdrv length: %zu, sizeof(struct i40e_nvm_access): %zu\n",
2988 	__func__, ifd_len, nvma_size);
2989 	device_printf(dev, "%s: data pointer: %p\n", __func__,
2994 	nvma = malloc(ifd_len, M_IXL, M_WAITOK);
2995 	err = copyin(ifd->ifd_data, nvma, ifd_len);
2997 	device_printf(dev, "%s: Cannot get request from user space\n",
3003 	if (pf->dbg_mask & IXL_DBG_NVMUPD)
3004 	ixl_print_nvm_cmd(dev, nvma);
/* Poll up to 100 x 100ms for a pending reset to complete. */
3006 	if (IXL_PF_IS_RESETTING(pf)) {
3008 	while (count++ < 100) {
3009 	i40e_msec_delay(100);
3010 	if (!(IXL_PF_IS_RESETTING(pf)))
3015 	if (IXL_PF_IS_RESETTING(pf)) {
3017 	"%s: timeout waiting for EMP reset to finish\n",
3023 	if (nvma->data_size < 1 || nvma->data_size > 4096) {
3025 	"%s: invalid request, data size not in supported range\n",
3032  * Older versions of the NVM update tool don't set ifd_len to the size
3033  * of the entire buffer passed to the ioctl. Check the data_size field
3034  * in the contained i40e_nvm_access struct and ensure everything is
3035  * copied in from userspace.
3037 	exp_len = nvma_size + nvma->data_size - 1; /* One byte is kept in struct */
3039 	if (ifd_len < exp_len) {
3041 	nvma = realloc(nvma, ifd_len, M_IXL, M_WAITOK);
3042 	err = copyin(ifd->ifd_data, nvma, ifd_len);
3044 	device_printf(dev, "%s: Cannot get request from user space\n",
3051 	// TODO: Might need a different lock here
3053 	status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
3054 	// IXL_PF_UNLOCK(pf);
3056 	err = copyout(nvma, ifd->ifd_data, ifd_len);
3059 	device_printf(dev, "%s: Cannot return data to user space\n",
3064 /* Let the nvmupdate report errors, show them only when debug is enabled */
3065 	if (status != 0 && (pf->dbg_mask & IXL_DBG_NVMUPD) != 0)
3066 	device_printf(dev, "i40e_nvmupd_command status %s, perrno %d\n",
3067 	i40e_stat_str(hw, status), perrno);
3070  * -EPERM is actually ERESTART, which the kernel interprets as it needing
3071  * to run this ioctl again. So use -EACCES for -EPERM instead.
3073 	if (perrno == -EPERM)
/*
 * Scan the four GLGEN_MDIO_I2C_SEL registers for one that has I2C
 * enabled and whose PHY port number matches this function; the loop
 * index of the match identifies the I2C interface to use.
 * (The port-number comparison RHS was dropped from this listing.)
 */
3080 ixl_find_i2c_interface(struct ixl_pf *pf)
3082 	struct i40e_hw *hw = &pf->hw;
3083 	bool i2c_en, port_matched;
3086 	for (int i = 0; i < 4; i++) {
3087 	reg = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(i));
3088 	i2c_en = (reg & I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK);
3089 	port_matched = ((reg & I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK)
3090 	>> I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
3092 	if (i2c_en && port_matched)
/*
 * Map a PHY-type bit position to its name. Two tables: 32 base PHY
 * types and 8 extended PHY types (selected by 'ext'). Out-of-range
 * positions return "Invalid"/"Invalid_Ext" sentinels.
 */
3100 ixl_phy_type_string(u32 bit_pos, bool ext)
3102 	static char * phy_types_str[32] = {
3132 	"1000BASE-T Optical",
3136 	static char * ext_phy_types_str[8] = {
3147 	if (ext && bit_pos > 7) return "Invalid_Ext";
3148 	if (bit_pos > 31) return "Invalid";
3150 	return (ext) ? ext_phy_types_str[bit_pos] : phy_types_str[bit_pos];
/*
 * Issue a raw get_link_status Admin Queue command (with link-status
 * event reporting enabled via I40E_AQ_LSE_ENABLE) and copy the
 * response descriptor payload into the caller's link_status struct.
 * AQ failures are logged with both the status string and last ASQ
 * status.
 */
3153 /* TODO: ERJ: I don't this is necessary anymore. */
3155 ixl_aq_get_link_status(struct ixl_pf *pf, struct i40e_aqc_get_link_status *link_status)
3157 	device_t dev = pf->dev;
3158 	struct i40e_hw *hw = &pf->hw;
3159 	struct i40e_aq_desc desc;
3160 	enum i40e_status_code status;
3162 	struct i40e_aqc_get_link_status *aq_link_status =
3163 	(struct i40e_aqc_get_link_status *)&desc.params.raw;
3165 	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
3166 	link_status->command_flags = CPU_TO_LE16(I40E_AQ_LSE_ENABLE);
3167 	status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
3170 	"%s: i40e_aqc_opc_get_link_status status %s, aq error %s\n",
3171 	__func__, i40e_stat_str(hw, status),
3172 	i40e_aq_str(hw, hw->aq.asq_last_status));
3176 	bcopy(aq_link_status, link_status, sizeof(struct i40e_aqc_get_link_status));
/*
 * Translate a link-status phy_type byte to a name: values above the
 * base range are extended types, rebased by 0x1F before the table
 * lookup in ixl_phy_type_string(); others use the base table.
 */
3181 ixl_phy_type_string_ls(u8 val)
3184 	return ixl_phy_type_string(val - 0x1F, true);
3186 	return ixl_phy_type_string(val, false);
/*
 * Sysctl handler (debug): fetch the raw AQ link status via
 * ixl_aq_get_link_status() and render its fields (phy type, speed,
 * link/AN/ext info, loopback, max frame size, power) into an sbuf.
 */
3190 ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
3192 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3193 	device_t dev = pf->dev;
3197 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3199 	device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3203 	struct i40e_aqc_get_link_status link_status;
3204 	error = ixl_aq_get_link_status(pf, &link_status);
3210 	sbuf_printf(buf, "\n"
3211 	"PHY Type : 0x%02x<%s>\n"
3213 	"Link info: 0x%02x\n"
3214 	"AN info  : 0x%02x\n"
3215 	"Ext info : 0x%02x\n"
3216 	"Loopback : 0x%02x\n"
3220 	link_status.phy_type,
3221 	ixl_phy_type_string_ls(link_status.phy_type),
3222 	link_status.link_speed,
3223 	link_status.link_info,
3224 	link_status.an_info,
3225 	link_status.ext_info,
3226 	link_status.loopback,
3227 	link_status.max_frame_size,
3229 	link_status.power_desc);
3231 	error = sbuf_finish(buf);
3233 	device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Sysctl handler (debug): query PHY capabilities from FW and dump
 * them — PHY type bitmaps (base and extended) expanded to names,
 * supported link speeds expanded to strings, then abilities, EEE,
 * EEER, D3 LPAN, PHY ID, module type, FEC config and ext comp code.
 */
3240 ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
3242 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3243 	struct i40e_hw *hw = &pf->hw;
3244 	device_t dev = pf->dev;
3245 	enum i40e_status_code status;
3246 	struct i40e_aq_get_phy_abilities_resp abilities;
3250 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3252 	device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3256 	status = i40e_aq_get_phy_capabilities(hw,
3257 	FALSE, FALSE, &abilities, NULL);
3260 	"%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
3261 	__func__, i40e_stat_str(hw, status),
3262 	i40e_aq_str(hw, hw->aq.asq_last_status));
3267 	sbuf_printf(buf, "\n"
3269 	abilities.phy_type);
/* Expand each set bit of the 32-bit base PHY type mask to a name. */
3271 	if (abilities.phy_type != 0) {
3272 	sbuf_printf(buf, "<");
3273 	for (int i = 0; i < 32; i++)
3274 	if ((1 << i) & abilities.phy_type)
3275 	sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
3276 	sbuf_printf(buf, ">");
3279 	sbuf_printf(buf, "\nPHY Ext  : %02x",
3280 	abilities.phy_type_ext);
3282 	if (abilities.phy_type_ext != 0) {
3283 	sbuf_printf(buf, "<");
3284 	for (int i = 0; i < 4; i++)
3285 	if ((1 << i) & abilities.phy_type_ext)
3286 	sbuf_printf(buf, "%s,",
3287 	ixl_phy_type_string(i, true));
3288 	sbuf_printf(buf, ">");
3291 	sbuf_printf(buf, "\nSpeed    : %02x", abilities.link_speed);
3292 	if (abilities.link_speed != 0) {
3294 	sbuf_printf(buf, " <");
3295 	for (int i = 0; i < 8; i++) {
3296 	link_speed = (1 << i) & abilities.link_speed;
3298 	sbuf_printf(buf, "%s, ",
3299 	ixl_link_speed_string(link_speed));
3301 	sbuf_printf(buf, ">");
3304 	sbuf_printf(buf, "\n"
3309 	"ID       : %02x %02x %02x %02x\n"
3310 	"ModType  : %02x %02x %02x\n"
3314 	abilities.abilities, abilities.eee_capability,
3315 	abilities.eeer_val, abilities.d3_lpan,
3316 	abilities.phy_id[0], abilities.phy_id[1],
3317 	abilities.phy_id[2], abilities.phy_id[3],
3318 	abilities.module_type[0], abilities.module_type[1],
/* fec_cfg_curr_mod_ext_info: top 3 bits and low 5 bits shown separately. */
3319 	abilities.module_type[2], (abilities.fec_cfg_curr_mod_ext_info & 0xe0) >> 5,
3320 	abilities.fec_cfg_curr_mod_ext_info & 0x1F,
3321 	abilities.ext_comp_code);
3323 	error = sbuf_finish(buf);
3325 	device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Sysctl handler (debug): dump the software MAC filter list for the
 * PF VSI (MAC, VLAN, flags per entry), and — when SR-IOV VFs exist —
 * a per-VF section for each enabled VF. "(none)" is printed for empty
 * lists; the final entry of a list omits the trailing newline.
 */
3332 ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
3334 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3335 	struct ixl_vsi *vsi = &pf->vsi;
3336 	struct ixl_mac_filter *f;
3337 	device_t dev = pf->dev;
3338 	int error = 0, ftl_len = 0, ftl_counter = 0;
3342 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3344 	device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3348 	sbuf_printf(buf, "\n");
3350 /* Print MAC filters */
3351 	sbuf_printf(buf, "PF Filters:\n");
/* First pass counts entries so the last one can skip its '\n'. */
3352 	LIST_FOREACH(f, &vsi->ftl, ftle)
3356 	sbuf_printf(buf, "(none)\n");
3358 	LIST_FOREACH(f, &vsi->ftl, ftle) {
3360 	MAC_FORMAT ", vlan %4d, flags %#06x",
3361 	MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
3362 /* don't print '\n' for last entry */
3363 	if (++ftl_counter != ftl_len)
3364 	sbuf_printf(buf, "\n");
3369 /* TODO: Give each VF its own filter list sysctl */
3371 	if (pf->num_vfs > 0) {
3372 	sbuf_printf(buf, "\n\n");
3373 	for (int i = 0; i < pf->num_vfs; i++) {
3375 	if (!(vf->vf_flags & VF_FLAG_ENABLED))
3379 	ftl_len = 0, ftl_counter = 0;
3380 	sbuf_printf(buf, "VF-%d Filters:\n", vf->vf_num);
/* NOTE(review): iterates &vsi->ftl here too — presumably the VF's
 * VSI list in the full source; extraction dropped the reassignment.
 * Verify against the complete file. */
3381 	LIST_FOREACH(f, &vsi->ftl, ftle)
3385 	sbuf_printf(buf, "(none)\n");
3387 	LIST_FOREACH(f, &vsi->ftl, ftle) {
3389 	MAC_FORMAT ", vlan %4d, flags %#06x\n",
3390 	MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
3397 	error = sbuf_finish(buf);
3399 	device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * qsort(3) comparator for switch resource allocation entries: orders
 * ascending by resource_type. IXL_SW_RES_SIZE (0x14) is the number of
 * resource types reported by FW.
 */
3405 #define IXL_SW_RES_SIZE 0x14
3407 ixl_res_alloc_cmp(const void *a, const void *b)
3409 	const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
3410 	one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
3411 	two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
3413 	return ((int)one->resource_type - (int)two->resource_type);
/*
 * Map a switch resource type code to its display name from a static
 * table of IXL_SW_RES_SIZE entries; out-of-range codes return
 * "(Reserved)". Longest name is 25 chars (used for column width).
 */
3417  * Longest string length: 25
3420 ixl_switch_res_type_string(u8 type)
3422 	static const char * ixl_switch_res_type_strings[IXL_SW_RES_SIZE] = {
3425 	"Perfect Match MAC address",
3428 	"Multicast hash entry",
3429 	"Unicast hash entry",
3433 	"VLAN Statistic Pool",
3436 	"Inner VLAN Forward filter",
3445 	if (type < IXL_SW_RES_SIZE)
3446 	return ixl_switch_res_type_strings[type];
3448 	return "(Reserved)";
/*
 * Sysctl handler (debug): query the switch resource allocation table
 * from FW, sort the entries by resource type, and print a formatted
 * table (guaranteed / total / used / unallocated per resource type).
 */
3452 ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
3454 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3455 	struct i40e_hw *hw = &pf->hw;
3456 	device_t dev = pf->dev;
3458 	enum i40e_status_code status;
3462 	struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
3464 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3466 	device_printf(dev, "Could not allocate sbuf for output.\n");
3470 	bzero(resp, sizeof(resp));
3471 	status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
3477 	"%s: get_switch_resource_alloc() error %s, aq error %s\n",
3478 	__func__, i40e_stat_str(hw, status),
3479 	i40e_aq_str(hw, hw->aq.asq_last_status));
3484 /* Sort entries by type for display */
3485 	qsort(resp, num_entries,
3486 	sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
3487 	&ixl_res_alloc_cmp);
3489 	sbuf_cat(buf, "\n");
3490 	sbuf_printf(buf, "# of entries: %d\n", num_entries);
3492 	"                     Type | Guaranteed | Total | Used   | Un-allocated\n"
3493 	"                          | (this)     | (all) | (this) | (all)       \n");
3494 	for (int i = 0; i < num_entries; i++) {
3496 	"%25s | %10d   %5d   %6d   %12d",
3497 	ixl_switch_res_type_string(resp[i].resource_type),
3501 	resp[i].total_unalloced);
/* Last row gets no trailing newline. */
3502 	if (i < num_entries - 1)
3503 	sbuf_cat(buf, "\n");
3506 	error = sbuf_finish(buf);
3508 	device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Fixed SEID ranges assigned by firmware at power-on: the EMP, the
 * MAC ports, the PFs and the VFs each occupy a known SEID window.
 * Used by ixl_switch_element_string() to infer element identity from
 * a bare SEID.
 */
3514 enum ixl_sw_seid_offset {
3515 	IXL_SW_SEID_EMP = 1,
3516 	IXL_SW_SEID_MAC_START = 2,
3517 	IXL_SW_SEID_MAC_END = 5,
3518 	IXL_SW_SEID_PF_START = 16,
3519 	IXL_SW_SEID_PF_END = 31,
3520 	IXL_SW_SEID_VF_START = 32,
3521 	IXL_SW_SEID_VF_END = 159,
/*
 * Render a short label for a switch element into the caller's sbuf
 * and return its data pointer. SEIDs inside the FW-fixed ranges
 * (EMP/MAC/PF/VF) are labeled from the SEID alone; otherwise the
 * element_type (BMC/PV/VEB/PA/VSI/...) decides the label.
 */
3525  * Caller must init and delete sbuf; this function will clear and
3526  * finish it for caller.
3528  * Note: The SEID argument only applies for elements defined by FW at
3529  * power-on; these include the EMP, Ports, PFs and VFs.
3532 ixl_switch_element_string(struct sbuf *s, u8 element_type, u16 seid)
3536 /* If SEID is in certain ranges, then we can infer the
3537  * mapping of SEID to switch element.
3539 	if (seid == IXL_SW_SEID_EMP) {
3542 	} else if (seid >= IXL_SW_SEID_MAC_START &&
3543 	seid <= IXL_SW_SEID_MAC_END) {
3544 	sbuf_printf(s, "MAC  %2d",
3545 	seid - IXL_SW_SEID_MAC_START);
3547 	} else if (seid >= IXL_SW_SEID_PF_START &&
3548 	seid <= IXL_SW_SEID_PF_END) {
3549 	sbuf_printf(s, "PF  %3d",
3550 	seid - IXL_SW_SEID_PF_START);
3552 	} else if (seid >= IXL_SW_SEID_VF_START &&
3553 	seid <= IXL_SW_SEID_VF_END) {
3554 	sbuf_printf(s, "VF  %3d",
3555 	seid - IXL_SW_SEID_VF_START);
3559 	switch (element_type) {
3560 	case I40E_AQ_SW_ELEM_TYPE_BMC:
3563 	case I40E_AQ_SW_ELEM_TYPE_PV:
3566 	case I40E_AQ_SW_ELEM_TYPE_VEB:
3569 	case I40E_AQ_SW_ELEM_TYPE_PA:
3572 	case I40E_AQ_SW_ELEM_TYPE_VSI:
3573 	sbuf_printf(s, "VSI");
3582 	return sbuf_data(s);
/*
 * qsort(3) comparator for switch configuration elements: orders
 * ascending by SEID.
 */
3586 ixl_sw_cfg_elem_seid_cmp(const void *a, const void *b)
3588 	const struct i40e_aqc_switch_config_element_resp *one, *two;
3589 	one = (const struct i40e_aqc_switch_config_element_resp *)a;
3590 	two = (const struct i40e_aqc_switch_config_element_resp *)b;
3592 	return ((int)one->seid - (int)two->seid);
/*
 * Sysctl handler (debug): fetch the HW switch configuration into a
 * large AQ buffer, sort elements by SEID, and print one table row per
 * element: SEID + name, uplink SEID + name, downlink SEID + name, and
 * connection type. A scratch auto-sbuf (nmbuf) is used for the names
 * produced by ixl_switch_element_string(). If FW reports more config
 * than fits (next != 0), a TODO message notes the continuation SEID.
 */
3596 ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
3598 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3599 	struct i40e_hw *hw = &pf->hw;
3600 	device_t dev = pf->dev;
3603 	enum i40e_status_code status;
3606 	u8 aq_buf[I40E_AQ_LARGE_BUF];
3608 	struct i40e_aqc_switch_config_element_resp *elem;
3609 	struct i40e_aqc_get_switch_config_resp *sw_config;
3610 	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
3612 	buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3614 	device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
3618 	status = i40e_aq_get_switch_config(hw, sw_config,
3619 	sizeof(aq_buf), &next, NULL);
3622 	"%s: aq_get_switch_config() error %s, aq error %s\n",
3623 	__func__, i40e_stat_str(hw, status),
3624 	i40e_aq_str(hw, hw->aq.asq_last_status));
3629 	device_printf(dev, "%s: TODO: get more config with SEID %d\n",
3632 	nmbuf = sbuf_new_auto();
3634 	device_printf(dev, "Could not allocate sbuf for name output.\n");
3639 /* Sort entries by SEID for display */
3640 	qsort(sw_config->element, sw_config->header.num_reported,
3641 	sizeof(struct i40e_aqc_switch_config_element_resp),
3642 	&ixl_sw_cfg_elem_seid_cmp);
3644 	sbuf_cat(buf, "\n");
3645 /* Assuming <= 255 elements in switch */
3646 	sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
3647 	sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
3649  * Revision -- all elements are revision 1 for now
3652 	"SEID (  Name  ) |  Up  (  Name  ) | Down (  Name  ) | Conn Type\n"
3653 	"                |                 |                 | (uplink)\n");
3654 	for (int i = 0; i < sw_config->header.num_reported; i++) {
3655 	elem = &sw_config->element[i];
3657 	// "%4d (%8s) | %8s   %8s   %#8x",
3658 	sbuf_printf(buf, "%4d", elem->seid);
3660 	sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3661 	elem->element_type, elem->seid));
3662 	sbuf_cat(buf, " | ");
3663 	sbuf_printf(buf, "%4d", elem->uplink_seid);
/* Uplink/downlink names come from SEID only: element_type 0 passed. */
3665 	sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3666 	0, elem->uplink_seid));
3667 	sbuf_cat(buf, " | ");
3668 	sbuf_printf(buf, "%4d", elem->downlink_seid);
3670 	sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
3671 	0, elem->downlink_seid));
3672 	sbuf_cat(buf, " | ");
3673 	sbuf_printf(buf, "%8d", elem->connection_type);
3674 	if (i < sw_config->header.num_reported - 1)
3675 	sbuf_cat(buf, "\n");
3679 	error = sbuf_finish(buf);
3681 	device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Sysctl handler (debug, write-only): set the switch VLAN ethertype
 * (hw->switch_tag) from a user-supplied value and push the change to
 * FW via i40e_aq_set_switch_config(). Rejected when the HW doesn't
 * advertise 802.1AD capability.
 */
3689 ixl_sysctl_switch_vlans(SYSCTL_HANDLER_ARGS)
3691 	struct ixl_pf *pf = (struct ixl_pf *)arg1;
3692 	struct i40e_hw *hw = &pf->hw;
3693 	device_t dev = pf->dev;
3694 	int requested_vlan = -1;
3695 	enum i40e_status_code status = 0;
3698 	error = sysctl_handle_int(oidp, &requested_vlan, 0, req);
3699 	if ((error) || (req->newptr == NULL))
3702 	if ((hw->flags & I40E_HW_FLAG_802_1AD_CAPABLE) == 0) {
3703 	device_printf(dev, "Flags disallow setting of vlans\n");
3707 	hw->switch_tag = requested_vlan;
3709 	"Setting switch config to switch_tag=%04x, first_tag=%04x, second_tag=%04x\n",
3710 	hw->switch_tag, hw->first_tag, hw->second_tag);
3711 	status = i40e_aq_set_switch_config(hw, 0, 0, 0, NULL);
3714 	"%s: aq_set_switch_config() error %s, aq error %s\n",
3715 	__func__, i40e_stat_str(hw, status),
3716 	i40e_aq_str(hw, hw->aq.asq_last_status));
/*
 * Sysctl handler: dump the RSS hash key as hex (and ASCII) text.
 * X722 retrieves the key via the AdminQ; other MACs read it from the
 * I40E_PFQF_HKEY registers, 4 bytes per register.
 */
3723 ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
3725 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3726 struct i40e_hw *hw = &pf->hw;
3727 device_t dev = pf->dev;
3730 enum i40e_status_code status;
3733 struct i40e_aqc_get_set_rss_key_data key_data;
3735 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3737 device_printf(dev, "Could not allocate sbuf for output.\n");
3741 bzero(&key_data, sizeof(key_data));
3743 sbuf_cat(buf, "\n");
/* X722 exposes the RSS key through an AdminQ command rather than registers */
3744 if (hw->mac.type == I40E_MAC_X722) {
3745 status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
3747 device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
3748 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
/* Non-X722: copy key out of HKEY registers, one 32-bit word at a time */
3750 for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
3751 reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
/* NOTE(review): "®" below is a mangled "&reg," (HTML-entity corruption in
 * this extraction) — restore before compiling */
3752 bcopy(®, ((caddr_t)&key_data) + (i << 2), 4);
/* Render key bytes as hex with an ASCII column (text=true) */
3756 ixl_sbuf_print_bytes(buf, (u8 *)&key_data, sizeof(key_data), 0, true);
3758 error = sbuf_finish(buf);
3760 device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Hex-dump `length` bytes of `buf` into sbuf `sb`, 16 bytes per line.
 * Each line is prefixed with a byte-offset label starting at `label_offset`;
 * when `text` is true an ASCII column is appended (non-printables as '.').
 */
3767 ixl_sbuf_print_bytes(struct sbuf *sb, u8 *buf, int length, int label_offset, bool text)
/* Nothing to print for empty/NULL input */
3772 if (length < 1 || buf == NULL) return;
3774 int byte_stride = 16;
3775 int lines = length / byte_stride;
3776 int rem = length % byte_stride;
/* NOTE(review): the usual "if (rem > 0) lines++;" appears to be among the
 * lines elided from this extraction — confirm against the full source */
3780 for (i = 0; i < lines; i++) {
/* Final line of a non-multiple-of-16 buffer is only `rem` bytes wide */
3781 width = (rem > 0 && i == lines - 1)
3782 ? rem : byte_stride;
3784 sbuf_printf(sb, "%4d | ", label_offset + i * byte_stride);
3786 for (j = 0; j < width; j++)
3787 sbuf_printf(sb, "%02x ", buf[i * byte_stride + j]);
/* Pad short final line so the ASCII column stays aligned */
3789 if (width < byte_stride) {
3790 for (k = 0; k < (byte_stride - width); k++)
3791 sbuf_printf(sb, "   ");
3795 sbuf_printf(sb, "\n");
/* ASCII column: printable chars verbatim, everything else as '.' */
3799 for (j = 0; j < width; j++) {
3800 c = (char)buf[i * byte_stride + j];
3801 if (c < 32 || c > 126)
3802 sbuf_printf(sb, ".");
3804 sbuf_printf(sb, "%c", c);
3807 sbuf_printf(sb, "\n");
/*
 * Sysctl handler: dump the RSS hash lookup table (HLUT) as hex text.
 * X722 fetches the LUT via the AdminQ; other MACs read the I40E_PFQF_HLUT
 * registers, 4 LUT entries (bytes) per 32-bit register.
 */
3813 ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
3815 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3816 struct i40e_hw *hw = &pf->hw;
3817 device_t dev = pf->dev;
3820 enum i40e_status_code status;
3824 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
3826 device_printf(dev, "Could not allocate sbuf for output.\n");
3830 bzero(hlut, sizeof(hlut));
3831 sbuf_cat(buf, "\n");
3832 if (hw->mac.type == I40E_MAC_X722) {
/* TRUE selects the PF (not VSI-specific) LUT */
3833 status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
3835 device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
3836 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
/* rss_table_size bytes total; each HLUT register holds 4 table entries */
3838 for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
3839 reg = rd32(hw, I40E_PFQF_HLUT(i));
/* NOTE(review): "®" below is a mangled "&reg," (HTML-entity corruption in
 * this extraction) — restore before compiling */
3840 bcopy(®, &hlut[i << 2], 4);
/* NOTE(review): 512 is hard-coded here while the read loop above uses
 * hw->func_caps.rss_table_size — verify they agree for all MAC types */
3843 ixl_sbuf_print_bytes(buf, hlut, 512, 0, false);
3845 error = sbuf_finish(buf);
3847 device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Sysctl handler (read-only): report the 64-bit RSS hash-enable (HENA)
 * bitmask, assembled from the two 32-bit I40E_PFQF_HENA registers.
 */
3854 ixl_sysctl_hena(SYSCTL_HANDLER_ARGS)
3856 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3857 struct i40e_hw *hw = &pf->hw;
/* HENA(0) holds the low 32 bits, HENA(1) the high 32 bits */
3860 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
3861 ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
3863 return sysctl_handle_long(oidp, NULL, hena, req);
/*
 * Sysctl to disable firmware's link management
 *
 * 1 - Disable link management on this port
 * 0 - Re-enable link management
 *
 * On normal NVMs, firmware manages link by default.
 */
3875 ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
3877 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3878 struct i40e_hw *hw = &pf->hw;
3879 device_t dev = pf->dev;
3880 int requested_mode = -1;
3881 enum i40e_status_code status = 0;
/* Read in new mode */
3885 error = sysctl_handle_int(oidp, &requested_mode, 0, req);
3886 if ((error) || (req->newptr == NULL))
/* Check for sane value */
3889 if (requested_mode < 0 || requested_mode > 1) {
3890 device_printf(dev, "Valid modes are 0 or 1\n");
/* Bit 4 of the PHY debug flags toggles FW link management off/on */
3895 status = i40e_aq_set_phy_debug(hw, !!(requested_mode) << 4, NULL);
3898 "%s: Error setting new phy debug mode %s,"
3899 " aq error: %s\n", __func__, i40e_stat_str(hw, status),
3900 i40e_aq_str(hw, hw->aq.asq_last_status));
/*
 * Read some diagnostic data from a (Q)SFP+ module
 *
 *             SFP A2   QSFP Lower Page
 * Temperature 96-97    22-23
 * TX power    102-103  34-35..40-41
 * RX power    104-105  50-51..56-57
 *
 * Module type is identified from byte 0 of the A0 page; SFP diag data
 * lives on the A2 page, QSFP diag data on the lower A0 page.
 */
3917 ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
3919 struct ixl_pf *pf = (struct ixl_pf *)arg1;
3920 device_t dev = pf->dev;
/* Size-probe pass: report 128 bytes without touching the hardware.
 * NOTE(review): SYSCTL_OUT's second argument is a pointer; passing literal
 * 0 (NULL) here relies on SYSCTL_OUT tolerating it for a sizing request —
 * confirm against sysctl(9) */
3925 if (req->oldptr == NULL) {
3926 error = SYSCTL_OUT(req, 0, 128);
/* Byte 0 of page 0xA0 identifies the transceiver type */
3930 error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
3932 device_printf(dev, "Error reading from i2c\n");
/* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */
3937 if (output == 0x3) {
/*
 * Check byte 92 for:
 * - Internally calibrated data
 * - Diagnostic monitoring is implemented
 */
3943 pf->read_i2c_byte(pf, 92, 0xA0, &output);
3944 if (!(output & 0x60)) {
3945 device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
3949 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
/* SFP: temperature (96-97) + Vcc (98-99) from the A2 page */
3951 for (u8 offset = 96; offset < 100; offset++) {
3952 pf->read_i2c_byte(pf, offset, 0xA2, &output);
3953 sbuf_printf(sbuf, "%02X ", output);
/* SFP: TX power (102-103) + RX power (104-105) from the A2 page */
3955 for (u8 offset = 102; offset < 106; offset++) {
3956 pf->read_i2c_byte(pf, offset, 0xA2, &output);
3957 sbuf_printf(sbuf, "%02X ", output);
3959 } else if (output == 0xD || output == 0x11) {
/*
 * QSFP+ modules are always internally calibrated, and must indicate
 * what types of diagnostic monitoring are implemented
 */
3964 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
/* QSFP: temperature (22-23) from the lower page */
3966 for (u8 offset = 22; offset < 24; offset++) {
3967 pf->read_i2c_byte(pf, offset, 0xA0, &output);
3968 sbuf_printf(sbuf, "%02X ", output);
/* QSFP: Vcc (26-27) from the lower page */
3970 for (u8 offset = 26; offset < 28; offset++) {
3971 pf->read_i2c_byte(pf, offset, 0xA0, &output);
3972 sbuf_printf(sbuf, "%02X ", output);
/* Read the data from the first lane */
3975 for (u8 offset = 34; offset < 36; offset++) {
3976 pf->read_i2c_byte(pf, offset, 0xA0, &output);
3977 sbuf_printf(sbuf, "%02X ", output);
3979 for (u8 offset = 50; offset < 52; offset++) {
3980 pf->read_i2c_byte(pf, offset, 0xA0, &output);
3981 sbuf_printf(sbuf, "%02X ", output);
3984 device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output);
/*
 * Sysctl to read a byte from I2C bus.
 *
 * Input: 32-bit value:
 * 	bits 0-7:   device address (0xA0 or 0xA2)
 * 	bits 8-15:  offset (0-255)
 * 	bits 16-31: unused
 * Output: 8-bit value read (printed to the console, not returned via sysctl)
 */
4004 ixl_sysctl_read_i2c_byte(SYSCTL_HANDLER_ARGS)
4006 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4007 device_t dev = pf->dev;
4008 int input = -1, error = 0;
4009 u8 dev_addr, offset, output;
/* Read in I2C read parameters */
4012 error = sysctl_handle_int(oidp, &input, 0, req);
4013 if ((error) || (req->newptr == NULL))
/* Validate device address */
4016 dev_addr = input & 0xFF;
/* Only the module's A0 (ID/static) and A2 (diagnostics) pages are valid */
4017 if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4020 offset = (input >> 8) & 0xFF;
4022 error = pf->read_i2c_byte(pf, offset, dev_addr, &output);
4026 device_printf(dev, "%02X\n", output);
/*
 * Sysctl to write a byte to the I2C bus.
 *
 * Input: 32-bit value:
 * 	bits 0-7:   device address (0xA0 or 0xA2)
 * 	bits 8-15:  offset (0-255)
 * 	bits 16-23: value to write
 * 	bits 24-31: unused
 * Output: 8-bit value written (printed to the console)
 */
4041 ixl_sysctl_write_i2c_byte(SYSCTL_HANDLER_ARGS)
4043 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4044 device_t dev = pf->dev;
4045 int input = -1, error = 0;
4046 u8 dev_addr, offset, value;
/* Read in I2C write parameters */
4049 error = sysctl_handle_int(oidp, &input, 0, req);
4050 if ((error) || (req->newptr == NULL))
/* Validate device address */
4053 dev_addr = input & 0xFF;
/* Only the module's A0 and A2 pages are valid write targets */
4054 if (dev_addr != 0xA0 && dev_addr != 0xA2) {
4057 offset = (input >> 8) & 0xFF;
4058 value = (input >> 16) & 0xFF;
4060 error = pf->write_i2c_byte(pf, offset, dev_addr, value);
4064 device_printf(dev, "%02X written\n", value);
/*
 * Query PHY abilities and report, via *is_set, whether the FEC config bit
 * `bit_pos` is set in fec_cfg_curr_mod_ext_info. Fills *abilities as a side
 * effect for a subsequent ixl_set_fec_config() call. Skipped entirely when
 * the PF is in firmware recovery mode.
 */
4069 ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4070 u8 bit_pos, int *is_set)
4072 device_t dev = pf->dev;
4073 struct i40e_hw *hw = &pf->hw;
4074 enum i40e_status_code status;
/* AdminQ is unusable for PHY queries while in recovery mode */
4076 if (IXL_PF_IN_RECOVERY_MODE(pf))
/* FALSE, FALSE: report current (not qualified) modules, no EEE data */
4079 status = i40e_aq_get_phy_capabilities(hw,
4080 FALSE, FALSE, abilities, NULL);
4083 "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
4084 __func__, i40e_stat_str(hw, status),
4085 i40e_aq_str(hw, hw->aq.asq_last_status));
/* Normalize the masked bit to 0/1 */
4089 *is_set = !!(abilities->fec_cfg_curr_mod_ext_info & bit_pos);
/*
 * Set or clear FEC config bit `bit_pos` in the PHY configuration, based on
 * the abilities previously fetched by ixl_get_fec_config(). Issues an
 * AdminQ set_phy_config only when the resulting FEC config actually differs
 * from the current one.
 */
4094 ixl_set_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abilities,
4095 u8 bit_pos, int set)
4097 device_t dev = pf->dev;
4098 struct i40e_hw *hw = &pf->hw;
4099 struct i40e_aq_set_phy_config config;
4100 enum i40e_status_code status;
/* Set new PHY config */
4103 memset(&config, 0, sizeof(config));
/* Start from current FEC config with the target bit cleared... */
4104 config.fec_config = abilities->fec_cfg_curr_mod_ext_info & ~(bit_pos);
/* ...then re-set it if requested (the `if (set)` header is elided here) */
4106 config.fec_config |= bit_pos;
/* Only talk to firmware when the FEC config actually changed */
4107 if (config.fec_config != abilities->fec_cfg_curr_mod_ext_info) {
4108 config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
/* Carry over all other PHY settings unchanged from the current abilities */
4109 config.phy_type = abilities->phy_type;
4110 config.phy_type_ext = abilities->phy_type_ext;
4111 config.link_speed = abilities->link_speed;
4112 config.eee_capability = abilities->eee_capability;
4113 config.eeer = abilities->eeer_val;
4114 config.low_power_ctrl = abilities->d3_lpan;
4115 status = i40e_aq_set_phy_config(hw, &config, NULL);
4119 "%s: i40e_aq_set_phy_config() status %s, aq error %s\n",
4120 __func__, i40e_stat_str(hw, status),
4121 i40e_aq_str(hw, hw->aq.asq_last_status));
/*
 * Sysctl handler: get/set the KR-FEC (Fire Code / BASE-R) *ability* bit.
 * Read reports I40E_AQ_ENABLE_FEC_KR; write applies
 * I40E_AQ_SET_FEC_ABILITY_KR via ixl_set_fec_config().
 */
4130 ixl_sysctl_fec_fc_ability(SYSCTL_HANDLER_ARGS)
4132 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4133 int mode, error = 0;
4135 struct i40e_aq_get_phy_abilities_resp abilities;
4136 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_KR, &mode);
/* Read in new mode */
4140 error = sysctl_handle_int(oidp, &mode, 0, req);
4141 if ((error) || (req->newptr == NULL))
4144 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_KR, !!(mode));
/*
 * Sysctl handler: get/set the RS-FEC (Reed-Solomon) *ability* bit.
 * Mirrors ixl_sysctl_fec_fc_ability() for the RS bit positions.
 */
4148 ixl_sysctl_fec_rs_ability(SYSCTL_HANDLER_ARGS)
4150 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4151 int mode, error = 0;
4153 struct i40e_aq_get_phy_abilities_resp abilities;
4154 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_RS, &mode);
/* Read in new mode */
4158 error = sysctl_handle_int(oidp, &mode, 0, req);
4159 if ((error) || (req->newptr == NULL))
4162 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_ABILITY_RS, !!(mode));
/*
 * Sysctl handler: get/set the KR-FEC *request* bit (request the link
 * partner to use Fire Code FEC), as distinct from the ability bit.
 */
4166 ixl_sysctl_fec_fc_request(SYSCTL_HANDLER_ARGS)
4168 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4169 int mode, error = 0;
4171 struct i40e_aq_get_phy_abilities_resp abilities;
4172 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_KR, &mode);
/* Read in new mode */
4176 error = sysctl_handle_int(oidp, &mode, 0, req);
4177 if ((error) || (req->newptr == NULL))
4180 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_KR, !!(mode));
/*
 * Sysctl handler: get/set the RS-FEC *request* bit (request the link
 * partner to use Reed-Solomon FEC).
 */
4184 ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS)
4186 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4187 int mode, error = 0;
4189 struct i40e_aq_get_phy_abilities_resp abilities;
4190 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_REQUEST_FEC_RS, &mode);
/* Read in new mode */
4194 error = sysctl_handle_int(oidp, &mode, 0, req);
4195 if ((error) || (req->newptr == NULL))
4198 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_REQUEST_RS, !!(mode));
/*
 * Sysctl handler: get/set the automatic-FEC bit (let firmware pick the
 * FEC mode based on link partner / module).
 */
4202 ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS)
4204 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4205 int mode, error = 0;
4207 struct i40e_aq_get_phy_abilities_resp abilities;
4208 error = ixl_get_fec_config(pf, &abilities, I40E_AQ_ENABLE_FEC_AUTO, &mode);
/* Read in new mode */
4212 error = sysctl_handle_int(oidp, &mode, 0, req);
4213 if ((error) || (req->newptr == NULL))
4216 return ixl_set_fec_config(pf, &abilities, I40E_AQ_SET_FEC_AUTO, !!(mode));
/*
 * Sysctl handler: dump a firmware debug cluster via repeated
 * i40e_aq_debug_dump() AdminQ calls. Each 4KB chunk is accumulated into
 * `final_buff`; when the firmware advances to a new table, the accumulated
 * data is hex-printed and the accumulator is reset. Terminates when the
 * firmware reports table 0xFF (cluster complete) or index 0xFFFFFFFF.
 */
4220 ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
4222 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4223 struct i40e_hw *hw = &pf->hw;
4224 device_t dev = pf->dev;
4227 enum i40e_status_code status;
4229 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4231 device_printf(dev, "Could not allocate sbuf for output.\n");
/* This amount is only necessary if reading the entire cluster into memory */
#define IXL_FINAL_BUFF_SIZE	(1280 * 1024)
/* M_NOWAIT: allocation may fail under memory pressure — checked below */
4238 final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_IXL, M_NOWAIT);
4239 if (final_buff == NULL) {
4240 device_printf(dev, "Could not allocate memory for output.\n");
4243 int final_buff_len = 0;
/* Firmware paging state: table/index cursors advanced by each AQ call */
4249 u16 curr_buff_size = 4096;
4250 u8 curr_next_table = 0;
4251 u32 curr_next_index = 0;
4257 sbuf_cat(buf, "\n");
4260 status = i40e_aq_debug_dump(hw, cluster_id, curr_next_table, curr_next_index, curr_buff_size,
4261 dump_buf, &ret_buff_size, &ret_next_table, &ret_next_index, NULL);
4263 device_printf(dev, "i40e_aq_debug_dump status %s, error %s\n",
4264 i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
/* copy info out of temp buffer */
4269 bcopy(dump_buf, (caddr_t)final_buff + final_buff_len, ret_buff_size);
4270 final_buff_len += ret_buff_size;
4272 if (ret_next_table != curr_next_table) {
/* We're done with the current table; we can dump out read data. */
4274 sbuf_printf(buf, "%d:", curr_next_table);
4275 int bytes_printed = 0;
/* NOTE(review): "<=" prints one extra 16-byte group past final_buff_len
 * when final_buff_len is a multiple of 16 — looks like an off-by-one;
 * confirm intent against the upstream driver */
4276 while (bytes_printed <= final_buff_len) {
4277 sbuf_printf(buf, "%16D", ((caddr_t)final_buff + bytes_printed), "");
4278 bytes_printed += 16;
4280 sbuf_cat(buf, "\n");
/* The entire cluster has been read; we're finished */
4283 if (ret_next_table == 0xFF)
/* Otherwise clear the output buffer and continue reading */
4287 bzero(final_buff, IXL_FINAL_BUFF_SIZE);
/* 0xFFFFFFFF index marks end-of-data within the current table */
4291 if (ret_next_index == 0xFFFFFFFF)
4294 bzero(dump_buf, sizeof(dump_buf));
4295 curr_next_table = ret_next_table;
4296 curr_next_index = ret_next_index;
4300 free(final_buff, M_IXL);
4302 error = sbuf_finish(buf);
4304 device_printf(dev, "Error finishing sbuf: %d\n", error);
/*
 * Start the firmware LLDP agent via the AdminQ and clear the driver's
 * FW_LLDP_DISABLED state flag. Maps the two expected AQ failure codes
 * (already running, forbidden by NVM/HII config) to friendlier messages.
 */
4311 ixl_start_fw_lldp(struct ixl_pf *pf)
4313 struct i40e_hw *hw = &pf->hw;
4314 enum i40e_status_code status;
/* false: do not persist the LLDP state across resets */
4316 status = i40e_aq_start_lldp(hw, false, NULL);
4317 if (status != I40E_SUCCESS) {
4318 switch (hw->aq.asq_last_status) {
/* EEXIST: agent was already running — benign, reported but not fatal */
4319 case I40E_AQ_RC_EEXIST:
4320 device_printf(pf->dev,
4321 "FW LLDP agent is already running\n");
/* EPERM: NVM/HII "LLDP Agent" attribute forbids SW control */
4323 case I40E_AQ_RC_EPERM:
4324 device_printf(pf->dev,
4325 "Device configuration forbids SW from starting "
4326 "the LLDP agent. Set the \"LLDP Agent\" UEFI HII "
4327 "attribute to \"Enabled\" to use this sysctl\n");
4330 device_printf(pf->dev,
4331 "Starting FW LLDP agent failed: error: %s, %s\n",
4332 i40e_stat_str(hw, status),
4333 i40e_aq_str(hw, hw->aq.asq_last_status));
/* Agent running again: clear the disabled flag atomically */
4338 atomic_clear_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
/*
 * Stop the firmware LLDP agent and set the FW_LLDP_DISABLED state flag.
 * Refuses on NPAR-enabled devices and on firmware that does not advertise
 * the stoppable-LLDP capability. After stopping, DCB parameters are
 * restored via i40e_aq_set_dcb_parameters().
 */
4343 ixl_stop_fw_lldp(struct ixl_pf *pf)
4345 struct i40e_hw *hw = &pf->hw;
4346 device_t dev = pf->dev;
4347 enum i40e_status_code status;
/* NPAR partitioned devices cannot have their LLDP agent disabled */
4349 if (hw->func_caps.npar_enable != 0) {
4351 "Disabling FW LLDP agent is not supported on this device\n");
/* Firmware must advertise the stop-LLDP capability */
4355 if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) {
4357 "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
/* true, false: stop and shut down the agent, without persisting */
4361 status = i40e_aq_stop_lldp(hw, true, false, NULL);
4362 if (status != I40E_SUCCESS) {
4363 if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) {
4365 "Disabling FW LLDP agent failed: error: %s, %s\n",
4366 i40e_stat_str(hw, status),
4367 i40e_aq_str(hw, hw->aq.asq_last_status));
/* EPERM here means the agent was already stopped — benign */
4371 device_printf(dev, "FW LLDP agent is already stopped\n");
/* Re-apply DCB parameters now that the FW agent is out of the way */
4374 i40e_aq_set_dcb_parameters(hw, true, NULL);
4375 atomic_set_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
/*
 * Sysctl handler: enable (1) or disable (0) the firmware LLDP agent.
 * Current state is derived from the FW_LLDP_DISABLED flag in pf->state;
 * changes are delegated to ixl_start_fw_lldp()/ixl_stop_fw_lldp().
 */
4380 ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
4382 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4383 int state, new_state, error = 0;
/* 1 = agent enabled (DISABLED flag clear), 0 = agent disabled */
4385 state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
/* Read in new mode */
4388 error = sysctl_handle_int(oidp, &new_state, 0, req);
4389 if ((error) || (req->newptr == NULL))
/* Already in requested state */
4393 if (new_state == state)
4397 return ixl_stop_fw_lldp(pf);
4399 return ixl_start_fw_lldp(pf);
/*
 * Sysctl handler: enable/disable Energy Efficient Ethernet (EEE).
 * Tracks the requested state against IXL_PF_STATE_EEE_ENABLED and applies
 * it with i40e_enable_eee(); the state flag is only updated on success.
 */
4403 ixl_sysctl_eee_enable(SYSCTL_HANDLER_ARGS)
4405 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4406 int state, new_state;
4407 int sysctl_handle_status = 0;
4408 enum i40e_status_code cmd_status;
/* Init states' values */
4411 state = new_state = (!!(pf->state & IXL_PF_STATE_EEE_ENABLED));
/* Get requested mode */
4414 sysctl_handle_status = sysctl_handle_int(oidp, &new_state, 0, req);
4415 if ((sysctl_handle_status) || (req->newptr == NULL))
4416 return (sysctl_handle_status);
/* Check if state has changed */
4419 if (new_state == state)
4423 cmd_status = i40e_enable_eee(&pf->hw, (bool)(!!new_state));
/* Save new state or report error */
4428 atomic_clear_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
4430 atomic_set_32(&pf->state, IXL_PF_STATE_EEE_ENABLED);
/* I40E_ERR_CONFIG: requested mode not supported by the current PHY config */
4431 } else if (cmd_status == I40E_ERR_CONFIG)
/*
 * Attach-time link-state probe. Firmware older than 4.33 needs an explicit
 * autoneg restart (after a short settle delay) before link status can be
 * trusted; afterwards the cached PHY link info is refreshed into pf->link_up.
 */
4440 ixl_attach_get_link_status(struct ixl_pf *pf)
4442 struct i40e_hw *hw = &pf->hw;
4443 device_t dev = pf->dev;
/* FW < 4.33 workaround: restart AN so the link comes up reliably */
4446 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
4447 (hw->aq.fw_maj_ver < 4)) {
/* Give the firmware time to settle before restarting autoneg */
4448 i40e_msec_delay(75);
4449 error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
4451 device_printf(dev, "link restart failed, aq_err=%d\n",
4452 pf->hw.aq.asq_last_status);
/* Determine link state */
4458 hw->phy.get_link_info = TRUE;
4459 i40e_get_link_status(hw, &pf->link_up);
/*
 * Sysctl handler: request a PF reset. The reset itself is deferred to the
 * admin task, which reacts to the PF_RESET_REQ state bit set here.
 */
4464 ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS)
4466 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4467 int requested = 0, error = 0;
/* Read in new mode */
4470 error = sysctl_handle_int(oidp, &requested, 0, req);
4471 if ((error) || (req->newptr == NULL))
/* Initiate the PF reset later in the admin task */
4475 atomic_set_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
/*
 * Sysctl handler: trigger an immediate CORE reset by writing the CORER
 * bit to the global reset trigger register (affects the whole core, not
 * just this PF).
 */
4481 ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS)
4483 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4484 struct i40e_hw *hw = &pf->hw;
4485 int requested = 0, error = 0;
/* Read in new mode */
4488 error = sysctl_handle_int(oidp, &requested, 0, req);
4489 if ((error) || (req->newptr == NULL))
/* Fire the core reset directly via the reset trigger register */
4492 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
/*
 * Sysctl handler: trigger an immediate GLOBAL reset by writing the GLOBR
 * bit to the global reset trigger register (resets the entire device,
 * all ports/PFs).
 */
4498 ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
4500 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4501 struct i40e_hw *hw = &pf->hw;
4502 int requested = 0, error = 0;
/* Read in new mode */
4505 error = sysctl_handle_int(oidp, &requested, 0, req);
4506 if ((error) || (req->newptr == NULL))
/* Fire the global reset directly via the reset trigger register */
4509 wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_GLOBR_MASK);
4515 * Print out mapping of TX queue indexes and Rx queue indexes
4519 ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS)
4521 struct ixl_pf *pf = (struct ixl_pf *)arg1;
4522 struct ixl_vsi *vsi = &pf->vsi;
4523 device_t dev = pf->dev;
4527 struct ixl_rx_queue *rx_que = vsi->rx_queues;
4528 struct ixl_tx_queue *tx_que = vsi->tx_queues;
4530 buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
4532 device_printf(dev, "Could not allocate sbuf for output.\n");
4536 sbuf_cat(buf, "\n");
4537 for (int i = 0; i < vsi->num_rx_queues; i++) {
4538 rx_que = &vsi->rx_queues[i];
4539 sbuf_printf(buf, "(rxq %3d): %d\n", i, rx_que->msix);
4541 for (int i = 0; i < vsi->num_tx_queues; i++) {
4542 tx_que = &vsi->tx_queues[i];
4543 sbuf_printf(buf, "(txq %3d): %d\n", i, tx_que->msix);
4546 error = sbuf_finish(buf);
4548 device_printf(dev, "Error finishing sbuf: %d\n", error);